diff --git a/.claude/agents/tentacle-manager.md b/.claude/agents/tentacle-manager.md new file mode 100644 index 0000000000..236d94840a --- /dev/null +++ b/.claude/agents/tentacle-manager.md @@ -0,0 +1,136 @@ +--- +name: tentacle-manager +description: "Use this agent to export and install OctoBot tentacles. Handles the two-step process: export tentacles from packages/tentacles into a zip, then install that zip. Use when the user says 'install tentacles', 'export tentacles', 'update tentacles', or after modifying tentacle source files." +tools: Bash, Read, Glob, Grep +model: sonnet +--- + +# Tentacle Manager Agent + +You manage OctoBot tentacles export and installation. This is a two-step process that mirrors the VSCode "Install tentacles zip" launch configuration. + +## Environment + +- **Python**: `venv/bin/python` (workspace venv, one level above the OctoBot repo root) +- **Tentacles source**: `packages/tentacles/` + +All commands run from the OctoBot repo root (the working directory). + +Before running any commands, export ROOT and PYTHONPATH once (do NOT use `$()` or subshells — they trigger permission prompts). PYTHONPATH must be absolute so that build subprocesses running from tentacle subdirectories can resolve all packages. Use `$PWD` to avoid hardcoded paths: + +```bash +ROOT=$PWD +export PYTHONPATH="$ROOT:$ROOT/packages/agents:$ROOT/packages/async_channel:$ROOT/packages/backtesting:$ROOT/packages/binary:$ROOT/packages/commons:$ROOT/packages/evaluators:$ROOT/packages/flow:$ROOT/packages/node:$ROOT/packages/services:$ROOT/packages/sync:$ROOT/packages/tentacles:$ROOT/packages/tentacles_manager:$ROOT/packages/trading:$ROOT/packages/trading_backend" +``` + +## Step 1: Export tentacles to zip + +```bash +venv/bin/python start.py tentacles -p tentacles_default_export.zip -d packages/tentacles +``` + +This packs all tentacles from `packages/tentacles/` into a zip at `output/any_platform.zip`. + +## Step 2: Install tentacles from zip + +```bash +venv/bin/python start.py tentacles -i --all --location output/any_platform.zip +``` + +This installs all tentacles from the exported zip. + +## Generate CCXT exchange tentacles + +Generates Python exchange implementations from the CCXT TypeScript sources and copies them into the tentacle tree. This is a multi-step pipeline. + +### Step 1: Build CCXT exchange (in `../ccxt/`) + +```bash +cd ../ccxt && nvm use 24 && npm run export-exchanges && npm run tsBuild && npm run emitAPIPy && npm run transpileRest && npm run transpileWs +``` + +Replace `` with the exchange name (e.g. `polymarket`, `bisq`). + +This transpiles the TypeScript exchange implementation into Python files at `../ccxt/python/ccxt/`. + +### Step 2: Copy generated files into tentacles + +```bash +python ../download_all_exchanges.py +``` + +This runs each exchange's `packages/tentacles/Trading/Exchange//script/download.py` which: +- Copies the 4 generated files (sync, async, pro, abstract) from `../ccxt/python/ccxt/` into the tentacle's `ccxt/` subdirectory +- Patches imports to use relative paths instead of ccxt module paths + +### Generated file mapping per exchange + +| Source (`../ccxt/python/ccxt/`) | Destination (`packages/tentacles/Trading/Exchange//ccxt/`) | +|---|---| +| `.py` | `_sync.py` | +| `async_support/.py` | `_async.py` | +| `pro/.py` | `_pro.py` | +| `abstract/.py` | `_abstract.py` | + +### Full pipeline (build + download + export + install) + +To regenerate an exchange and install updated tentacles: +1. Build CCXT exchange (step above) +2. Run `download_all_exchanges.py` +3. Export tentacles to zip (Step 1 from main workflow) +4. Install tentacles from zip (Step 2 from main workflow) + +## Default behavior + +When invoked without specific instructions, run export + install (the two main steps). If one step fails, report the error and stop. + +## CLI reference + +Base command: `venv/bin/python start.py tentacles [OPTIONS] [tentacle_names...]` + +### Operations (pick one) + +| Flag | Description | +|------|-------------| +| `-i`, `--install` | Install tentacles (requires names or `--all`, and `--location`) | +| `-u`, `--update` | Update tentacles (requires names or `--all`, and `--location`) | +| `-ui`, `--uninstall` | Uninstall tentacles (requires names or `--all`) | +| `-r`, `--repair` | Repair installation (fix __init__.py, missing folders, configs) | +| `-p`, `--pack ` | Pack tentacles into a zip (requires `-d`) | +| `-e`, `--export [pkg]` | Export tentacles to folder, optionally filtered by package (requires `-d`) | +| `-sti ` | Install single tentacle from local path, e.g. `-sti "/bot/macd_eval" "Evaluator/TA"` | +| `-c`, `--creator ` | Start tentacle creator (e.g. `-c Evaluator`, `-c help`) | + +### Target selection + +| Flag | Description | +|------|-------------| +| `-a`, `--all` | Apply to all tentacles | +| `tentacle_names` | Positional args: specific tentacle names | + +### Paths + +| Flag | Description | +|------|-------------| +| `-d`, `--directory ` | Root tentacles folder to operate on | +| `-l`, `--location ` | Tentacles package path or URL | + +### Export/upload options + +| Flag | Description | +|------|-------------| +| `-ite` | Also export each tentacle as a separate bundle | +| `-idm` | Include dev-mode tentacles in export | +| `--export-with-package-name` | Use artifact name as package name | +| `-ute ` | Upload tentacles export to path | +| `-upe ` | Upload package export to path | +| `-ut ` | Upload type: `s3` (default) or `nexus` | +| `-m ` | Metadata file for export | +| `-cy`, `--cythonize` | Cythonize/compile packed tentacles | + +### Misc + +| Flag | Description | +|------|-------------| +| `-f`, `--force` | Skip confirmations | +| `-q`, `--quite` | Quiet mode (errors only) | diff --git a/.claude/agents/test-runner.md b/.claude/agents/test-runner.md new file mode 100644 index 0000000000..551baac1c4 --- /dev/null +++ b/.claude/agents/test-runner.md @@ -0,0 +1,117 @@ +--- +name: test-runner +description: "Run and debug OctoBot tests. Handles both root-level tests (tests/) and package tests (packages//tests/). Use when the user says 'run tests', 'test ', 'debug test', or after modifying source code that has tests." +tools: Bash, Read, Glob, Grep, Edit +model: sonnet +--- + +# Test Runner Agent + +You run and debug OctoBot Python tests using pytest. You can run the full suite, a specific package's tests, or individual test files/functions. + +## Environment + +All commands run from the OctoBot repo root (the working directory). + +Before running any commands, export ROOT and PYTHONPATH once (do NOT use `$()` or subshells): + +```bash +ROOT=$PWD +export PYTHONPATH="$ROOT:$ROOT/packages/agents:$ROOT/packages/async_channel:$ROOT/packages/backtesting:$ROOT/packages/binary:$ROOT/packages/commons:$ROOT/packages/evaluators:$ROOT/packages/flow:$ROOT/packages/node:$ROOT/packages/services:$ROOT/packages/sync:$ROOT/packages/tentacles:$ROOT/packages/tentacles_manager:$ROOT/packages/trading:$ROOT/packages/trading_backend" +``` + +Python: `venv/bin/python` + +## Test layout + +- **Root tests**: `tests/` — OctoBot-level unit and functional tests (has a `conftest.py` that sets up paths and tentacles) +- **Package tests**: `packages//tests/` — per-package test suites +- Packages with tests: `async_channel`, `backtesting`, `commons`, `evaluators`, `flow`, `node`, `services`, `sync`, `tentacles_manager`, `trading`, `trading_backend` +- Some packages have nested test directories (e.g., `packages/commons/tests/databases/`) +- Some packages load `.env` via conftest (e.g., `flow`, `sync`) + +## Running tests + +### Specific package +```bash +venv/bin/python -m pytest packages//tests/ -x -v +``` + +### Specific test file +```bash +venv/bin/python -m pytest packages//tests/test_foo.py -x -v +``` + +### Specific test function +```bash +venv/bin/python -m pytest packages//tests/test_foo.py::TestClass::test_method -x -v +``` + +### Root OctoBot tests +```bash +venv/bin/python -m pytest tests/ -x -v +``` + +### Tentacle tests +Tentacles have their own test suites inside `packages/tentacles////tests/`. +```bash +venv/bin/python -m pytest packages/tentacles////tests/ -x -v +``` + +Example: +```bash +venv/bin/python -m pytest packages/tentacles/Trading/Mode/daily_trading_mode/tests/ -x -v +``` + +### With keyword filter +```bash +venv/bin/python -m pytest packages//tests/ -x -v -k "keyword" +``` + +## Exchange-specific tests (only for CCXT/exchange API updates) + +These tests hit real exchange APIs and should only be run when updating CCXT or exchange implementations. Do NOT run them as part of normal test workflows. + +- **`packages/trading/tests_additional/`** — real exchange API tests for the trading package (per-exchange testers under `real_exchanges/`) +- **`additional_tests/exchanges_tests/`** — authenticated exchange integration tests at the OctoBot level + +```bash +# Single exchange +venv/bin/python -m pytest packages/trading/tests_additional/real_exchanges/test_binance.py -x -v +venv/bin/python -m pytest additional_tests/exchanges_tests/test_binance.py -x -v +``` + +## Debugging workflow + +When a test fails: + +1. **Read the failure output** — understand the traceback, which assertion failed, and why +2. **Read the test code** — understand what the test expects +3. **Read the source code** — find the function/class being tested +4. **Identify the root cause** — is it a test bug or a source bug? +5. **Fix** — make the minimal edit to fix the issue +6. **Re-run** — run the specific failing test to confirm the fix +7. **Run broader** — re-run the full test file/package to check for regressions + +## Flags reference + +| Flag | Purpose | +|------|---------| +| `-x` | Stop on first failure | +| `-v` | Verbose output | +| `-vv` | Extra verbose (shows full diffs) | +| `-s` | Show print/stdout output | +| `-k "expr"` | Filter by keyword expression | +| `--tb=short` | Shorter tracebacks | +| `--tb=long` | Full tracebacks | +| `--lf` | Re-run only last failed tests | +| `--pdb` | Drop into debugger on failure (interactive — avoid in agent) | + +## Default behavior + +When invoked without specific instructions: +- If the user names a package, run that package's tests +- If the user names a file or test, run that specifically +- If unclear, ask which package or test to run +- Always use `-x -v` by default +- On failure, read the failing test and source, diagnose the issue, fix it, and re-run to confirm diff --git a/.dockerignore b/.dockerignore index ffa6c9a4a4..e13592242f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -51,7 +51,7 @@ __pycache__/ .Python build/ develop-eggs/ -dist/ +# dist/ downloads/ eggs/ .eggs/ diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..a5040ce86a --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +docs/static/**/*.png filter=lfs diff=lfs merge=lfs -text +docs/static/**/*.jpg filter=lfs diff=lfs merge=lfs -text +docs/static/**/*.jpeg filter=lfs diff=lfs merge=lfs -text +docs/static/**/*.webp filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml deleted file mode 100644 index d93d74b603..0000000000 --- a/.github/workflows/docker.yml +++ /dev/null @@ -1,203 +0,0 @@ -name: OctoBot-Docker -on: - push: - branches: - - "master" - - "dev" - tags: - - "*" - pull_request: - -jobs: - lint: - name: ubuntu-latest - Docker - lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v5 - - - name: Run hadolint - uses: reviewdog/action-hadolint@v1 - with: - github_token: ${{ secrets.github_token }} - hadolint_ignore: DL3013 DL3008 - - build_test_push: - needs: lint - name: ubuntu-latest - Docker - build & test & push - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v5 - - - name: Set Environment Variables - run: | - OWNER="$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]' | tr -d '-')" - IMG=octobot - echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV - echo "IMAGE=${OWNER}/${IMG}" >> $GITHUB_ENV - echo "LATEST=latest" >> $GITHUB_ENV - echo "STAGING=staging" >> $GITHUB_ENV - echo "STABLE=stable" >> $GITHUB_ENV - echo "TEST=test" >> $GITHUB_ENV - echo "SHA=${GITHUB_SHA}" >> $GITHUB_ENV - echo "CONTAINER_NAME=octobot" >> $GITHUB_ENV - echo "CHECK_TENTACLE_CONTAINER_TIME=10" >> $GITHUB_ENV - echo "WAIT_CONTAINER_TIME=80" >> $GITHUB_ENV - - - name: Wait for tentacles - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - uses: fountainhead/action-wait-for-check@v1.0.0 - id: wait-for-tentacles - with: - token: ${{ secrets.AUTH_TOKEN }} - checkName: "ubuntu-latestx64 - Python - 3.10 - Upload" - ref: ${{ github.ref }} - repo: OctoBot-Tentacles - timeoutSeconds: 3600 - - - name: Trigger fail when Tentacles failed - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') && steps.wait-for-tentacles.outputs.conclusion == 'failure' - run: exit 1 - - - name: Set up QEMU - id: qemu-setup - uses: docker/setup-qemu-action@master - with: - platforms: all - - - name: Print available platforms - run: echo ${{ steps.qemu.outputs.platforms }} - - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@master - with: - driver: docker-container - use: true - - - name: Cache Docker layers - uses: actions/cache@v4 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Login to DockerHub - if: github.event_name == 'push' - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build latest - if: github.event_name != 'push' - uses: docker/build-push-action@master - with: - context: . - builder: ${{ steps.buildx.outputs.name }} - platforms: linux/amd64 - # Using "load: true" forces the docker driver. - # Unfortunately, the "docker" driver does not support - # multi-platform builds. - load: true - push: false - tags: ${{ env.IMAGE }}:${{ env.SHA }} - build-args: | - TENTACLES_URL_TAG=${{ env.LATEST }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Docker build test container - if: github.event_name != 'push' - run: | - docker build -f ./tests/Dockerfile --build-arg OCTOBOT_IMAGE=${{ env.IMAGE }}:${{ env.SHA }} -t ${{ env.IMAGE }}:${{ env.TEST }} . - - - name: run tests in docker - if: github.event_name != 'push' - run: | - docker run -i -e TENTACLES_REPOSITORY=dev-tentacles -e TENTACLES_URL_TAG=${GITHUB_HEAD_REF////_} -e DISABLE_SENTRY=True ${{ env.IMAGE }}:${{ env.TEST }} - - - name: run and check health after start - if: github.event_name != 'push' - run: | - echo Start OctoBot docker with branche_name tentacles package - docker run -id -e TENTACLES_REPOSITORY=dev-tentacles -e TENTACLES_URL_TAG=${GITHUB_HEAD_REF////_} -e DISABLE_SENTRY=True --name ${{ env.CONTAINER_NAME }} ${{ env.IMAGE }}:${{ env.LATEST }} - sleep ${{ env.CHECK_TENTACLE_CONTAINER_TIME }} - if docker logs ${{ env.CONTAINER_NAME }} | grep "octobot_tentacles_manager.api.util.tentacles_management Failed to download file at url :" ; then - docker rm -f ${{ env.CONTAINER_NAME }} - echo Restarting docker with latest tentacle package... - docker run -id -e TENTACLES_URL_TAG=${{ env.LATEST }} -e DISABLE_SENTRY=True --name ${{ env.CONTAINER_NAME }} ${{ env.IMAGE }}:${{ env.LATEST }} - fi - sleep ${{ env.WAIT_CONTAINER_TIME }} - docker logs ${{ env.CONTAINER_NAME }} - docker inspect ${{ env.CONTAINER_NAME }} | jq '.[].State.Health.Status' | grep "healthy" - - - name: Build and push latest - if: github.event_name == 'push' && !startsWith(github.ref, 'refs/tags') && github.ref == 'refs/heads/dev' - uses: docker/build-push-action@master - with: - context: . - builder: ${{ steps.buildx.outputs.name }} - platforms: linux/amd64,linux/arm64 - push: true - tags: ${{ env.IMAGE }}:${{ env.LATEST }} - build-args: | - TENTACLES_URL_TAG=${{ env.LATEST }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Build and push staging - if: github.event_name == 'push' && github.ref == 'refs/heads/master' - uses: docker/build-push-action@master - with: - context: . - builder: ${{ steps.buildx.outputs.name }} - platforms: linux/amd64,linux/arm64 - push: true - tags: ${{ env.IMAGE }}:${{ env.STAGING }} - build-args: | - TENTACLES_URL_TAG=${{ env.STABLE }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Build and push on tag - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - uses: docker/build-push-action@master - with: - context: . - file: ./Dockerfile - builder: ${{ steps.buildx.outputs.name }} - platforms: linux/amd64,linux/arm64 - push: true - tags: | - ${{ env.IMAGE }}:${{ env.LATEST }} - ${{ env.IMAGE }}:${{ env.STABLE }} - ${{ env.IMAGE }}:${{ env.VERSION }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - notify: - if: ${{ failure() }} - needs: - - lint - - build_test_push - uses: Drakkar-Software/.github/.github/workflows/failure_notify_workflow.yml@master - secrets: - DISCORD_GITHUB_WEBHOOK: ${{ secrets.DISCORD_GITHUB_WEBHOOK }} - - - notify-dockerhub-update: - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - name: Notify dockerhub update - runs-on: ubuntu-latest - needs: - - lint - - build_test_push - - steps: - - name: Notify discord - uses: sarisia/actions-status-discord@v1 - with: - description: "@here a new tag has been published on Docker Hub, docker bots can be updated." - webhook: ${{ secrets.DISCORD_GITHUB_WEBHOOK }} diff --git a/.github/workflows/launcher-release.yml b/.github/workflows/launcher-release.yml new file mode 100644 index 0000000000..e387404983 --- /dev/null +++ b/.github/workflows/launcher-release.yml @@ -0,0 +1,136 @@ +name: Launcher Release + +on: + push: + tags: + - "launcher-v*" + +permissions: + contents: write + +jobs: + build: + name: Build ${{ matrix.target }} + runs-on: ${{ matrix.runner }} + strategy: + fail-fast: false + matrix: + include: + - target: aarch64-apple-darwin + runner: macos-14 + archive: tar.gz + - target: x86_64-apple-darwin + runner: macos-13 + archive: tar.gz + - target: x86_64-unknown-linux-gnu + runner: ubuntu-22.04 + archive: tar.gz + use_cross: false + - target: aarch64-unknown-linux-gnu + runner: ubuntu-22.04 + archive: tar.gz + use_cross: true + - target: x86_64-pc-windows-msvc + runner: windows-2022 + archive: zip + + steps: + - uses: actions/checkout@v4 + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - name: Install cross + if: matrix.use_cross == true + run: cargo install cross --locked + + - name: Build (cross) + if: matrix.use_cross == true + working-directory: packages/launcher + env: + LAUNCHER_UPDATE_PUBKEY_HEX: ${{ secrets.LAUNCHER_UPDATE_PUBKEY_HEX }} + run: cross build --release -p octobot-launcher-cli --target ${{ matrix.target }} + + - name: Build (native) + if: matrix.use_cross != true + working-directory: packages/launcher + env: + LAUNCHER_UPDATE_PUBKEY_HEX: ${{ secrets.LAUNCHER_UPDATE_PUBKEY_HEX }} + run: cargo build --release -p octobot-launcher-cli --target ${{ matrix.target }} + + - name: Extract version from tag + id: version + shell: bash + run: echo "version=${GITHUB_REF_NAME#launcher-v}" >> "$GITHUB_OUTPUT" + + - name: Pack archive (unix) + if: matrix.archive == 'tar.gz' + shell: bash + run: | + BIN=packages/launcher/target/${{ matrix.target }}/release/octobot-launcher + strip "$BIN" 2>/dev/null || true + ARCHIVE="octobot-launcher-${{ steps.version.outputs.version }}-${{ matrix.target }}.tar.gz" + tar -czf "$ARCHIVE" -C "$(dirname "$BIN")" octobot-launcher + sha256sum "$ARCHIVE" > "${ARCHIVE}.sha256" + echo "archive=$ARCHIVE" >> "$GITHUB_OUTPUT" + id: pack_unix + + - name: Pack archive (windows) + if: matrix.archive == 'zip' + shell: pwsh + id: pack_win + run: | + $bin = "packages/launcher/target/${{ matrix.target }}/release/octobot-launcher.exe" + $version = "${{ steps.version.outputs.version }}" + $archive = "octobot-launcher-${version}-${{ matrix.target }}.zip" + Compress-Archive -Path $bin -DestinationPath $archive + $hash = (Get-FileHash $archive -Algorithm SHA256).Hash.ToLower() + "$hash $archive" | Out-File -Encoding ASCII "${archive}.sha256" + echo "archive=$archive" >> $env:GITHUB_OUTPUT + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: dist-${{ matrix.target }} + path: | + octobot-launcher-*.${{ matrix.archive }} + octobot-launcher-*.${{ matrix.archive }}.sha256 + + release: + name: Publish GitHub Release + needs: build + runs-on: ubuntu-22.04 + steps: + - uses: actions/download-artifact@v4 + with: + pattern: dist-* + merge-multiple: true + + - name: Build SHA256SUMS + run: | + cat *.sha256 > SHA256SUMS + echo "--- SHA256SUMS ---" + cat SHA256SUMS + + - name: Detect prerelease + id: prerelease + run: | + if echo "$GITHUB_REF_NAME" | grep -qE '\-(rc|beta|alpha)[0-9]*$'; then + echo "prerelease=true" >> "$GITHUB_OUTPUT" + else + echo "prerelease=false" >> "$GITHUB_OUTPUT" + fi + + - name: Create release + uses: softprops/action-gh-release@v2 + with: + prerelease: ${{ steps.prerelease.outputs.prerelease }} + generate_release_notes: true + files: | + octobot-launcher-*.tar.gz + octobot-launcher-*.tar.gz.sha256 + octobot-launcher-*.zip + octobot-launcher-*.zip.sha256 + SHA256SUMS diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7beb8351f7..aa8dde909b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -8,127 +8,378 @@ on: - '*' pull_request: +permissions: read-all + jobs: - lint: - name: ${{ matrix.os }}${{ matrix.arch }} - Python ${{ matrix.version }} - lint + build: + name: ${{ matrix.os }}${{ matrix.arch }} - Python ${{ matrix.version }} - Build wheel runs-on: ${{ matrix.os }} strategy: matrix: os: [ ubuntu-latest ] arch: [ x64 ] - version: [ "3.10.x" ] + version: [ "3.13.x" ] steps: - - uses: actions/checkout@v5 - - name: Set up Python ${{ matrix.version }} - uses: actions/setup-python@v6 - with: - python-version: ${{ matrix.version }} - architecture: ${{ matrix.arch }} + - uses: actions/checkout@v6 - - name: Install dependencies - run: pip install wheel && pip install --prefer-binary -r dev_requirements.txt -r requirements.txt -r full_requirements.txt + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.version }} + architecture: ${{ matrix.arch }} -# - name: Black lint -# run: black ${{ secrets.PACKAGE_FOLDER }} --diff --check + - name: Install Pants + uses: pantsbuild/actions/init-pants@v11 + with: + gha-cache-key: ${{ runner.os }}-pants-build + named-caches-hash: ${{ hashFiles('pants.toml') }} - - name: Pylint - run: | - pylint --rcfile=standard.rc octobot - if [ $? -ne 1 ]; then exit 0; fi + - uses: actions/setup-node@v6 + with: + node-version: 22 + + - name: Install Rust toolchain + if: hashFiles('packages/*/crates/*/Cargo.toml') != '' + uses: dtolnay/rust-toolchain@stable + with: + components: clippy + + - name: Rust lint & tests + if: hashFiles('packages/*/crates/*/Cargo.toml') != '' + run: | + cargo clippy --workspace -- -D warnings + cargo test --workspace + + - name: Install maturin + if: hashFiles('packages/*/crates/*/pyproject.toml') != '' + run: pip install maturin + + - name: Build wheels + run: pants package :OctoBot $(pants list --filter-target-type=package_shell_command ::) + + - name: Upload wheel artifacts + uses: actions/upload-artifact@v7 + with: + name: octobot-wheel + path: dist/*.whl + if-no-files-found: error tests: - needs: lint - name: ${{ matrix.os }}${{ matrix.arch }} - Python ${{ matrix.version }} - tests + needs: [ build ] + name: ${{ matrix.os }}${{ matrix.arch }} - Python ${{ matrix.version }} - ${{ matrix.package }} runs-on: ${{ matrix.os }} strategy: matrix: - os: [ windows-latest, ubuntu-latest ] + os: [ ubuntu-latest ] arch: [ x64 ] - version: [ "3.10.x" ] + version: [ "3.13.x" ] + package: + - octobot + - packages/agents + - packages/async_channel + - packages/backtesting + - packages/commons + - packages/evaluators + - packages/node + - packages/flow + - packages/copy + - packages/services + - packages/sync + - packages/tentacles_manager + - packages/trading + - packages/trading_backend + + env: + USES_TENTACLES: ${{ matrix.package == 'octobot' || matrix.package == 'packages/node' || matrix.package == 'packages/flow' || matrix.package == 'packages/copy' }} steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 + + - name: Detect package features + id: detect + run: | + if ls ${{ matrix.package }}/crates/*/Cargo.toml 1>/dev/null 2>&1; then + echo "has_rust=true" >> $GITHUB_OUTPUT + else + echo "has_rust=false" >> $GITHUB_OUTPUT + fi + - name: Set up Python ${{ matrix.version }} uses: actions/setup-python@v6 with: python-version: ${{ matrix.version }} architecture: ${{ matrix.arch }} - - name: Wait for tentacles - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - uses: fountainhead/action-wait-for-check@v1.0.0 - id: wait-for-build + - name: Set up Node.js + if: matrix.package == 'octobot' + uses: actions/setup-node@v6 with: - token: ${{ secrets.AUTH_TOKEN }} - checkName: "ubuntu-latestx64 - Python - 3.10 - Upload" - ref: ${{ github.ref }} - repo: OctoBot-Tentacles - timeoutSeconds: 3600 + node-version: 22 - - name: Trigger fail when Tentacles failed - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') && steps.wait-for-build.outputs.conclusion == 'failure' - run: exit 1 + - name: Download build wheel artifact + uses: actions/download-artifact@v8 + with: + name: octobot-wheel + path: dist/ - - name: Install dependencies - run: pip install wheel && pip install --prefer-binary -r dev_requirements.txt -r requirements.txt -r full_requirements.txt + - name: Install OctoBot + run: | + pip3 install -r dev_requirements.txt + pip3 install dist/*.whl - - name: Install tentacles on Unix - env: - DISABLE_SENTRY: True - if: matrix.os != 'windows-latest' + - name: Lint package + run: | + if [ -f "${{ matrix.package }}/standard.rc" ]; then + pylint --rcfile=${{ matrix.package }}/standard.rc ${{ matrix.package }}/ + rc=$? + else + pylint --rcfile=standard.rc ${{ matrix.package }}/ + rc=$? + fi + if [ $rc -eq 1 ]; then exit 1; fi + + - name: Install Rust toolchain + if: steps.detect.outputs.has_rust == 'true' + uses: dtolnay/rust-toolchain@stable + with: + components: clippy + + - name: Lint Rust crates + if: steps.detect.outputs.has_rust == 'true' run: | - mkdir user - cp ./octobot/config/default_config.json ./user/config.json - branch="${GITHUB_HEAD_REF}" - echo "Trying to download tentacles package ${branch////_}.zip ..." - TENTACLES_REPOSITORY=dev-tentacles TENTACLES_URL_TAG=${branch////_} python3 start.py tentacles -q --install --all || TENTACLES_URL_TAG=latest python3 start.py tentacles --install --all + for crate in ${{ matrix.package }}/crates/*/; do + cargo clippy --manifest-path "$crate/Cargo.toml" -- -D warnings + done - - name: Install tentacles on Windows + - name: Install tentacles + if: env.USES_TENTACLES == 'true' env: - DISABLE_SENTRY: True - if: matrix.os == 'windows-latest' + ALLOW_UNSIGNED_TENTACLES: "true" run: | - mkdir user - copy octobot\config\default_config.json user\config.json - $Env:TENTACLES_REPOSITORY = "dev-tentacles" - $Env:TENTACLES_URL_TAG = $env:GITHUB_HEAD_REF -replace "/", "_" - echo "Trying to download tentacles package $Env:TENTACLES_URL_TAG.zip ..." - python start.py tentacles -q --install --all - if ($LastExitCode -ne 0) - { - $Env:TENTACLES_URL_SUBCATEGORY = "" - If ($env:GITHUB_REF -like "*refs/tags/*") { - $Env:TENTACLES_URL_TAG = "" - $Env:TENTACLES_REPOSITORY = "" - } else { - $Env:TENTACLES_URL_TAG = "latest" - $Env:TENTACLES_REPOSITORY = "" - } - echo "Failed to download branch tentacles, trying to download tentacles package $Env:TENTACLES_URL_TAG.zip ..." - python start.py tentacles --install --all - } - shell: powershell + mkdir -p output + OctoBot tentacles -d packages/tentacles -p any_platform.zip + OctoBot tentacles --install --location output/any_platform.zip --all - - name: Pytests + - name: Run tests + run: | + if [ "${{ matrix.package }}" = "octobot" ]; then + pytest tests -n auto --dist loadfile + pytest --ignore=tentacles/Trading/Exchange tentacles -n auto --dist loadfile + else + if [ "${{ matrix.package }}" = "packages/node" ] || [ "${{ matrix.package }}" = "packages/flow" ] || [ "${{ matrix.package }}" = "packages/copy" ]; then + echo "Running tests from root dir to allow tentacles import" + PYTHONPATH=.:$PYTHONPATH pytest ${{ matrix.package }}/tests -n auto --dist loadfile + else + cd ${{ matrix.package }} + if [ "${{ matrix.package }}" = "packages/tentacles_manager" ]; then + pytest tests + else + pytest tests -n auto --dist loadfile + fi + fi + fi env: DISABLE_SENTRY: True + + - name: Run Rust backend tests + if: steps.detect.outputs.has_rust == 'true' run: | - pytest --cov=. --cov-config=.coveragerc --durations=0 -rw tests - pytest --durations=0 -rw --ignore=tentacles/Trading/Exchange tentacles + cd ${{ matrix.package }} + pytest tests --backend=rust -v - build_sdist: - needs: tests - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - name: Source distribution - Python ${{ matrix.version }} - Deploy + - name: Run frontend tests + if: matrix.package == 'octobot' + run: | + find packages/tentacles -name "package.json" -not -path "*/node_modules/*" | while read pkg; do + cd "$(dirname "$pkg")" + npm ci + npm test + cd - > /dev/null + done + + docker: + name: Build & Push Docker images + needs: [ build, tests ] + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev' || startsWith(github.ref, 'refs/tags/') + permissions: + contents: read + packages: write + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ ubuntu-latest ] + + steps: + - uses: actions/checkout@v6 + + - name: Run hadolint + uses: reviewdog/action-hadolint@v1 + with: + github_token: ${{ secrets.github_token }} + hadolint_ignore: DL3013 DL3008 + + - name: Set Environment Variables + run: | + OWNER="$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]' | tr -d '-')" + IMG=octobot + echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV + echo "IMAGE=${OWNER}/${IMG}" >> $GITHUB_ENV + echo "LATEST=latest" >> $GITHUB_ENV + echo "STAGING=staging" >> $GITHUB_ENV + echo "STABLE=stable" >> $GITHUB_ENV + echo "TEST=test" >> $GITHUB_ENV + echo "SHA=${GITHUB_SHA}" >> $GITHUB_ENV + echo "CONTAINER_NAME=octobot" >> $GITHUB_ENV + echo "CHECK_TENTACLE_CONTAINER_TIME=10" >> $GITHUB_ENV + echo "WAIT_CONTAINER_TIME=80" >> $GITHUB_ENV + + - name: Set up Python ${{ matrix.version }} + uses: actions/setup-python@v6 + with: + python-version: "3.13.x" + architecture: x64 + + - name: Set up QEMU + id: qemu-setup + uses: docker/setup-qemu-action@v3 + with: + platforms: all + + - name: Print available platforms + run: echo ${{ steps.qemu.outputs.platforms }} + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + use: true + + - name: Cache Docker layers + uses: actions/cache@v4 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Login to Docker Hub + if: github.event_name == 'push' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Download build wheel artifact + uses: actions/download-artifact@v8 + with: + name: octobot-wheel + path: dist/ + + - name: Install OctoBot & tentacles + env: + ALLOW_UNSIGNED_TENTACLES: "true" + run: | + pip3 install -r dev_requirements.txt + pip3 install dist/octobot-*.whl + mkdir -p output + OctoBot tentacles -d packages/tentacles -p any_platform.zip + OctoBot tentacles --install --location output/any_platform.zip --all + + - name: Build latest + if: github.event_name != 'push' + uses: docker/build-push-action@v6 + with: + context: . + builder: ${{ steps.buildx.outputs.name }} + platforms: linux/amd64 + # Using "load: true" forces the docker driver. + # Unfortunately, the "docker" driver does not support + # multi-platform builds. + load: true + push: false + tags: ${{ env.IMAGE }}:${{ env.SHA }} + build-args: | + TENTACLES_URL_TAG=${{ env.LATEST }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + + - name: Docker build test container + if: github.event_name != 'push' + run: | + docker build -f ./tests/Dockerfile --build-arg OCTOBOT_IMAGE=${{ env.IMAGE }}:${{ env.SHA }} -t ${{ env.IMAGE }}:${{ env.TEST }} . + + - name: run tests in docker + if: github.event_name != 'push' + run: | + docker run -i -v $(pwd)/tentacles:/octobot/tentacles -e DISABLE_SENTRY=True ${{ env.IMAGE }}:${{ env.TEST }} + + - name: run and check health after start + if: github.event_name != 'push' + run: | + echo Start OctoBot docker container with tentacles... + docker run -id -v $(pwd)/tentacles:/octobot/tentacles -e DISABLE_SENTRY=True --name ${{ env.CONTAINER_NAME }} ${{ env.IMAGE }}:${{ env.LATEST }} + sleep ${{ env.WAIT_CONTAINER_TIME }} + docker logs ${{ env.CONTAINER_NAME }} + docker inspect ${{ env.CONTAINER_NAME }} | jq '.[].State.Health.Status' | grep "healthy" + + - name: Build and push latest + if: github.event_name == 'push' && !startsWith(github.ref, 'refs/tags') && github.ref == 'refs/heads/dev' + uses: docker/build-push-action@v6 + with: + context: . + builder: ${{ steps.buildx.outputs.name }} + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ env.IMAGE }}:${{ env.LATEST }} + build-args: | + TENTACLES_URL_TAG=${{ env.LATEST }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + + - name: Build and push staging + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + uses: docker/build-push-action@v6 + with: + context: . + builder: ${{ steps.buildx.outputs.name }} + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ env.IMAGE }}:${{ env.STAGING }} + build-args: | + TENTACLES_URL_TAG=${{ env.STABLE }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + + - name: Build and push on tag + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + uses: docker/build-push-action@v6 + with: + context: . + file: ./Dockerfile + builder: ${{ steps.buildx.outputs.name }} + platforms: linux/amd64,linux/arm64 + push: true + tags: | + ${{ env.IMAGE }}:${{ env.LATEST }} + ${{ env.IMAGE }}:${{ env.STABLE }} + ${{ env.IMAGE }}:${{ env.VERSION }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + + tentacles: + needs: [ build, tests ] + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/dev' || startsWith(github.ref, 'refs/tags/') + name: ${{ matrix.os }}${{ matrix.arch }} - Python ${{ matrix.version }} - Upload Tentacles runs-on: ${{ matrix.os }} strategy: matrix: os: [ ubuntu-latest ] arch: [ x64 ] - version: [ "3.10.x" ] + version: [ "3.13.x" ] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python ${{ matrix.version }} uses: actions/setup-python@v6 @@ -136,22 +387,372 @@ jobs: python-version: ${{ matrix.version }} architecture: ${{ matrix.arch }} + - name: Download build wheel artifact + uses: actions/download-artifact@v8 + with: + name: octobot-wheel + path: dist/ + + - name: Install OctoBot + run: | + pip3 install -r dev_requirements.txt + pip3 install dist/octobot-*.whl + + - name: Prepare tentacles package + run: | + mkdir -p ../new_tentacles + cp -r packages/tentacles/Agent packages/tentacles/Automation packages/tentacles/Backtesting packages/tentacles/Evaluator packages/tentacles/Meta packages/tentacles/Services packages/tentacles/Trading packages/tentacles/profiles ../new_tentacles/ + + - name: Publish tag tentacles + if: startsWith(github.ref, 'refs/tags') + env: + S3_API_KEY: ${{ secrets.S3_API_KEY }} + S3_API_SECRET_KEY: ${{ secrets.S3_API_SECRET_KEY }} + S3_REGION_NAME: ${{ secrets.S3_REGION_NAME }} + S3_ENDPOINT_URL: ${{ secrets.S3_ENDPOINT_URL }} + CLOUDFLARE_TOKEN: ${{ secrets.CLOUDFLARE_TOKEN }} + CLOUDFLARE_ZONE: ${{ secrets.CLOUDFLARE_ZONE }} + S3_BUCKET_NAME: ${{ secrets.S3_BUCKET_NAME }} + TENTACLES_SIGNING_PRIVATE_KEY: ${{ secrets.TENTACLES_SIGNING_PRIVATE_KEY }} + run: | + echo "Publishing tentacles for tag: ${GITHUB_REF_NAME}" + cp packages/tentacles/metadata.yaml ../metadata.yaml + sed -i "s/VERSION_PLACEHOLDER/${GITHUB_REF_NAME}/g" ../metadata.yaml + OctoBot tentacles -m "../metadata.yaml" -d "../new_tentacles" -p "../any_platform.zip" -ite -ute ${{ secrets.TENTACLES_OFFICIAL_PATH }}/tentacles -upe ${{ secrets.TENTACLES_OFFICIAL_PATH }}/packages/full/${{ secrets.TENTACLES_REPOSITORY_NAME }}/ + python packages/tentacles/scripts/clear_cloudflare_cache.py ${GITHUB_REF_NAME} + + - name: Publish latest tentacles + if: github.ref == 'refs/heads/dev' && startsWith(github.ref, 'refs/tags') != true + env: + S3_API_KEY: ${{ secrets.S3_API_KEY }} + S3_API_SECRET_KEY: ${{ secrets.S3_API_SECRET_KEY }} + S3_REGION_NAME: ${{ secrets.S3_REGION_NAME }} + S3_ENDPOINT_URL: ${{ secrets.S3_ENDPOINT_URL }} + CLOUDFLARE_TOKEN: ${{ secrets.CLOUDFLARE_TOKEN }} + CLOUDFLARE_ZONE: ${{ secrets.CLOUDFLARE_ZONE }} + S3_BUCKET_NAME: ${{ secrets.S3_BUCKET_NAME }} + TENTACLES_SIGNING_PRIVATE_KEY: ${{ secrets.TENTACLES_SIGNING_PRIVATE_KEY }} + run: | + cp packages/tentacles/metadata.yaml ../metadata.yaml + sed -i "s/VERSION_PLACEHOLDER/latest/g" ../metadata.yaml + OctoBot tentacles -m "../metadata.yaml" -d "../new_tentacles" -p "../any_platform.zip" -upe ${{ secrets.TENTACLES_OFFICIAL_PATH }}/packages/full/${{ secrets.TENTACLES_REPOSITORY_NAME }}/ + python packages/tentacles/scripts/clear_cloudflare_cache.py latest + + - name: Publish stable tentacles + if: github.ref == 'refs/heads/master' + env: + S3_API_KEY: ${{ secrets.S3_API_KEY }} + S3_API_SECRET_KEY: ${{ secrets.S3_API_SECRET_KEY }} + S3_REGION_NAME: ${{ secrets.S3_REGION_NAME }} + S3_ENDPOINT_URL: ${{ secrets.S3_ENDPOINT_URL }} + CLOUDFLARE_TOKEN: ${{ secrets.CLOUDFLARE_TOKEN }} + CLOUDFLARE_ZONE: ${{ secrets.CLOUDFLARE_ZONE }} + S3_BUCKET_NAME: ${{ secrets.S3_BUCKET_NAME }} + TENTACLES_SIGNING_PRIVATE_KEY: ${{ secrets.TENTACLES_SIGNING_PRIVATE_KEY }} + run: | + cp packages/tentacles/metadata.yaml ../metadata.yaml + sed -i "s/VERSION_PLACEHOLDER/stable/g" ../metadata.yaml + OctoBot tentacles -m "../metadata.yaml" -d "../new_tentacles" -p "../any_platform.zip" -upe ${{ secrets.TENTACLES_OFFICIAL_PATH }}/packages/full/${{ secrets.TENTACLES_REPOSITORY_NAME }}/ + python packages/tentacles/scripts/clear_cloudflare_cache.py stable + + docs: + name: Build & Deploy Docs + needs: [build, tests] + runs-on: ubuntu-latest + permissions: + contents: read + deployments: write + + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + lfs: true + + - uses: actions/setup-node@v6 + with: + node-version: 22 + cache: npm + cache-dependency-path: docs/package-lock.json + - name: Install dependencies - run: pip install --prefer-binary -r dev_requirements.txt -r requirements.txt -r full_requirements.txt + run: cd docs && npm ci + + - name: Build docs + run: cd docs && npm run build - - name: Build sdist - run: python setup.py sdist + - name: Upload version (preview URL) + uses: cloudflare/wrangler-action@v3 + with: + wranglerVersion: "4" + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + workingDirectory: docs + command: versions upload + + - name: Deploy to production + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + uses: cloudflare/wrangler-action@v3 + with: + wranglerVersion: "4" + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + workingDirectory: docs + command: versions deploy --yes + + version: + needs: [ build, tests ] + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + name: Source distribution - Python ${{ matrix.version }} - Deploy + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ ubuntu-latest ] + arch: [ x64 ] + version: [ "3.13.x" ] - - name: Publish package + steps: + - uses: actions/checkout@v6 + + - name: Set up Python ${{ matrix.version }} + uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.version }} + architecture: ${{ matrix.arch }} + + - name: Download build wheel artifact + uses: actions/download-artifact@v8 + with: + name: octobot-wheel + path: dist/ + + - name: Publish packages run: | + pip install twine python -m twine upload --repository-url ${{ secrets.PYPI_OFFICIAL_UPLOAD_URL }} -u __token__ -p ${{ secrets.PYPI_TOKEN }} --skip-existing dist/* + binary: + needs: [ build, tests ] + name: ${{ matrix.os }} - ${{ matrix.arch }} - binary + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, windows-latest, ubuntu-latest] + arch: [ x64, arm64 ] + exclude: + - os: windows-latest + arch: arm64 + - os: macos-latest + arch: x64 + + steps: + - uses: actions/checkout@v6 + + - name: Download wheel artifact + uses: actions/download-artifact@v8 + with: + name: octobot-wheel + path: dist/ + + - name: Set up Python 3.13 + uses: actions/setup-python@v6 + with: + python-version: '3.13.x' + architecture: x64 + + - name: Build OctoBot Binary on Linux arm64 + if: matrix.os == 'ubuntu-latest' && matrix.arch == 'arm64' + uses: uraimo/run-on-arch-action@v3.0.1 + with: + arch: aarch64 + distro: ubuntu24.04 + githubToken: ${{ github.token }} + dockerRunArgs: | + --volume "${PWD}/dist:/dist" + env: | + OCTOBOT_REPOSITORY_DIR: OctoBot + NLTK_DATA: nltk_data + BUILD_ARCH: ${{ matrix.arch }} + run: | + apt-get update + apt-get install -y --no-install-recommends python3.12 python3-pip python3-dev python3-venv git gcc musl-dev libc-dev build-essential zlib1g zlib1g-dev + python3 -m venv /opt/octobot_venv + source /opt/octobot_venv/bin/activate + python -m pip install -U pip setuptools wheel + python -m pip install -r packages/binary/requirements.txt + python -m pip install /dist/octobot-*.whl + bash ./packages/binary/build_scripts/unix.sh + + - name: Build OctoBot Binary on Linux + if: matrix.os == 'ubuntu-latest' && matrix.arch == 'x64' + env: + OCTOBOT_REPOSITORY_DIR: OctoBot + NLTK_DATA: nltk_data + BUILD_ARCH: ${{ matrix.arch }} + run: | + python3 -m pip install -U pip setuptools wheel + python3 -m pip install -r packages/binary/requirements.txt + pip3 install dist/octobot-*.whl + bash ./packages/binary/build_scripts/unix.sh + + - name: Build OctoBot Binary on MacOS + if: matrix.os == 'macos-latest' + env: + OCTOBOT_REPOSITORY_DIR: OctoBot + NLTK_DATA: nltk_data + BUILD_ARCH: ${{ matrix.arch }} + run: | + python3 -m pip install -U pip setuptools wheel + python3 -m pip install -r packages/binary/requirements.txt + pip3 install dist/octobot-*.whl + bash ./packages/binary/build_scripts/unix.sh + + - name: Build OctoBot Binary on Windows + if: matrix.os == 'windows-latest' + env: + OCTOBOT_REPOSITORY_DIR: OctoBot + NLTK_DATA: nltk_data + run: | + python -m pip install -U pip setuptools wheel + python -m pip install -r packages\binary\requirements.txt + python -m pip install (Get-Item dist/octobot-*.whl).FullName + .\packages\binary\build_scripts\windows.ps1 + shell: powershell + + - name: Upload OctoBot Binary on MacOS + uses: actions/upload-artifact@v7 + if: matrix.os == 'macos-latest' + with: + name: OctoBot_macos_${{ matrix.arch }} + path: OctoBot_${{ matrix.arch }} + if-no-files-found: error + + - name: Upload OctoBot Binary on Linux + uses: actions/upload-artifact@v7 + if: matrix.os == 'ubuntu-latest' + with: + name: OctoBot_linux_${{ matrix.arch }} + path: OctoBot_${{ matrix.arch }} + if-no-files-found: error + + - name: Upload OctoBot Binary on Windows + uses: actions/upload-artifact@v7 + if: matrix.os == 'windows-latest' + with: + name: OctoBot_windows_${{ matrix.arch }}.exe + path: OctoBot_windows.exe + if-no-files-found: error + + release: + name: Create Release + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + needs: [ binary, docker, tentacles ] + permissions: + contents: write + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ ubuntu-latest ] + + steps: + - name: Download artifacts + uses: actions/download-artifact@v8 + with: + pattern: OctoBot_* + + - name: Set version as environement var + id: vars + run: echo ::set-output name=tag::${GITHUB_REF#refs/*/} + + - name: Clean artifact names + run: | + mkdir bin + mv OctoBot_linux_x64/OctoBot_x64 bin/OctoBot_linux_x64 + mv OctoBot_windows_x64.exe/OctoBot_windows.exe bin/OctoBot_windows_x64.exe + mv OctoBot_linux_arm64/OctoBot_arm64 bin/OctoBot_linux_arm64 + mv OctoBot_macos_arm64/OctoBot_arm64 bin/OctoBot_macos_arm64 + + - name: Compute hashes + id: hashes + run: | + echo ::set-output name=octobot_linux_x64_hash::$(openssl sha256 ./bin/OctoBot_linux_x64 | awk '{print $2}') + echo ::set-output name=octobot_linux_arm64_hash::$(openssl sha256 ./bin/OctoBot_linux_arm64 | awk '{print $2}') + echo ::set-output name=octobot_macos_arm64_hash::$(openssl sha256 ./bin/OctoBot_macos_arm64 | awk '{print $2}') + echo ::set-output name=octobot_windows_x64_hash::$(openssl sha256 ./bin/OctoBot_windows_x64.exe | awk '{print $2}') + + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} + with: + tag_name: ${{ steps.vars.outputs.tag }} + release_name: Release - ${{ steps.vars.outputs.tag }} + owner: ${{ github.repository_owner }} + repo: OctoBot + draft: true + prerelease: false + commitish: master + body: | + | Binary | Download link | SHA256 | + | ------------- |:-------------:|:-------------:| + | Windows x64 | [Download](https://github.com/${{ github.repository_owner }}/OctoBot/releases/download/${{ steps.vars.outputs.tag }}/OctoBot_windows_x64.exe) | ${{ steps.hashes.outputs.octobot_windows_x64_hash }} | + | Linux x64 | [Download](https://github.com/${{ github.repository_owner }}/OctoBot/releases/download/${{ steps.vars.outputs.tag }}/OctoBot_linux_x64) | ${{ steps.hashes.outputs.octobot_linux_x64_hash }} | + | Linux arm64 | [Download](https://github.com/${{ github.repository_owner }}/OctoBot/releases/download/${{ steps.vars.outputs.tag }}/OctoBot_linux_arm64) | ${{ steps.hashes.outputs.octobot_linux_arm64_hash }} | + | MacOS arm64 | [Download](https://github.com/${{ github.repository_owner }}/OctoBot/releases/download/${{ steps.vars.outputs.tag }}/OctoBot_macos_arm64) | ${{ steps.hashes.outputs.octobot_macos_arm64_hash }} | + + - name: Upload Release Asset - OctoBot_windows_x64 + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./bin/OctoBot_windows_x64.exe + asset_name: OctoBot_windows_x64.exe + asset_content_type: application/x-binary + + - name: Upload Release Asset - OctoBot_linux_x64 + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./bin/OctoBot_linux_x64 + asset_name: OctoBot_linux_x64 + asset_content_type: application/x-binary + + - name: Upload Release Asset - OctoBot_linux_arm64 + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./bin/OctoBot_linux_arm64 + asset_name: OctoBot_linux_arm64 + asset_content_type: application/x-binary + + - name: Upload Release Asset - OctoBot_macos_arm64 + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./bin/OctoBot_macos_arm64 + asset_name: OctoBot_macos_arm64 + asset_content_type: application/x-binary + notify: if: ${{ failure() }} needs: - - lint + - build - tests - - build_sdist + - docker + - binary + - tentacles + - release + - version + - docs uses: Drakkar-Software/.github/.github/workflows/failure_notify_workflow.yml@master secrets: DISCORD_GITHUB_WEBHOOK: ${{ secrets.DISCORD_GITHUB_WEBHOOK }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index b836c2ed0f..0000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: "OctoBot-Release" - -on: - push: - tags: - - '*' - -jobs: - release-jobs: - name: Create release jobs - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - - name: Set Environment Variables - run: | - echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV - - - name: Create release on OctoBot-Tentacles - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} - with: - tag_name: ${{ env.VERSION }} - release_name: Version - ${{ env.VERSION }} - draft: false - prerelease: false - commitish: master - repo: OctoBot-Tentacles - -# TODO uncomment when adding tests to OctoBot-Binary -# - name: Wait for tentacles build done -# uses: fountainhead/action-wait-for-check@v1.0.0 -# id: wait-for-build -# with: -# token: ${{ secrets.AUTH_TOKEN }} -# checkName: "Done" -# ref: ${{ github.sha }} -# timeoutSeconds: 3600 -# -# - name: Trigger fail when Main failed -# if: steps.wait-for-build.outputs.conclusion == 'failure' -# run: exit 1 - - - name: Create release on OctoBot-Binary - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.AUTH_TOKEN }} - with: - tag_name: ${{ env.VERSION }} - release_name: Version - ${{ env.VERSION }} - draft: false - prerelease: false - commitish: master - repo: OctoBot-Binary - - notify: - name: Notify - runs-on: ubuntu-latest - needs: - - release-jobs - if: ${{ failure() }} - - steps: - - name: Notify discord - uses: sarisia/actions-status-discord@v1 - with: - status: Failure - webhook: ${{ secrets.DISCORD_GITHUB_WEBHOOK }} diff --git a/.gitignore b/.gitignore index 40904d560e..5cb35f47c1 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,9 @@ __pycache__/ *$py.class *.orig +# Node +node_modules/ + # C extensions *.so @@ -66,6 +69,29 @@ instance/ # Sphinx documentation docs/_build/ +# Docusaurus documentation +docs/node_modules/ +docs/build/ +docs/.docusaurus/ +# Auto-generated tentacle docs (from collect-tentacles.mjs) +docs/content/creators/ +docs/content/guides/exchanges.md +docs/content/guides/exchanges/ +docs/content/guides/exchanges/_category_.json +docs/content/guides/strategies/getting-started.md +docs/content/guides/strategies/_category_.json +docs/content/guides/strategies/trading-modes/ +docs/content/guides/strategies/trading-modes/_category_.json +docs/content/guides/strategies/evaluators/ +docs/content/guides/strategies/evaluators/**/_category_.json +# Migration script (one-time use, not needed in repo) +docs/scripts/migrate.mjs +# Auto-synced root docs (from sync-root-docs.mjs) +docs/content/developers/contributing.md +docs/content/developers/changelog.md +# Auto-generated llms.txt (from generate-llms-txt.mjs) +docs/static/llms.txt + # PyBuilder target/ @@ -106,6 +132,7 @@ ENV/ # Tentacles manager temporary files octobot/creator_temp/ +packages/metadata.yaml creator_temp/ # Data @@ -113,17 +140,21 @@ backtesting/collector/data/ backtesting/data/ # Tentacles +output tentacles downloaded_temp_tentacles +!packages/tentacles +!docs/content/developers/packages/tentacles # User config -user/ +user temp_config.json *.csv *.ods *.c *.h +.DS_Store # OctoBot logs logs @@ -136,3 +167,27 @@ letsencrypt/ # dev env .env + +# Ansible decrypted temp files, SSH keys, and production collections config +infra/**/*.dec.yml +infra/**/.ssh/ + +# Ansible Galaxy installed roles (installed via requirements.yml) +infra/**/roles/geerlingguy.*/ + +# Pants build system +/.pants.d/ +/dist/ + +# bin +nltk_data/ +installer/ + +# resources watcher +*.dump + +# node +*.db* + +# ai +analysis/ diff --git a/.pants.ignore b/.pants.ignore new file mode 100644 index 0000000000..ee9de65d94 --- /dev/null +++ b/.pants.ignore @@ -0,0 +1,26 @@ +# Pants ignore file - directories to exclude from file watching +# This prevents "Filesystem changed during run" errors + +# IDE and editor directories +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Log files that may be written during tests +logs/ +*.log + +# Output directories +output/ +user/ + +# Cache directories +.pytest_cache/ +__pycache__/ +.mypy_cache/ + +# Pants internal directories (already ignored by default) +.pants.d/ +dist/ diff --git a/BUILD b/BUILD new file mode 100644 index 0000000000..8d51cdac8b --- /dev/null +++ b/BUILD @@ -0,0 +1,211 @@ +python_requirements(name="reqs") +python_requirements(name="full_reqs", source="full_requirements.txt") +python_requirements(name="dev_reqs", source="dev_requirements.txt") + +files( + name="cargo_workspace", + sources=["Cargo.toml", "Cargo.lock"], +) + +python_sources(name="octobot", sources=["octobot/**/*.py"]) + +files( + name="octobot_config", + sources=["octobot/config/**/*"], +) + +files( + name="octobot_strategy_optimizer_data", + sources=["octobot/strategy_optimizer/optimizer_data_files/**/*"], +) + +# For development purposes only +python_sources(name="tentacles", sources=["tentacles/**/*.py"]) + +resources( + name="web_interface_resources", + sources=[ + "tentacles/Services/Interfaces/web_interface/templates/**/*", + "tentacles/Services/Interfaces/web_interface/static/**/*", + "tentacles/Services/Interfaces/web_interface/advanced_templates/**/*" + ] +) + +PACKAGE_SOURCES = [ + "packages/async_channel:async_channel", + "packages/backtesting:octobot_backtesting", + "packages/commons:octobot_commons", + "packages/evaluators:octobot_evaluators", + "packages/node:octobot_node", + "packages/flow:octobot_flow", + "packages/copy:octobot_copy", + "packages/services:octobot_services", + "packages/sync:octobot_sync", + "packages/tentacles_manager:octobot_tentacles_manager", + "packages/trading:octobot_trading", + "packages/trading_backend:trading_backend", +] + +PACKAGE_REQS = [ + "packages/backtesting:reqs", + "packages/commons:reqs", + "packages/evaluators:reqs", + "packages/node:reqs", + "packages/sync:reqs", + "packages/tentacles_manager:reqs", + "packages/trading:reqs", + "packages/trading_backend:reqs", +] + +PACKAGE_FULL_REQS = [ + "packages/commons:full_reqs", + "packages/services:full_reqs", + "packages/sync:full_reqs", + "packages/tentacles_manager:full_reqs", + "packages/trading:full_reqs", +] + +# Tests +files( + name="test_data", + sources=["tests/static/**/*"], +) + +files( + name="tentacles_test_data", + sources=["tentacles/**/tests/static/**/*"], +) + +# Test utilities (not actual tests, used by tests) +# Include all tests/ package files that are used by tentacles tests +python_sources( + name="test_utils", + sources=[ + "tests/**/*.py", + "!tests/**/test_*.py", # Exclude actual test files + ], +) + +# test_exchanges.py is a utility file (not a test) despite its name +# It must be a separate target to avoid glob ordering issues +python_source( + name="test_exchanges_util", + source="tests/test_utils/test_exchanges.py", +) + +python_tests( + name="tests", + sources=[ + "tests/**/test_*.py", + "tentacles/**/test_*.py", + "!tests/test_utils/test_exchanges.py", + "!tentacles/Trading/Exchange/**", + ], + dependencies=[ + ":reqs", + ":full_reqs", + ":dev_reqs", + ":test_data", + ":tentacles_test_data", + ":test_utils", + ":test_exchanges_util", + ":web_interface_resources", + ":octobot_config", + ":octobot_strategy_optimizer_data", + "//:tentacles", + "packages/tentacles:tentacles_metadata", + "packages/tentacles:tentacles_test_utils", + "packages/tentacles:tentacles_test_data", + ":octobot", + ] + PACKAGE_SOURCES + PACKAGE_REQS + PACKAGE_FULL_REQS, +) + +# Entrypoint +pex_binary( + name="start", + entry_point="octobot.cli:main", + dependencies=[ + ":octobot", + ":octobot_config", + ":octobot_strategy_optimizer_data", + ":reqs", + ":full_reqs", + ] + PACKAGE_SOURCES + PACKAGE_REQS + PACKAGE_FULL_REQS, +) + +# Distributions +# Lite distribution - pants package :OctoBot-Lite +# python_distribution( +# name="OctoBot-Lite", +# dependencies=[ +# ":octobot", +# ":reqs", +# ] + PACKAGE_SOURCES + PACKAGE_REQS + PACKAGE_FULL_REQS, +# provides=python_artifact( +# name="octobot-lite", +# version="2.0.16", +# ), +# sdist=True, +# wheel=True, +# ) + +# Full distribution - pants package :OctoBot +python_distribution( + name="OctoBot", + dependencies=[ + ":octobot", + ":octobot_config", + ":octobot_strategy_optimizer_data", + ":reqs", + ":full_reqs", + ] + PACKAGE_SOURCES + PACKAGE_REQS + PACKAGE_FULL_REQS, + provides=python_artifact( + name="octobot", + version="2.1.1", + url='https://github.com/Drakkar-Software/OctoBot', + license='GPL-3.0', + author="Drakkar-Software", + author_email='contact@drakkar.software', + description='Cryptocurrencies alert / trading bot', + long_description_file="README.md", + long_description_content_type='text/markdown', + ), + entry_points={ + 'console_scripts': { + 'OctoBot': 'octobot.cli:main' + } + }, + generate_setup=True, + sdist=True, + wheel=True, +) + +files( + name="docker_files", + sources=["docker/**/*"], +) + +files( + name="wheel_files", + sources=["dist/"], + dependencies=[":OctoBot"], +) + +docker_image( + name="docker", + source="Dockerfile", + dependencies=[ + ":OctoBot", + ":wheel_files", + ":octobot_config", + ":octobot_strategy_optimizer_data", + ":docker_files", + ], + repository="drakkarsoftware/octobot", + image_tags=["local"], + extra_build_args={ + "VERSION": "local", + "TENTACLES_URL_TAG": "dev", + }, + skip_push = True, +) diff --git a/CHANGELOG.md b/CHANGELOG.md index ac67ae6d99..6ac8394ee7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,52 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *It is strongly advised to perform an update of your tentacles after updating OctoBot. (start.py tentacles --install --all)* +## [2.1.1] - 2026-03-28 +### Added +- [GridTrading] Add reinvest_profits config option +- [DSLTradingMode] Add DSLTradingMode +### Updated +- [CCXT] Update to ccxt 4.5.44 +- [Exchanges]: Improve exchange market status cache +- [Polymarket]: Improve tickers fetching +### Fixed +- [Hyperliquid]: Fix assets valuation and websocket related issues +- [WebInterface]: Fix GPT settings display +- [Webinterface]: Improve redirect checks. Special thanks to zhangqy24 + +## [2.1.0] - 2026-03-17 +### Python version update +**OctoBot now runs on Python 3.13 and 3.12. 3.10 and 3.11 are not supported anymore** +### Repositories structure change +The whole OctoBot code is now located in the https://github.com/Drakkar-Software/OctoBot repository, all OctoBot dependencies have been migrated to the packages folder of this repo. +### Added +- [Polymarket] [Beta] Support Polymarket. Dedicated distribution at https://github.com/Drakkar-Software/OctoBot-Prediction-Market +- [ProfileCopyTradingMode] Add a trading mode to copy a public trading profile, currently used for polymarket copy trading +- [LBank] Added LBank exchange support +### Fixed +- [Coinex]: Fix maker only orders parsing + +### Ongoing Development, Beta soon +- [AI Agents] Support for AI agents trading and backtesting. Dedicated distribution at https://github.com/Drakkar-Software/OctoBot-AI + +### Upcoming +This release is the first step towards a brand new type of OctoBot we are currently working on. This new type of bot will bring more possibilities and flexibility than ever to make it simple to automate any action or strategy on your crypto investments. +More info soon. + +## [2.0.16] - 2025-12-24 +### Added +- [Automations] add scripted automation condition using the OctoBot DSL +- [DSL] add portfolio holding keyword +- [DSL] add web documentation +### Updated +- [Exchanges] update to ccxt 4.5.28 +- [HollaEx] make fee tiers configurable +### Fixed +- [Binance]: Fix stop futures orders +- [HollaEx]: fix missing fee error +- [Hyperliquid] fix websocket start + + ## [2.0.15] - 2025-12-08 ### Breaking pip installation change To install the full OctoBot (equivalent to previous versions), OctoBot needs to be installed with the [full] parameter: `pip install octobot[full]` diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000000..53c98630ce --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,76 @@ +# OctoBot + +## Environment + +```bash +ROOT=$PWD +export PYTHONPATH="$ROOT:$ROOT/packages/agents:$ROOT/packages/async_channel:$ROOT/packages/backtesting:$ROOT/packages/binary:$ROOT/packages/commons:$ROOT/packages/evaluators:$ROOT/packages/flow:$ROOT/packages/node:$ROOT/packages/services:$ROOT/packages/sync:$ROOT/packages/tentacles:$ROOT/packages/tentacles_manager:$ROOT/packages/trading:$ROOT/packages/trading_backend" +``` + +- **Python**: `venv/bin/python` (workspace venv, one level above repo root) +- PYTHONPATH must be absolute (`$PWD`-based) — build subprocesses run from tentacle subdirectories and need to resolve all packages. + +## Tentacles + +- **Source of truth**: `packages/tentacles/` — all tentacle changes go here. +- **Never edit `tentacles/` directly** — it is generated from `packages/tentacles/` via export+install and will be overwritten. +- After any change to `packages/tentacles/`, run the **tentacle-manager** agent to export and install. + +## Agents + +Custom agents live in `.claude/agents/`. They are **not** dispatchable via `subagent_type` — trigger them with `@agent-` in your prompt (e.g. `@agent-test-runner run node tests`), or use `claude --agent ` to make one the session agent. + +### tentacle-manager + +Manages the OctoBot tentacles lifecycle: export from source, install from zip, and generate CCXT exchange tentacles. Mirrors the VSCode "Install tentacles zip" launch configuration. + +Use when: installing tentacles after code changes, generating exchange tentacles from CCXT, or running any `python start.py tentacles` command. + +Trigger: `@agent-tentacle-manager` · Definition: `.claude/agents/tentacle-manager.md` + +### test-runner + +Runs and debugs OctoBot Python tests. Handles root-level tests (`tests/`) and per-package tests (`packages//tests/`). On failure, reads the test and source code, diagnoses the issue, fixes it, and re-runs. + +Use when: running tests, debugging test failures, or verifying changes after code modifications. + +Trigger: `@agent-test-runner` · Definition: `.claude/agents/test-runner.md` + +## Documentation + +Documentation lives in `docs/content/` and is built with Docusaurus 3. Package docs go under `docs/content/developers/packages//`. + +### Tone + +Write descriptive prose that explains what things do and why, not how they're implemented line by line. Favor plain-language explanations over technical detail. Reference class or function names when they help anchor the explanation, but don't build the doc around them — the reader should understand the concepts even if names change. The style should be descriptive yet grounded in code, explains design decisions and non-obvious behavior, mentions concrete names only when they clarify the concept. + +### What to include + +- Architecture and design decisions +- How components interact and why +- Important concepts and patterns +- Code snippets that illustrate non-obvious behavior +- Configuration that users/developers need to know about + +### What NOT to include + +The code is the source of truth. Don't duplicate anything that can be read from source or will break on the next refactor: + +- **API surfaces**: function signatures, parameter lists, return types, class hierarchies, enum/constant values, error classes +- **Project structure**: directory trees, package layouts, dependency lists, requirements, version numbers +- **Categorized lists**: sections that just group and list code elements (helpers, classes, endpoints) without explaining why they exist +- **Implementation details**: build config specifics, hidden imports, lifecycle step-by-step sequences + +### File format + +Each `.md` file must have Docusaurus frontmatter: + +```yaml +--- +title: +description: <One-line description> +sidebar_position: <number> +--- +``` + +The sidebar uses `autogenerated` for `developers/packages`, so new files appear automatically. diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000000..1bba088d84 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,4274 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" + +[[package]] +name = "anstyle-parse" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.61.2", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "argon2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures 0.2.17", + "password-hash", +] + +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "assert_cmd" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39bae1d3fa576f7c6519514180a72559268dd7d1fe104070956cb687bc6673bd" +dependencies = [ + "anstyle", + "bstr", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "axum" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b698c5f9a010f6573133b09e0de5408834d0c82f8d7475a89fc1867a71cd90" +dependencies = [ + "axum-core", + "axum-macros", + "bytes", + "form_urlencoded", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aa268c23bfbbd2c4363b9cd302a4f504fb2a9dfe7e3451d66f35dd392e20aca" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "axum-test" +version = "20.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a86bfe2ef15bee102ac34912f7f4542b0bb37dc464fa55461763999c4d625e7" +dependencies = [ + "anyhow", + "axum", + "bytes", + "bytesize", + "cookie", + "expect-json", + "http", + "http-body-util", + "hyper", + "hyper-util", + "mime", + "pretty_assertions", + "reserve-port", + "rust-multipart-rfc7578_2", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tower", + "url", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" + +[[package]] +name = "bitflags" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bollard" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" +dependencies = [ + "base64", + "bollard-stubs", + "bytes", + "chrono", + "futures-core", + "futures-util", + "hex", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-util", + "hyperlocal", + "log", + "pin-project-lite", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror 2.0.18", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-stubs" +version = "1.49.1-rc.28.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5731fe885755e92beff1950774068e0cae67ea6ec7587381536fca84f1779623" +dependencies = [ + "chrono", + "serde", + "serde_json", + "serde_repr", + "serde_with", +] + +[[package]] +name = "bstr" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +dependencies = [ + "memchr", + "regex-automata", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytemuck" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" + +[[package]] +name = "byteorder-lite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "bytesize" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd91ee7b2422bcb158d90ef4d14f75ef67f340943fc4149891dcce8f8b972a3" + +[[package]] +name = "cc" +version = "1.2.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16d90359e986641506914ba71350897565610e87ce0ad9e6f28569db3dd5c6d" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chacha20" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core 0.10.1", +] + +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "clap" +version = "4.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ddb117e43bbf7dacf0a4190fef4d345b9bad68dfc649cb349e7d17d28428e51" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ce8604710f6733aa641a2b3731eaa1e8b3d9973d5e3565da11800813f997a9" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" + +[[package]] +name = "codepage" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48f68d061bc2828ae826206326e61251aca94c1e4a5305cf52d9138639c918b4" +dependencies = [ + "encoding_rs", +] + +[[package]] +name = "colorchoice" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "cookie" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" +dependencies = [ + "time", + "version_check", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.17", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "deadpool" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0be2b1d1d6ec8d846f05e137292d0b89133caf95ef33695424c09568bdd39b1b" +dependencies = [ + "deadpool-runtime", + "lazy_static", + "num_cpus", + "tokio", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys 0.4.1", +] + +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys 0.3.7", +] + +[[package]] +name = "dirs-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "email_address" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e079f19b08ca6239f47f8ba8509c11cf3ea30095831f7fed61441475edd8c449" +dependencies = [ + "serde", +] + +[[package]] +name = "encoding-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87b881ab2524b96a5ce932056c7482ba6152e2226fed3936b3e592adeb95ca6d" +dependencies = [ + "codepage", + "encoding_rs", + "windows-sys 0.52.0", +] + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "env_home" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "erased-serde" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2add8a07dd6a8d93ff627029c51de145e12686fbc36ecb298ac22e74cf02dec" +dependencies = [ + "serde", + "serde_core", + "typeid", +] + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "expect-json" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "869f97f4abe8e78fc812a94ad6b721d72c4fb5532877c79610f2c238d7ccf6c4" +dependencies = [ + "chrono", + "email_address", + "expect-json-macros", + "num", + "regex", + "serde", + "serde_json", + "thiserror 2.0.18", + "typetag", + "uuid", +] + +[[package]] +name = "expect-json-macros" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e6fdf550180a6c29a28cb9aac262dc0064c25735641d2317f670075e9a469d9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "fastrand" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "figment" +version = "0.10.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" +dependencies = [ + "atomic", + "pear", + "serde", + "toml", + "uncased", + "version_check", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "float-cmp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09cf3155332e944990140d967ff5eceb70df778b34f77d8075db46e4704e6d8" +dependencies = [ + "num-traits", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "futures" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi 5.3.0", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "rand_core 0.10.1", + "wasip2", + "wasip3", +] + +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if", + "dashmap", + "futures", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand 0.8.6", + "smallvec", + "spinning_top", +] + +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.14.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f467dd6dccf739c208452f8014c75c18bb8301b050ad1cfb27153803edb0f51" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "home" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ca68d021ef39cf6463ab54c1d0f5daf03377b70561305bb89a8f83aab66e0f" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.62.2", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" +dependencies = [ + "displaydoc", + "potential_utf", + "utf8_iter", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" + +[[package]] +name = "icu_properties" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" + +[[package]] +name = "icu_provider" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb68373c0d6620ef8105e855e7745e18b0d00d3bdb07fb532e434244cdb9a714" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "image" +version = "0.25.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85ab80394333c02fe689eaf900ab500fbd0c2213da414687ebf995a65d5a6104" +dependencies = [ + "bytemuck", + "byteorder-lite", + "moxcms", + "num-traits", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" +dependencies = [ + "equivalent", + "hashbrown 0.17.0", + "serde", + "serde_core", +] + +[[package]] +name = "inlinable_string" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" + +[[package]] +name = "inventory" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4f0c30c76f2f4ccee3fe55a2435f691ca00c0e4bd87abe4f4a851b1d4dac39b" +dependencies = [ + "rustversion", +] + +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "iri-string" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "js-sys" +version = "0.3.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2964e92d1d9dc3364cae4d718d93f227e3abb088e747d92e0395bfdedf1c12ca" +dependencies = [ + "cfg-if", + "futures-util", + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.186" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66" + +[[package]] +name = "libredox" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e02f3bb43d335493c96bf3fd3a321600bf6bd07ed34bc64118e9293bdffea46c" +dependencies = [ + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" + +[[package]] +name = "litemap" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "moxcms" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb85c154ba489f01b25c0d36ae69a87e4a1c73a72631fc6c0eb6dde34a73e44b" +dependencies = [ + "num-traits", + "pxfm", +] + +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + +[[package]] +name = "ntapi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3b335231dfd352ffb0f8017f3b6027a4917f7df785ea2143d8af2adc66980ae" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "octobot-launcher-api" +version = "0.1.0" +dependencies = [ + "argon2", + "axum", + "axum-test", + "base64", + "chrono", + "futures-util", + "governor", + "hex", + "hyperlocal", + "octobot-launcher-config", + "octobot-launcher-core", + "parking_lot", + "rand 0.8.6", + "serde", + "serde_json", + "sha2", + "subtle", + "tempfile", + "thiserror 2.0.18", + "tokio", + "tokio-stream", + "tower", + "tower-http", + "tracing", + "uuid", +] + +[[package]] +name = "octobot-launcher-binary" +version = "0.1.0" +dependencies = [ + "async-trait", + "chrono", + "nix", + "octobot-launcher-core", + "octobot-launcher-update", + "reqwest", + "serde", + "serde_json", + "sysinfo", + "tempfile", + "tokio", + "tracing", + "which 7.0.3", + "windows 0.58.0", +] + +[[package]] +name = "octobot-launcher-cli" +version = "0.1.0" +dependencies = [ + "anyhow", + "assert_cmd", + "bytes", + "chrono", + "clap", + "fs2", + "futures-util", + "http-body-util", + "hyper", + "hyper-util", + "octobot-launcher-api", + "octobot-launcher-binary", + "octobot-launcher-config", + "octobot-launcher-core", + "octobot-launcher-docker", + "octobot-launcher-python", + "octobot-launcher-service", + "octobot-launcher-update", + "predicates", + "qrcode", + "reqwest", + "serde", + "serde_json", + "tempfile", + "tokio", + "tracing", + "tracing-subscriber", + "uuid", +] + +[[package]] +name = "octobot-launcher-config" +version = "0.1.0" +dependencies = [ + "chrono", + "directories", + "figment", + "octobot-launcher-core", + "parking_lot", + "serde", + "serde_json", + "tempfile", + "thiserror 2.0.18", + "tokio", + "toml", + "tracing", +] + +[[package]] +name = "octobot-launcher-core" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "chrono", + "serde", + "serde_json", + "thiserror 2.0.18", + "tokio", + "tracing", + "uuid", +] + +[[package]] +name = "octobot-launcher-docker" +version = "0.1.0" +dependencies = [ + "async-trait", + "bollard", + "chrono", + "futures-util", + "octobot-launcher-core", + "serde", + "serde_json", + "tokio", + "tracing", +] + +[[package]] +name = "octobot-launcher-python" +version = "0.1.0" +dependencies = [ + "async-trait", + "chrono", + "nix", + "octobot-launcher-core", + "octobot-launcher-update", + "serde", + "serde_json", + "sha2", + "sysinfo", + "tempfile", + "tokio", + "tracing", + "which 7.0.3", + "windows 0.58.0", +] + +[[package]] +name = "octobot-launcher-service" +version = "0.1.0" +dependencies = [ + "octobot-launcher-core", + "service-manager", + "thiserror 2.0.18", + "tracing", +] + +[[package]] +name = "octobot-launcher-tests" +version = "0.1.0" +dependencies = [ + "assert_cmd", + "axum", + "axum-test", + "chrono", + "ed25519-dalek", + "hex", + "octobot-launcher-api", + "octobot-launcher-config", + "octobot-launcher-core", + "octobot-launcher-update", + "predicates", + "rand 0.8.6", + "reqwest", + "semver", + "serde", + "serde_json", + "sha2", + "tempfile", + "tokio", + "wiremock", +] + +[[package]] +name = "octobot-launcher-update" +version = "0.1.0" +dependencies = [ + "chrono", + "ed25519-dalek", + "hex", + "octobot-launcher-core", + "rand 0.8.6", + "reqwest", + "self-replace", + "semver", + "serde", + "serde_json", + "sha2", + "tempfile", + "thiserror 2.0.18", + "tokio", + "tracing", + "wiremock", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "pear" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" +dependencies = [ + "inlinable_string", + "pear_codegen", + "yansi", +] + +[[package]] +name = "pear_codegen" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" +dependencies = [ + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project-lite" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "plist" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092791278e026273c1b65bbdcfbba3a300f2994c896bd01ab01da613c29c46f1" +dependencies = [ + "base64", + "indexmap 2.14.0", + "quick-xml", + "serde", + "time", +] + +[[package]] +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "potential_utf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "predicates" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" +dependencies = [ + "anstyle", + "difflib", + "float-cmp", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" + +[[package]] +name = "predicates-tree" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" +dependencies = [ + "predicates-core", + "termtree", +] + +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "version_check", + "yansi", +] + +[[package]] +name = "pxfm" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0c5ccf5294c6ccd63a74f1565028353830a9c2f5eb0c682c355c471726a6e3f" + +[[package]] +name = "qrcode" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d68782463e408eb1e668cf6152704bd856c78c5b6417adaee3203d8f4c1fc9ec" +dependencies = [ + "image", +] + +[[package]] +name = "quanta" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-xml" +version = "0.39.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958f21e8e7ceb5a1aa7fa87fab28e7c75976e0bfe7e23ff069e0a260f894067d" +dependencies = [ + "memchr", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.4", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "rand" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca0ecfa931c29007047d1bc58e623ab12e5590e8c7cc53200d5202b69266d8a" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.5", +] + +[[package]] +name = "rand" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2e8e8bcc7961af1fdac401278c6a831614941f6164ee3bf4ce61b7edb162207" +dependencies = [ + "chacha20", + "getrandom 0.4.2", + "rand_core 0.10.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "rand_core" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b8176103e19a2643978565ca18b50549f6101881c443590420e4dc998a3c69" + +[[package]] +name = "raw-cpuid" +version = "11.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rayon" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb39b166781f92d482534ef4b4b1b2568f42613b53e5b6c160e24cfbfa30926d" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.17", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "reserve-port" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94070964579245eb2f76e62a7668fe87bd9969ed6c41256f3bf614e3323dd3cc" +dependencies = [ + "thiserror 2.0.18", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rust-multipart-rfc7578_2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00bdaa068902270ca7fa8619775e1838e23a63620abac0947ce0f715819b8cec" +dependencies = [ + "bytes", + "futures-core", + "futures-util", + "http", + "mime", + "rand 0.10.1", + "thiserror 2.0.18", +] + +[[package]] +name = "rustc-hash" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.12.1", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.23.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c2c118cb077cca2822033836dfb1b975355dfb784b5e8da48f7b6c5db74e60e" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30a7197ae7eb376e574fe940d068c30fe0462554a3ddbe4eca7838e049c937a9" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "self-replace" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03ec815b5eab420ab893f63393878d89c90fdd94c0bcc44c07abb8ad95552fb7" +dependencies = [ + "fastrand", + "tempfile", + "windows-sys 0.52.0", +] + +[[package]] +name = "semver" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.14.0", + "schemars 0.9.0", + "schemars 1.2.1", + "serde_core", + "serde_json", + "time", +] + +[[package]] +name = "service-manager" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73b205a13c82cdd9fd05e22d5f4ff0269f656adf68732c4d4e4f11360975ebb" +dependencies = [ + "cfg-if", + "dirs", + "encoding-utils", + "encoding_rs", + "log", + "plist", + "which 4.4.2", + "xml-rs", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.17", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core 0.6.4", +] + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sysinfo" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c33cd241af0f2e9e3b5c32163b873b29956890b5342e6745b917ce9d490f4af" +dependencies = [ + "core-foundation-sys", + "libc", + "memchr", + "ntapi", + "rayon", + "windows 0.57.0", +] + +[[package]] +name = "tempfile" +version = "3.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" +dependencies = [ + "fastrand", + "getrandom 0.4.2", + "once_cell", + "rustix 1.1.4", + "windows-sys 0.61.2", +] + +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.52.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67dee974fe86fd92cc45b7a95fdd2f99a36a6d7b0d431a231178d3d670bbcc6" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.14.0", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + +[[package]] +name = "typenum" +version = "1.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de" + +[[package]] +name = "typetag" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be2212c8a9b9bcfca32024de14998494cf9a5dfa59ea1b829de98bac374b86bf" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27a7a9b72ba121f6f1f6c3632b85604cac41aedb5ddc70accbebb6cac83de846" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "uncased" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76" +dependencies = [ + "getrandom 0.4.2", + "js-sys", + "serde_core", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.3+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" +dependencies = [ + "wit-bindgen 0.57.1", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf938a0bacb0469e83c1e148908bd7d5a6010354cf4fb73279b7447422e3a89" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f371d383f2fb139252e0bfac3b81b265689bf45b6874af544ffa4c975ac1ebf8" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeff24f84126c0ec2db7a449f0c2ec963c6a49efe0698c4242929da037ca28ed" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d08065faf983b2b80a79fd87d8254c409281cf7de75fc4b773019824196c904" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd04d9e306f1907bd13c6361b5c6bfc7b3b3c095ed3f8a9246390f8dbdee129" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.14.0", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.14.0", + "semver", +] + +[[package]] +name = "web-sys" +version = "0.3.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2dfbb17949fa2088e5d39408c48368947b86f7834484e87b73de55bc14d97d" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52f5ee44c96cf55f1b349600768e3ece3a8f26010c05265ab73f945bb1a2eb9d" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.44", +] + +[[package]] +name = "which" +version = "7.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762" +dependencies = [ + "either", + "env_home", + "rustix 1.1.4", + "winsafe", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core 0.57.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement 0.57.0", + "windows-interface 0.57.0", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings 0.1.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement 0.60.2", + "windows-interface 0.59.3", + "windows-link", + "windows-result 0.4.1", + "windows-strings 0.5.1", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" +dependencies = [ + "memchr", +] + +[[package]] +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + +[[package]] +name = "wiremock" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08db1edfb05d9b3c1542e521aea074442088292f00b5f28e435c714a98f85031" +dependencies = [ + "assert-json-diff", + "base64", + "deadpool", + "futures", + "http", + "http-body-util", + "hyper", + "hyper-util", + "log", + "once_cell", + "regex", + "serde", + "serde_json", + "tokio", + "url", +] + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen" +version = "0.57.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.14.0", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.14.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.14.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" + +[[package]] +name = "xml-rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" + +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + +[[package]] +name = "yoke" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000000..7e076ea4b7 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,31 @@ +[workspace] +resolver = "2" +members = ["packages/launcher/crates/*"] + +[workspace.package] +version = "0.1.0" +edition = "2021" +rust-version = "1.81" +license = "AGPL-3.0-or-later" +repository = "https://github.com/Drakkar-Software/OctoBot" + +[workspace.lints.rust] +unsafe_code = "forbid" +missing_debug_implementations = "warn" + +[workspace.lints.clippy] +all = { level = "warn", priority = -1 } +pedantic = { level = "warn", priority = -1 } +unwrap_used = "warn" +expect_used = "warn" +missing_errors_doc = "allow" +missing_panics_doc = "allow" +must_use_candidate = "allow" +too_many_lines = "allow" + +[profile.release] +lto = "fat" +codegen-units = 1 +strip = "symbols" +panic = "abort" +opt-level = "z" diff --git a/Dockerfile b/Dockerfile index 936522b88a..7040b05ee6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,47 +1,69 @@ -FROM python:3.10-slim-bookworm AS base +FROM python:3.13-slim-trixie AS base -WORKDIR / +WORKDIR /tmp -# requires git to install requirements with git+https RUN apt-get update \ - && apt-get install -y --no-install-recommends build-essential git gcc binutils libffi-dev libssl-dev libxml2-dev libxslt1-dev libxslt-dev libjpeg62-turbo-dev zlib1g-dev \ - && python -m venv /opt/venv - -# skip cryptography rust compilation (required for armv7 builds) + && apt-get install -y --no-install-recommends \ + build-essential \ + git \ + gcc \ + binutils \ + libffi-dev \ + libssl-dev \ + libxml2-dev \ + libxslt1-dev \ + libxslt-dev \ + libjpeg62-turbo-dev \ + libopenblas-dev \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +# Skip cryptography rust compilation (required for armv7 builds) ENV CRYPTOGRAPHY_DONT_BUILD_RUST=1 -# Make sure we use the virtualenv: -ENV PATH="/opt/venv/bin:$PATH" - -COPY . . -RUN pip install -U setuptools wheel pip>=20.0.0 \ - && pip install --no-cache-dir --prefer-binary -r requirements.txt -r full_requirements.txt \ - && python setup.py install +COPY dist/octobot-*.whl /tmp/ +COPY extra_requirements.txt /tmp/ +RUN python -m venv /opt/venv \ + && . /opt/venv/bin/activate \ + && pip install --no-cache-dir --upgrade pip setuptools wheel \ + && pip install --no-cache-dir /tmp/octobot-*.whl \ + && pip install --no-cache-dir -r /tmp/extra_requirements.txt -FROM python:3.10-slim-bookworm +FROM python:3.13-slim-trixie ARG TENTACLES_URL_TAG="" +ARG VERSION="" ENV TENTACLES_URL_TAG=$TENTACLES_URL_TAG +ENV VERSION=$VERSION + +LABEL maintainer="Drakkar-Software" \ + version="${VERSION}" \ + description="OctoBot - Cryptocurrency trading bot" WORKDIR /octobot -# Import python dependencies COPY --from=base /opt/venv /opt/venv - -# Add default config files COPY octobot/config /octobot/octobot/config - COPY docker/* /octobot/ -# 1. Install requirements -# 2. Install required packages -# 3. Finish env setup SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN apt-get update \ - && apt-get install -y --no-install-recommends curl libxslt-dev libxcb-xinput0 libjpeg62-turbo-dev zlib1g-dev libblas-dev liblapack-dev libatlas-base-dev libopenjp2-7 libtiff-dev \ + && apt-get install -y --no-install-recommends \ + curl \ + libxslt-dev \ + libxcb-xinput0 \ + libjpeg62-turbo-dev \ + zlib1g-dev \ + libblas-dev \ + liblapack-dev \ + libopenblas-dev \ + libopenjp2-7 \ + libtiff-dev \ && rm -rf /var/lib/apt/lists/* \ - && ln -s /opt/venv/bin/OctoBot OctoBot # Make sure we use the virtualenv \ - && chmod +x docker-entrypoint.sh + && chmod +x docker-entrypoint.sh \ + && chmod +x tunnel.sh + +ENV PATH="/opt/venv/bin:$PATH" VOLUME /octobot/backtesting VOLUME /octobot/logs @@ -50,6 +72,7 @@ VOLUME /octobot/user EXPOSE 5001 -HEALTHCHECK --interval=15s --timeout=10s --retries=5 CMD curl -sS http://127.0.0.1:5001 || exit 1 +HEALTHCHECK --interval=15s --timeout=10s --retries=5 \ + CMD curl -sS http://127.0.0.1:5001 || exit 1 ENTRYPOINT ["./docker-entrypoint.sh"] diff --git a/additional_tests/exchanges_tests/.env.template b/additional_tests/exchanges_tests/.env.template index e3254ca017..d501511cbb 100644 --- a/additional_tests/exchanges_tests/.env.template +++ b/additional_tests/exchanges_tests/.env.template @@ -16,3 +16,7 @@ PHEMEX_KEY= PHEMEX_SECRET= PHEMEX_PASSWORD= PHEMEX_SANDBOXED=true + +POLYMARKET_KEY= +POLYMARKET_SECRET= +POLYMARKET_PASSWORD= diff --git a/additional_tests/exchanges_tests/__init__.py b/additional_tests/exchanges_tests/__init__.py index 9c2dcfe25e..c60cbb4b73 100644 --- a/additional_tests/exchanges_tests/__init__.py +++ b/additional_tests/exchanges_tests/__init__.py @@ -112,7 +112,7 @@ async def get_authenticated_exchange_manager( .is_exchange_only() if http_proxy_callback_factory: proxy_callback = http_proxy_callback_factory(exchange_builder.exchange_manager) - exchange_builder.set_proxy_config(exchanges.ProxyConfig(http_proxy_callback=proxy_callback)) + exchange_builder.set_proxy_config(exchanges.ExchangeProxyConfig(http_proxy_callback=proxy_callback)) exchange_manager_instance = await exchange_builder.build() # create trader afterwards to init exchange personal data exchange_manager_instance.trader.is_enabled = True diff --git a/additional_tests/exchanges_tests/abstract_authenticated_exchange_tester.py b/additional_tests/exchanges_tests/abstract_authenticated_exchange_tester.py index 63100e0bf5..91057485ea 100644 --- a/additional_tests/exchanges_tests/abstract_authenticated_exchange_tester.py +++ b/additional_tests/exchanges_tests/abstract_authenticated_exchange_tester.py @@ -35,7 +35,7 @@ import octobot_trading.personal_data as personal_data import octobot_trading.personal_data.orders as personal_data_orders import octobot_trading.util.test_tools.exchanges_test_tools as exchanges_test_tools -import octobot_trading.util.test_tools.exchange_data as exchange_data_import +import octobot_trading.exchanges.util.exchange_data as exchange_data_import import trading_backend.enums import octobot_tentacles_manager.api as tentacles_manager_api from additional_tests.exchanges_tests import get_authenticated_exchange_manager, NoProvidedCredentialsError @@ -72,11 +72,13 @@ class AbstractAuthenticatedExchangeTester: ORDER_SIZE = 10 # % of portfolio to include in test orders PORTFOLIO_TYPE_FOR_SIZE = trading_constants.CONFIG_PORTFOLIO_FREE CONVERTS_ORDER_SIZE_BEFORE_PUSHING_TO_EXCHANGES = False + CONVERTS_ORDER_PRICE_BEFORE_PUSHING_TO_EXCHANGE = False ORDER_PRICE_DIFF = 20 # % of price difference compared to current price for limit and stop orders EXPECT_MISSING_ORDER_FEES_DUE_TO_ORDERS_TOO_OLD_FOR_RECENT_TRADES = False # when recent trades are limited and # closed orders fees are taken from recent trades EXPECT_MISSING_FEE_IN_CANCELLED_ORDERS = True # when get_cancelled_orders returns None in fee EXPECT_POSSIBLE_ORDER_NOT_FOUND_DURING_ORDER_CREATION = False + EXPECT_NOT_SUPPORTED_ERROR_WHEN_FETCHING_CANCELLED_ORDERS = False # set True when fetching cancelled orders is not supported and should raise a NotSupported error CONVERTS_MARKET_INTO_LIMIT_ORDERS = False # when market orders are always converted into limit order by the exchange OPEN_ORDERS_IN_CLOSED_ORDERS = False CANCELLED_ORDERS_IN_CLOSED_ORDERS = False @@ -86,6 +88,7 @@ class AbstractAuthenticatedExchangeTester: OPEN_TIMEOUT = 15 # if >0: retry fetching open/cancelled orders when created/cancelled orders are not synchronised instantly ORDER_IN_OPEN_AND_CANCELLED_ORDERS_TIMEOUT = 10 + ORDER_IMPACTS_PORTFOLIO_FREE_BALANCE = True CANCEL_TIMEOUT = 15 EDIT_TIMEOUT = 15 MIN_PORTFOLIO_SIZE = 1 @@ -110,9 +113,11 @@ class AbstractAuthenticatedExchangeTester: IS_BROKER_ENABLED_ACCOUNT = True # set False when this test account can't generate broker fees # set True when this exchange used to have symbols that can't be traded through API (ex: MEXC) USED_TO_HAVE_UNTRADABLE_SYMBOL = False + LIST_TRADABLE_SYMBOLS = False SUPPORTS_GET_MAX_ORDERS_COUNT = False # when True, will ensure that default values are not used DEFAULT_MAX_DEFAULT_ORDERS_COUNT = trading_constants.DEFAULT_MAX_DEFAULT_ORDERS_COUNT DEFAULT_MAX_STOP_ORDERS_COUNT = trading_constants.DEFAULT_MAX_STOP_ORDERS_COUNT + SLEEP_SECONDS_BEFORE_CHECKING_PORTFOLIO = 0 # used to wait before fetching portfolio after creating/cancelling an order # Implement all "test_[name]" methods, call super() to run the test, pass to ignore it. # Override the "inner_test_[name]" method to override a test content. @@ -170,12 +175,28 @@ def check_portfolio_content(self, portfolio): assert len(set(portfolio)) == len(portfolio) async def test_untradable_symbols(self): - await self.inner_test_untradable_symbols() - - async def inner_test_untradable_symbols(self): - if not self.USED_TO_HAVE_UNTRADABLE_SYMBOL: + if self.USED_TO_HAVE_UNTRADABLE_SYMBOL: + await self.inner_test_untradable_symbols() + elif self.LIST_TRADABLE_SYMBOLS: + await self._list_tradable_symbols() + else: # nothing to do return + + async def _list_tradable_symbols(self): + all_symbols = self.exchange_manager.exchange.get_all_available_symbols() + tradable = [] + for i, symbol in enumerate(all_symbols): + try: + await self.get_open_orders(_symbols=[symbol]) + tradable.append(symbol) + print(f"{i+1}/{len(all_symbols)} : {symbol} is tradable") + except ccxt.BadSymbol as e: + print(f"{i+1}/{len(all_symbols)} : {symbol} is untradable") + sorted_tradable = sorted(tradable, key=lambda x: f"{len(x.split('/')[0])}{x}") + print(f"Tradable symbols: {len(sorted_tradable)}/{len(all_symbols)}: {sorted_tradable}") + + async def inner_test_untradable_symbols(self): async with self.local_exchange_manager(): all_symbols = self.exchange_manager.exchange.get_all_available_symbols() all_symbols_including_disabled = self.exchange_manager.exchange.get_all_available_symbols(active_only=False) @@ -363,14 +384,20 @@ def assert_has_at_least_one_authenticated_call(calls): ) latest_calls = get_latest_calls() for latest_call in latest_calls: - assert latest_call[1] is False, f"{latest_call} should be NOT authenticated" # authenticated request + if self.exchange_manager.exchange.ALWAYS_REQUIRES_AUTHENTICATION: + assert latest_call[1] is True, f"{latest_call} should be authenticated" # authenticated request + else: + assert latest_call[1] is False, f"{latest_call} should be NOT authenticated" # authenticated request ticker = await self.exchange_manager.exchange.get_price_ticker(self.SYMBOL) assert ticker last_price = ticker[trading_enums.ExchangeConstantsTickersColumns.CLOSE.value] assert rest_exchange_data["calls"][-1][0][0] != latest_calls[-1][0][0] # assert latest call's url changed latest_calls = get_latest_calls() for latest_call in latest_calls: - assert latest_call[1] is False, f"{latest_call} should be NOT authenticated" # authenticated request + if self.exchange_manager.exchange.ALWAYS_REQUIRES_AUTHENTICATION: + assert latest_call[1] is True, f"{latest_call} should be authenticated" # authenticated request + else: + assert latest_call[1] is False, f"{latest_call} should be NOT authenticated" # authenticated request # 3. make private requests # balance (usually a GET) @@ -453,7 +480,7 @@ async def test_missing_trading_api_key_permissions(self): async def inner_test_missing_trading_api_key_permissions(self): permissions = await self.exchange_manager.exchange_backend._get_api_key_rights() # ensure reading permission only are returned - assert permissions == [trading_backend.enums.APIKeyRights.READING] + assert permissions == [trading_backend.enums.APIKeyRights.READING], f"Expected reading permission only, got: {permissions}" # ensure order operations returns a permission error with pytest.raises(trading_errors.AuthenticationError) as err: await self.inner_test_create_and_cancel_limit_orders(use_both_margin_types=False) @@ -515,7 +542,7 @@ async def inner_test_get_special_orders(self): for exchange_id, order_details in self.SPECIAL_ORDER_TYPES_BY_EXCHANGE_ID.items(): symbol, info_key, info_type, expected_type, expected_side, expected_trigger_above = order_details print(order_details) - fetched_order = await self.exchange_manager.exchange.get_order(exchange_id, symbol=symbol) + fetched_order = await self.exchange_manager.exchange.get_order(exchange_id, symbol=symbol, order_type=trading_enums.TradeOrderType(expected_type)) assert fetched_order is not None self._check_fetched_order_dicts([fetched_order]) # ensure parsing order doesn't crash @@ -614,13 +641,23 @@ async def inner_test_create_and_cancel_limit_orders(self, symbol=None, settlemen self.check_created_limit_order(limit_order, price, size, side) assert await self.order_in_open_orders(open_orders, limit_order, symbol=symbol) await self.check_can_get_order(limit_order) - # assert free portfolio amount is smaller than total amount - balance = await self.get_portfolio() - locked_currency = settlement_currency if side == trading_enums.TradeOrderSide.BUY else self.ORDER_CURRENCY - assert balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_FREE] < \ - balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_TOTAL], ( - f"FALSE: {balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_FREE]} < {balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_TOTAL]}" - ) + await self.sleep_before_checking_portfolio() + if self.ORDER_IMPACTS_PORTFOLIO_FREE_BALANCE: + # assert free portfolio amount is smaller than total amount + balance = await self.get_portfolio() + locked_currency = settlement_currency if side == trading_enums.TradeOrderSide.BUY else self.ORDER_CURRENCY + assert balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_FREE] < \ + balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_TOTAL], ( + f"FALSE: {balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_FREE]} < {balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_TOTAL]}" + ) + else: + # assert free portfolio amount equals total amount when orders don't impact free balance + balance = await self.get_portfolio() + locked_currency = settlement_currency if side == trading_enums.TradeOrderSide.BUY else self.ORDER_CURRENCY + assert balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_FREE] == \ + balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_TOTAL], ( + f"FALSE: {balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_FREE]} == {balance[locked_currency][trading_constants.CONFIG_PORTFOLIO_TOTAL]}" + ) finally: # don't leave buy_limit as open order await self.cancel_order(limit_order) @@ -662,6 +699,7 @@ async def inner_test_create_and_fill_market_orders(self): filled_order[trading_enums.ExchangeConstantsOrderColumns.EXCHANGE_ID.value] ) self.check_raw_closed_orders([filled_order]) + await self.sleep_before_checking_portfolio() post_buy_portfolio = await self.get_portfolio() portfolio_increased = side == trading_enums.TradeOrderSide.SELL self.check_portfolio_changed(portfolio, post_buy_portfolio, portfolio_increased) @@ -672,6 +710,7 @@ async def inner_test_create_and_fill_market_orders(self): second_market_order = await self.create_market_order(current_price, mirror_size, other_side) self.check_created_market_order(second_market_order, mirror_size, other_side) await self.wait_for_fill(second_market_order) + await self.sleep_before_checking_portfolio() post_sell_portfolio = await self.get_portfolio() if post_buy_portfolio: portfolio_increased = other_side == trading_enums.TradeOrderSide.SELL @@ -707,7 +746,7 @@ async def inner_test_create_and_cancel_stop_orders(self): ) try: self.check_created_stop_order(stop_loss, price, size, trading_enums.TradeOrderSide.SELL) - stop_loss_from_get_order = await self.get_order(stop_loss.exchange_order_id, stop_loss.symbol) + stop_loss_from_get_order = await self.get_order(stop_loss.exchange_order_id, stop_loss.symbol, stop_loss.order_type) self.check_created_stop_order(stop_loss_from_get_order, price, size, trading_enums.TradeOrderSide.SELL) assert await self.order_in_open_orders(open_orders, stop_loss) ## for manual checks @@ -751,7 +790,11 @@ async def inner_test_get_cancelled_orders(self): if not self.exchange_manager.exchange.SUPPORT_FETCHING_CANCELLED_ORDERS: assert not self.exchange_manager.exchange.connector.client.has["fetchCanceledOrders"] # use get_closed order, no cancelled order is returned - assert await self.exchange_manager.exchange.connector.get_cancelled_orders(self.SYMBOL) == [] + if self.EXPECT_NOT_SUPPORTED_ERROR_WHEN_FETCHING_CANCELLED_ORDERS: + with pytest.raises(trading_errors.NotSupported): + await self.exchange_manager.exchange.connector.get_cancelled_orders(self.SYMBOL) + else: + assert await self.exchange_manager.exchange.connector.get_cancelled_orders(self.SYMBOL) == [] with pytest.raises(trading_errors.NotSupported): await self.get_cancelled_orders(force_fetch=True) return @@ -782,7 +825,7 @@ async def inner_test_edit_limit_order(self): edited_size = self.get_order_size(portfolio, price, order_size=1.3*self.ORDER_SIZE, settlement_currency=self._get_edit_order_settlement_currency()) sell_limit = await self.edit_order(sell_limit, edited_price=edited_price, edited_quantity=edited_size) await self.wait_for_edit(sell_limit, edited_size) - sell_limit = await self.get_order(sell_limit.exchange_order_id, sell_limit.symbol) + sell_limit = await self.get_order(sell_limit.exchange_order_id, sell_limit.symbol, sell_limit.order_type) self.check_created_limit_order(sell_limit, edited_price, edited_size, trading_enums.TradeOrderSide.SELL) finally: if sell_limit is not None: @@ -811,7 +854,7 @@ async def inner_test_edit_stop_order(self): edited_size = self.get_order_size(portfolio, price, order_size=1.3*self.ORDER_SIZE, settlement_currency=self._get_edit_order_settlement_currency()) stop_loss = await self.edit_order(stop_loss, edited_price=edited_price, edited_quantity=edited_size) await self.wait_for_edit(stop_loss, edited_size) - stop_loss = await self.get_order(stop_loss.exchange_order_id, stop_loss.symbol) + stop_loss = await self.get_order(stop_loss.exchange_order_id, stop_loss.symbol, stop_loss.order_type) self.check_created_stop_order(stop_loss, edited_price, edited_size, trading_enums.TradeOrderSide.SELL) finally: if stop_loss is not None: @@ -1157,10 +1200,10 @@ async def get_price(self, symbol=None): ] )) - async def get_order(self, exchange_order_id, symbol=None): + async def get_order(self, exchange_order_id, symbol=None, order_type: trading_enums.TraderOrderType = None): assert self.exchange_manager.exchange.connector.client.has["fetchOrder"] is \ self.EXPECT_FETCH_ORDER_TO_BE_AVAILABLE - order = await self.exchange_manager.exchange.get_order(exchange_order_id, symbol or self.SYMBOL) + order = await self.exchange_manager.exchange.get_order(exchange_order_id, symbol or self.SYMBOL, order_type) self._check_fetched_order_dicts([order]) return personal_data.create_order_instance_from_raw(self.exchange_manager.trader, order) @@ -1246,7 +1289,7 @@ async def _create_order_on_exchange(self, order, params=None, expected_creation_ raise AssertionError(f"Created order is None. input order: {order}, params: {params}") if created_order.status is trading_enums.OrderStatus.PENDING_CREATION: await self.wait_for_open(created_order) - return await self.get_order(created_order.exchange_order_id, order.symbol) + return await self.get_order(created_order.exchange_order_id, order.symbol, order.order_type) return created_order def get_order_size(self, portfolio, price, symbol=None, order_size=None, settlement_currency=None): @@ -1340,7 +1383,13 @@ def _check_fetched_order_dicts(self, orders: list[dict]): def check_created_limit_order(self, order, price, size, side): self._check_order(order, size, side) - assert order.origin_price == price, f"{order.origin_price} != {price}" + if self.CONVERTS_ORDER_PRICE_BEFORE_PUSHING_TO_EXCHANGE: + # actual origin_price may vary due to price conversion + assert price * decimal.Decimal("0.8") <= order.origin_price <= price * decimal.Decimal("1.2"), ( + f"FALSE: {price * decimal.Decimal('0.8')} <= {order.origin_price} <= {price * decimal.Decimal('1.2')}" + ) + else: + assert order.origin_price == price, f"{order.origin_price} != {price}" assert isinstance(order.filled_quantity, decimal.Decimal) expected_type = personal_data.BuyLimitOrder \ if side is trading_enums.TradeOrderSide.BUY else personal_data.SellLimitOrder @@ -1461,7 +1510,7 @@ async def _get_order_until(self, order, validation_func, timeout, can_order_be_n t0 = time.time() iterations = 0 while time.time() - t0 < timeout: - raw_order = await self.exchange_manager.exchange.get_order(order.exchange_order_id, order.symbol) + raw_order = await self.exchange_manager.exchange.get_order(order.exchange_order_id, order.symbol, order.order_type) iterations += 1 if raw_order is None: print(f"{self.exchange_manager.exchange_name} {order.order_type} {validation_func.__name__} " @@ -1487,7 +1536,7 @@ async def _get_order_until(self, order, validation_func, timeout, can_order_be_n raise TimeoutError(f"Order not filled/cancelled within {timeout}s: {order} ({validation_func.__name__})") async def check_can_get_order(self, order): - fetched_order = await self.get_order(order.exchange_order_id, order.symbol) + fetched_order = await self.get_order(order.exchange_order_id, order.symbol, order.order_type) self.check_created_limit_order(fetched_order, order.origin_price, order.origin_quantity, order.side) async def order_in_fetched_orders(self, method, previous_orders, order, symbol=None, check_presence=True): @@ -1665,6 +1714,11 @@ def _get_exchange_tentacle_class(self): def _supports_ip_whitelist_error(self): return bool(self._get_exchange_tentacle_class().EXCHANGE_IP_WHITELIST_ERRORS) + async def sleep_before_checking_portfolio(self): + if self.SLEEP_SECONDS_BEFORE_CHECKING_PORTFOLIO > 0: + print(f"{self.__class__.__name__} Waiting {self.SLEEP_SECONDS_BEFORE_CHECKING_PORTFOLIO} seconds to check portfolio") + await asyncio.sleep(self.SLEEP_SECONDS_BEFORE_CHECKING_PORTFOLIO) + def _get_encoded_value(raw) -> str: return commons_configuration.encrypt(raw).decode() diff --git a/additional_tests/exchanges_tests/abstract_authenticated_future_exchange_tester.py b/additional_tests/exchanges_tests/abstract_authenticated_future_exchange_tester.py index b8cf0ead58..09c15162e1 100644 --- a/additional_tests/exchanges_tests/abstract_authenticated_future_exchange_tester.py +++ b/additional_tests/exchanges_tests/abstract_authenticated_future_exchange_tester.py @@ -33,6 +33,7 @@ class AbstractAuthenticatedFutureExchangeTester( INVERSE_SYMBOL = None MIN_PORTFOLIO_SIZE = 2 # ensure fetching currency for linear and inverse SUPPORTS_GET_LEVERAGE = True + SUPPORTS_GET_POSITION = True SUPPORTS_EMPTY_POSITION_SET_MARGIN_TYPE = True async def test_get_empty_linear_and_inverse_positions(self): @@ -58,19 +59,23 @@ async def _inner_test_get_empty_linear_and_inverse_positions_for_margin_type( ): positions = await self.get_positions() self._check_positions_content(positions) - position = await self.get_position(self.SYMBOL) - self._check_position_content(position, self.SYMBOL, margin_type=margin_type) - for contract_type in (trading_enums.FutureContractType.LINEAR_PERPETUAL, - trading_enums.FutureContractType.INVERSE_PERPETUAL): - if not self.has_empty_position(self.get_filtered_positions(positions, contract_type)): - empty_position_symbol = self.get_other_position_symbol(positions, contract_type) - # test with get_position - empty_position = await self.get_position(empty_position_symbol) - assert self.is_position_empty(empty_position) - # test with get_positions - empty_positions = await self.get_positions([empty_position_symbol]) - assert len(empty_positions) == 1 - assert self.is_position_empty(empty_positions[0]) + if self.SUPPORTS_GET_POSITION: + position = await self.get_position(self.SYMBOL) + self._check_position_content(position, self.SYMBOL, margin_type=margin_type) + + if self.EXCHANGE_TYPE == trading_enums.ExchangeTypes.FUTURE.value: + for contract_type in (trading_enums.FutureContractType.LINEAR_PERPETUAL, + trading_enums.FutureContractType.INVERSE_PERPETUAL): + if not self.has_empty_position(self.get_filtered_positions(positions, contract_type)): + empty_position_symbol = self.get_other_position_symbol(positions, contract_type) + if self.SUPPORTS_GET_POSITION: + # test with get_position + empty_position = await self.get_position(empty_position_symbol) + assert self.is_position_empty(empty_position) + # test with get_positions + empty_positions = await self.get_positions([empty_position_symbol]) + assert len(empty_positions) == 1 + assert self.is_position_empty(empty_positions[0]) async def test_get_and_set_leverage(self): # ensure set_leverage works @@ -265,10 +270,10 @@ async def get_positions(self, symbols=None): async def init_and_get_contract(self, symbol=None): symbol = symbol or self.SYMBOL - await self.exchange_manager.exchange.load_pair_future_contract(symbol) - if not self.exchange_manager.exchange.has_pair_future_contract(symbol): + await self.exchange_manager.exchange.load_pair_contract(symbol) + if not self.exchange_manager.exchange.has_pair_contract(symbol): raise AssertionError(f"{symbol} contract not initialized") - return self.exchange_manager.exchange.get_pair_future_contract(symbol) + return self.exchange_manager.exchange.get_pair_contract(symbol) async def get_margin_type_and_leverage_from_position(self, symbol=None): position = await self.get_position(symbol=symbol) @@ -298,7 +303,7 @@ async def required_empty_position(self): async def load_contract(self, symbol=None): symbol = symbol or self.SYMBOL if self.exchange_manager.is_future and symbol not in self.exchange_manager.exchange.pair_contracts: - await self.exchange_manager.exchange.load_pair_future_contract(symbol) + await self.exchange_manager.exchange.load_pair_contract(symbol) async def enable_partial_take_profits_and_stop_loss(self, mode, symbol=None): await self.exchange_manager.exchange.set_symbol_partial_take_profit_stop_loss( diff --git a/additional_tests/exchanges_tests/abstract_authenticated_option_exchange_tester.py b/additional_tests/exchanges_tests/abstract_authenticated_option_exchange_tester.py new file mode 100644 index 0000000000..3d0fc2723c --- /dev/null +++ b/additional_tests/exchanges_tests/abstract_authenticated_option_exchange_tester.py @@ -0,0 +1,23 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import octobot_trading.enums as trading_enums +from additional_tests.exchanges_tests import abstract_authenticated_future_exchange_tester + + +class AbstractAuthenticatedOptionExchangeTester( + abstract_authenticated_future_exchange_tester.AbstractAuthenticatedFutureExchangeTester +): + EXCHANGE_TYPE = trading_enums.ExchangeTypes.OPTION.value diff --git a/additional_tests/exchanges_tests/test_binance_futures.py b/additional_tests/exchanges_tests/test_binance_futures.py index 9826efeb1e..b61401ddee 100644 --- a/additional_tests/exchanges_tests/test_binance_futures.py +++ b/additional_tests/exchanges_tests/test_binance_futures.py @@ -42,6 +42,8 @@ class TestBinanceFuturesAuthenticatedExchange( MAX_TRADE_USD_VALUE = decimal.Decimal(450000) # testnet portfolio ALLOW_0_MAKER_FEES = True SUPPORTS_GET_MAX_ORDERS_COUNT = True + # Set True when get_cancelled_order() can return outdated open orders + CAN_HAVE_DELAYED_CANCELLED_ORDERS = True SPECIAL_ORDER_TYPES_BY_EXCHANGE_ID: dict[ str, ( diff --git a/additional_tests/exchanges_tests/test_bitget.py b/additional_tests/exchanges_tests/test_bitget.py index 3dbdbac72c..9a365ec1b2 100644 --- a/additional_tests/exchanges_tests/test_bitget.py +++ b/additional_tests/exchanges_tests/test_bitget.py @@ -23,6 +23,7 @@ pytestmark = pytest.mark.asyncio +# need non FR account to test this exchange (20 march 2026) class TestBitgetAuthenticatedExchange( abstract_authenticated_exchange_tester.AbstractAuthenticatedExchangeTester ): diff --git a/additional_tests/exchanges_tests/test_coinbase.py b/additional_tests/exchanges_tests/test_coinbase.py index 81f7a83226..5473fbec10 100644 --- a/additional_tests/exchanges_tests/test_coinbase.py +++ b/additional_tests/exchanges_tests/test_coinbase.py @@ -32,13 +32,14 @@ class TestCoinbaseAuthenticatedExchange( SETTLEMENT_CURRENCY = "USDC" SYMBOL = f"{ORDER_CURRENCY}/{SETTLEMENT_CURRENCY}" ORDER_SIZE = 70 # % of portfolio to include in test orders - MIN_TRADE_USD_VALUE = decimal.Decimal("0.004") + MIN_TRADE_USD_VALUE = decimal.Decimal("0.0004") CONVERTS_ORDER_SIZE_BEFORE_PUSHING_TO_EXCHANGES = True VALID_ORDER_ID = "8bb80a81-27f7-4415-aa50-911ea46d841c" USE_ORDER_OPERATION_TO_CHECK_API_KEY_RIGHTS = True # set True when api key rights can't be checked using a EXPECT_MISSING_FEE_IN_CANCELLED_ORDERS = False IS_BROKER_ENABLED_ACCOUNT = False IS_AUTHENTICATED_REQUEST_CHECK_AVAILABLE = True # set True when is_authenticated_request is implemented + SLEEP_SECONDS_BEFORE_CHECKING_PORTFOLIO = 8 SPECIAL_ORDER_TYPES_BY_EXCHANGE_ID: dict[ str, ( diff --git a/additional_tests/exchanges_tests/test_coinex.py b/additional_tests/exchanges_tests/test_coinex.py index 23e13fa62e..9b1530057d 100644 --- a/additional_tests/exchanges_tests/test_coinex.py +++ b/additional_tests/exchanges_tests/test_coinex.py @@ -29,7 +29,7 @@ class TestCoinExAuthenticatedExchange( ORDER_CURRENCY = "BTC" SETTLEMENT_CURRENCY = "USDT" SYMBOL = f"{ORDER_CURRENCY}/{SETTLEMENT_CURRENCY}" - ORDER_SIZE = 70 # % of portfolio to include in test orders + ORDER_SIZE = 90 # % of portfolio to include in test orders CONVERTS_ORDER_SIZE_BEFORE_PUSHING_TO_EXCHANGES = True EXPECTED_GENERATED_ACCOUNT_ID = True # set True when account_id can't be fetch and a generated account id is used IS_AUTHENTICATED_REQUEST_CHECK_AVAILABLE = True # set True when is_authenticated_request is implemented diff --git a/additional_tests/exchanges_tests/test_kucoin.py b/additional_tests/exchanges_tests/test_kucoin.py index 53db24c7c1..a6aa69be76 100644 --- a/additional_tests/exchanges_tests/test_kucoin.py +++ b/additional_tests/exchanges_tests/test_kucoin.py @@ -25,6 +25,7 @@ class TestKucoinAuthenticatedExchange( abstract_authenticated_exchange_tester.AbstractAuthenticatedExchangeTester ): + # WARNING: can't be tested since dec 2025 due to regulatory changes on EU countries # enter exchange name as a class variable here EXCHANGE_NAME = "kucoin" ORDER_CURRENCY = "BTC" diff --git a/additional_tests/exchanges_tests/test_kucoin_futures.py b/additional_tests/exchanges_tests/test_kucoin_futures.py index e7a3a94842..8c5ce48c38 100644 --- a/additional_tests/exchanges_tests/test_kucoin_futures.py +++ b/additional_tests/exchanges_tests/test_kucoin_futures.py @@ -25,6 +25,7 @@ class TestKucoinFuturesAuthenticatedExchange( abstract_authenticated_future_exchange_tester.AbstractAuthenticatedFutureExchangeTester ): + # WARNING: can't be tested since dec 2025 due to regulatory changes on EU countries # enter exchange name as a class variable here EXCHANGE_NAME = "kucoin" CREDENTIALS_EXCHANGE_NAME = "KUCOIN_FUTURES" diff --git a/additional_tests/exchanges_tests/test_lbank.py b/additional_tests/exchanges_tests/test_lbank.py new file mode 100644 index 0000000000..922dee551b --- /dev/null +++ b/additional_tests/exchanges_tests/test_lbank.py @@ -0,0 +1,128 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import pytest +import ccxt + +import octobot_trading.errors as octobot_errors + +from additional_tests.exchanges_tests import abstract_authenticated_exchange_tester + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +class TestLBankAuthenticatedExchange( + abstract_authenticated_exchange_tester.AbstractAuthenticatedExchangeTester +): + # enter exchange name as a class variable here + EXCHANGE_NAME = "lbank" + EXCHANGE_TENTACLE_NAME = "LBank" + # WARNING: use a tradable symbol otherwise tests will fail + # because of "ccxt.BadSymbol: Invalid Trading Pair" when cancelling order + # due to https://github.com/LBank-exchange/lbank-official-api-docs/issues/29 + ORDER_CURRENCY = "BNB" # WARNING: use a tradable symbol (see test_untradable_symbols) + SETTLEMENT_CURRENCY = "USDT" + SYMBOL = f"{ORDER_CURRENCY}/{SETTLEMENT_CURRENCY}" + ORDER_SIZE = 15 # % of portfolio to include in test orders + VALID_ORDER_ID = "1777764898965454848" + IS_ACCOUNT_ID_AVAILABLE = True # set False when get_account_id is not available and should be checked + EXPECTED_GENERATED_ACCOUNT_ID = True # set True when account_id can't be fetch and a generated account id is used + IS_AUTHENTICATED_REQUEST_CHECK_AVAILABLE = True # set True when is_authenticated_request is implemented + EXPECT_NOT_SUPPORTED_ERROR_WHEN_FETCHING_CANCELLED_ORDERS = True # set True when fetching cancelled orders is not supported and should raise a NotSupported error + CHECK_EMPTY_ACCOUNT = False # set True when the account to check has no funds. Warning: does not check order + # can't test for now due to country restrictions + # parse/create/fill/cancel or portfolio & trades parsing + CONVERTS_ORDER_SIZE_BEFORE_PUSHING_TO_EXCHANGES = True + + async def test_get_portfolio(self): + await super().test_get_portfolio() + + async def test_get_portfolio_with_market_filter(self): + await super().test_get_portfolio_with_market_filter() + + async def test_untradable_symbols(self): + # self.LIST_TRADABLE_SYMBOLS = True # uncomment to list tradable symbols + await super().test_untradable_symbols() + + async def test_get_max_orders_count(self): + await super().test_get_max_orders_count() + + async def test_get_account_id(self): + await super().test_get_account_id() + + async def test_is_authenticated_request(self): + await super().test_is_authenticated_request() + + async def test_invalid_api_key_error(self): + await super().test_invalid_api_key_error() + + async def test_get_api_key_permissions(self): + await super().test_get_api_key_permissions() + + async def test_missing_trading_api_key_permissions(self): + await super().test_missing_trading_api_key_permissions() + + async def test_api_key_ip_whitelist_error(self): + # now working: invalid IP seems to work as well for reading operations + await super().test_api_key_ip_whitelist_error() + + async def test_get_not_found_order(self): + await super().test_get_not_found_order() + + async def test_is_valid_account(self): + await super().test_is_valid_account() + + async def test_get_special_orders(self): + await super().test_get_special_orders() + + async def test_create_and_cancel_limit_orders(self): + await super().test_create_and_cancel_limit_orders() + + async def test_create_and_fill_market_orders(self): + await super().test_create_and_fill_market_orders() + + async def test_get_my_recent_trades(self): + await super().test_get_my_recent_trades() + + async def test_get_closed_orders(self): + await super().test_get_closed_orders() + + async def test_get_cancelled_orders(self): + await super().test_get_cancelled_orders() + + async def test_create_and_cancel_stop_orders(self): + # pass if not implemented + pass + + async def test_edit_limit_order(self): + # pass if not implemented + pass + + async def test_edit_stop_order(self): + # pass if not implemented + pass + + async def test_create_single_bundled_orders(self): + # pass if not implemented + pass + + async def test_create_double_bundled_orders(self): + # pass if not implemented + pass + + +# 15/01/2026 +# Tradable symbols: 397/2093: ['GOATSEUS3L/USDT', 'GOATSEUS3S/USDT', 'DT/USDT', 'XR/USDT', 'XT/USDT', 'BMB/USDT', 'BNB/USDT', 'BNC/USDT', 'CGO/USDT', 'FAN/USDT', 'FEI/USDT', 'FXY/USDT', 'IVT/USDT', 'JBC/USDT', 'LFT/USDT', 'MCS/USDT', 'OVO/USDT', 'RIS/USDT', 'TRX/ETH', 'VIX/USDT', 'AGFI/USDT', 'AGIX/USDT', 'AP3X/USDT', 'AR3L/USDT', 'AR3S/USDT', 'BLUE/USDT', 'DBNU/USDT', 'HEGE/USDT', 'HFOF/USDT', 'ID3L/USDT', 'ID3S/USDT', 'IP3L/USDT', 'IP3S/USDT', 'LEMX/USDT', 'ME3L/USDT', 'ME3S/USDT', 'OP3L/USDT', 'OP3S/USDT', 'ROAM/USDT', 'SAFE/USDT', 'SCNT/USDT', 'SEYT/USDT', 'SIFT/USDT', 'TAP2/USDT', 'TOTT/USDT', 'WNGY/USDT', 'ACE3L/USDT', 'ACE3S/USDT', 'ADA3L/USDT', 'ADA3S/USDT', 'APE3L/USDT', 'APE3S/USDT', 'APT3L/USDT', 'APT3S/USDT', 'ARB3L/USDT', 'ARB3S/USDT', 'ATA3L/USDT', 'ATA3S/USDT', 'AXS3L/USDT', 'AXS3S/USDT', 'AXS5L/USDT', 'AXS5S/USDT', 'BAT3L/USDT', 'BAT3S/USDT', 'BCH3L/USDT', 'BCH3S/USDT', 'BCH5L/USDT', 'BCH5S/USDT', 'BEL3L/USDT', 'BEL3S/USDT', 'BNB3L/USDT', 'BNB3S/USDT', 'BSV3L/USDT', 'BSV3S/USDT', 'BSV5L/USDT', 'BSV5S/USDT', 'BTC3L/USDT', 'BTC3S/USDT', 'BTC5L/USDT', 'BTC5S/USDT', 'C983L/USDT', 'C983S/USDT', 'CAT3L/USDT', 'CAT3S/USDT', 'CFX3L/USDT', 'CFX3S/USDT', 'CHR3L/USDT', 'CHR3S/USDT', 'CHZ3L/USDT', 'CHZ3S/USDT', 'CRO3L/USDT', 'CRO3S/USDT', 'CRV3L/USDT', 'CRV3S/USDT', 'CRV5L/USDT', 'CRV5S/USDT', 'CTK3L/USDT', 'CTK3S/USDT', 'CVC3L/USDT', 'CVC3S/USDT', 'DGB3L/USDT', 'DGB3S/USDT', 'DOT3L/USDT', 'DOT3S/USDT', 'DOT5L/USDT', 'DOT5S/USDT', 'ENA3L/USDT', 'ENA3S/USDT', 'ENJ3L/USDT', 'ENJ3S/USDT', 'ENJ5L/USDT', 'ENJ5S/USDT', 'ENS3L/USDT', 'ENS3S/USDT', 'ETC3L/USDT', 'ETC3S/USDT', 'ETH3L/USDT', 'ETH3S/USDT', 'ETH5L/USDT', 'ETH5S/USDT', 'FIL3L/USDT', 'FIL3S/USDT', 'FIL5L/USDT', 'FIL5S/USDT', 'GMT3L/USDT', 'GMT3S/USDT', 'GRT3L/USDT', 'GRT3S/USDT', 'GTC3L/USDT', 'GTC3S/USDT', 'ICP3L/USDT', 'ICP3S/USDT', 'ICX3L/USDT', 'ICX3S/USDT', 'IMX3L/USDT', 'IMX3S/USDT', 'INJ3L/USDT', 'INJ3S/USDT', 'JASMY/USDT', 'KNC3L/USDT', 'KNC3S/USDT', 'KSM3L/USDT', 'KSM3S/USDT', 'LPT3L/USDT', 'LPT3S/USDT', 'LRC3L/USDT', 'LRC3S/USDT', 'LSETH/USDT', 'LTC3L/USDT', 'LTC3S/USDT', 'LTC5L/USDT', 'LTC5S/USDT', 'MBOOM/USDT', 'MTL3L/USDT', 'MTL3S/USDT', 'MYX3L/USDT', 'MYX3S/USDT', 'NEO3L/USDT', 'NEO3S/USDT', 'NEO5L/USDT', 'NEO5S/USDT', 'NKN3L/USDT', 'NKN3S/USDT', 'NOT3L/USDT', 'NOT3S/USDT', 'OGN3L/USDT', 'OGN3S/USDT', 'ONE3L/USDT', 'ONE3S/USDT', 'ONT3L/USDT', 'ONT3S/USDT', 'RAY3L/USDT', 'RAY3S/USDT', 'RIF3L/USDT', 'RIF3S/USDT', 'RLC3L/USDT', 'RLC3S/USDT', 'RSR3L/USDT', 'RSR3S/USDT', 'RVN3L/USDT', 'RVN3S/USDT', 'SFP3L/USDT', 'SFP3S/USDT', 'SKL3L/USDT', 'SKL3S/USDT', 'SNX3L/USDT', 'SNX3S/USDT', 'SOL3L/USDT', 'SOL3S/USDT', 'SOL5L/USDT', 'SOL5S/USDT', 'SPX3L/USDT', 'SPX3S/USDT', 'SSV3L/USDT', 'SSV3S/USDT', 'SUI3L/USDT', 'SUI3S/USDT', 'SXP3L/USDT', 'SXP3S/USDT', 'THE3L/USDT', 'THE3S/USDT', 'TIA3L/USDT', 'TIA3S/USDT', 'TLM3L/USDT', 'TLM3S/USDT', 'TON3L/USDT', 'TON3S/USDT', 'TRB3L/USDT', 'TRB3S/USDT', 'TRX3L/USDT', 'TRX3S/USDT', 'TRX5L/USDT', 'TRX5S/USDT', 'UNI3L/USDT', 'UNI3S/USDT', 'UNI5L/USDT', 'UNI5S/USDT', 'VET3L/USDT', 'VET3S/USDT', 'VNXAU/USDT', 'WIF3L/USDT', 'WIF3S/USDT', 'WLD3L/USDT', 'WLD3S/USDT', 'XLM3L/USDT', 'XLM3S/USDT', 'XMR3L/USDT', 'XMR3S/USDT', 'XRP3L/USDT', 'XRP3S/USDT', 'XRP5L/USDT', 'XRP5S/USDT', 'XTZ3L/USDT', 'XTZ3S/USDT', 'Y6D6S/VUSD', 'Y7D7S/VUSD', 'YFI3L/USDT', 'YFI3S/USDT', 'ZEC3L/USDT', 'ZEC3S/USDT', 'ZEN3L/USDT', 'ZEN3S/USDT', 'ZIL3L/USDT', 'ZIL3S/USDT', 'ZRX3L/USDT', 'ZRX3S/USDT', 'AAVE3L/USDT', 'AAVE3S/USDT', 'ALGO3L/USDT', 'ALGO3S/USDT', 'ANKR3L/USDT', 'ANKR3S/USDT', 'API33L/USDT', 'API33S/USDT', 'ARPA3L/USDT', 'ARPA3S/USDT', 'ATOM3L/USDT', 'ATOM3S/USDT', 'AVAX3L/USDT', 'AVAX3S/USDT', 'BAND3L/USDT', 'BAND3S/USDT', 'BOME3L/USDT', 'BOME3S/USDT', 'BONK3L/USDT', 'BONK3S/USDT', 'CELO3L/USDT', 'CELO3S/USDT', 'CELR3L/USDT', 'CELR3S/USDT', 'COMP3L/USDT', 'COMP3S/USDT', 'COTI3L/USDT', 'COTI3S/USDT', 'CTSI3L/USDT', 'CTSI3S/USDT', 'DASH3L/USDT', 'DASH3S/USDT', 'DENT3L/USDT', 'DENT3S/USDT', 'DOGE3L/USDT', 'DOGE3S/USDT', 'DOGE5L/USDT', 'DOGE5S/USDT', 'DOGS3L/USDT', 'DOGS3S/USDT', 'DYDX3L/USDT', 'DYDX3S/USDT', 'EGLD3L/USDT', 'EGLD3S/USDT', 'ELCASH/USDT', 'ENIDOG/USDT', 'FLOW3L/USDT', 'FLOW3S/USDT', 'GALA3L/USDT', 'GALA3S/USDT', 'GALA5L/USDT', 'GALA5S/USDT', 'HBAR3L/USDT', 'HBAR3S/USDT', 'HIGH3L/USDT', 'HIGH3S/USDT', 'HOOK3L/USDT', 'HOOK3S/USDT', 'IOST3L/USDT', 'IOST3S/USDT', 'IOTA3L/USDT', 'IOTA3S/USDT', 'IOTX3L/USDT', 'IOTX3S/USDT', 'KAVA3L/USDT', 'KAVA3S/USDT', 'LINK3L/USDT', 'LINK3S/USDT', 'LINK5L/USDT', 'LINK5S/USDT', 'MANA3L/USDT', 'MANA3S/USDT', 'MASK3L/USDT', 'MASK3S/USDT', 'NEAR3L/USDT', 'NEAR3S/USDT', 'PEPE3L/USDT', 'PEPE3S/USDT', 'PNUT3L/USDT', 'PNUT3S/USDT', 'QTUM3L/USDT', 'QTUM3S/USDT', 'ROSE3L/USDT', 'ROSE3S/USDT', 'RUNE3L/USDT', 'RUNE3S/USDT', 'SAND3L/USDT', 'SAND3S/USDT', 'SAND5L/USDT', 'SAND5S/USDT', 'SATS3L/USDT', 'SATS3S/USDT', 'SHIB3L/USDT', 'SHIB3S/USDT', 'SHIB5L/USDT', 'SHIB5S/USDT', 'STRK3L/USDT', 'STRK3S/USDT', 'VANA3L/USDT', 'VANA3S/USDT', 'VIGO1M/USDT', 'WLFI3L/USDT', 'WLFI3S/USDT', '1INCH3L/USDT', '1INCH3S/USDT', '1INCH5L/USDT', '1INCH5S/USDT', 'ALICE3L/USDT', 'ALICE3S/USDT', 'AUDIO3L/USDT', 'AUDIO3S/USDT', 'BELIEVE/USDT', 'FLOKI3L/USDT', 'FLOKI3S/USDT', 'GRASS3L/USDT', 'GRASS3S/USDT', 'JASMY3L/USDT', 'JASMY3S/USDT', 'MAGIC3L/USDT', 'MAGIC3S/USDT', 'PENGU3L/USDT', 'PENGU3S/USDT', 'PIXEL3L/USDT', 'PIXEL3S/USDT', 'SAITAMA/USDT', 'SPELL3L/USDT', 'SPELL3S/USDT', 'STORJ3L/USDT', 'STORJ3S/USDT', 'SUSHI3L/USDT', 'SUSHI3S/USDT', 'SUSHI5L/USDT', 'SUSHI5S/USDT', 'TESTBC1/VUSD', 'THETA3L/USDT', 'THETA3S/USDT', 'TRUMP3L/USDT', 'TRUMP3S/USDT', 'WAVES3L/USDT', 'WAVES3S/USDT', 'PEOPLE3L/USDT', 'PEOPLE3S/USDT', 'PEOPLE5L/USDT', 'PEOPLE5S/USDT', 'BIGTIME3L/USDT', 'BIGTIME3S/USDT', 'MELANIA3L/USDT', 'MELANIA3S/USDT', 'MOODENG3L/USDT', 'MOODENG3S/USDT', 'TESTPEPE2/VUSD'] diff --git a/additional_tests/exchanges_tests/test_polymarket.py b/additional_tests/exchanges_tests/test_polymarket.py new file mode 100644 index 0000000000..27ac8399e7 --- /dev/null +++ b/additional_tests/exchanges_tests/test_polymarket.py @@ -0,0 +1,134 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import pytest + +from additional_tests.exchanges_tests import abstract_authenticated_option_exchange_tester + +try: + import tentacles.Trading.Exchange.polymarket.ccxt.polymarket_async +except ImportError: + pytest.skip( + reason=( + "Polymarket tentacle is not installed, skipping TestPolymarketAuthenticatedExchange" + ) + ) + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +class TestPolymarketAuthenticatedExchange( + abstract_authenticated_option_exchange_tester.AbstractAuthenticatedOptionExchangeTester +): + # enter exchange name as a class variable here + EXCHANGE_NAME = "polymarket" + ORDER_CURRENCY = "will-bitcoin-replace-sha-256-before-2027" + SETTLEMENT_CURRENCY = "USDC" + EXPIRATION_DATE = "261231" + SYMBOL = f"{ORDER_CURRENCY}/{SETTLEMENT_CURRENCY}:{SETTLEMENT_CURRENCY}-{EXPIRATION_DATE}-0-YES" + ORDER_SIZE = 10 # % of portfolio to include in test orders + EXPECT_MISSING_FEE_IN_CANCELLED_ORDERS = False + CONVERTS_ORDER_SIZE_BEFORE_PUSHING_TO_EXCHANGES = True + CONVERTS_ORDER_PRICE_BEFORE_PUSHING_TO_EXCHANGE = True + ORDER_IMPACTS_PORTFOLIO_FREE_BALANCE = False + SUPPORTS_GET_POSITION = False + + async def test_get_portfolio(self): + await super().test_get_portfolio() + + async def test_get_portfolio_with_market_filter(self): + # pass if not implemented + pass + + async def test_untradable_symbols(self): + # pass if not implemented + pass + + async def test_get_max_orders_count(self): + # pass if not implemented + pass + + async def test_get_account_id(self): + # pass if not implemented + pass + + async def test_is_authenticated_request(self): + await super().test_is_authenticated_request() + + async def test_invalid_api_key_error(self): + await super().test_invalid_api_key_error() + + async def test_get_api_key_permissions(self): + # pass if not implemented + pass + + async def test_missing_trading_api_key_permissions(self): + pass + + async def test_api_key_ip_whitelist_error(self): + # pass if not implemented + pass + + async def test_get_not_found_order(self): + await super().test_get_not_found_order() + + async def test_is_valid_account(self): + # pass if not implemented + pass + + async def test_get_special_orders(self): + # pass if not implemented + pass + + async def test_create_and_cancel_limit_orders(self): + await super().test_create_and_cancel_limit_orders() + + async def test_create_and_fill_market_orders(self): + await super().test_create_and_fill_market_orders() + + async def test_get_my_recent_trades(self): + await super().test_get_my_recent_trades() + + async def test_get_closed_orders(self): + # pass if not implemented + pass + + async def test_get_cancelled_orders(self): + # pass if not implemented + pass + + async def test_create_and_cancel_stop_orders(self): + # pass if not implemented + pass + + async def test_edit_limit_order(self): + # pass if not implemented + pass + + async def test_edit_stop_order(self): + # pass if not implemented + pass + + async def test_create_single_bundled_orders(self): + # pass if not implemented + pass + + async def test_create_double_bundled_orders(self): + # pass if not implemented + pass + + async def test_get_empty_linear_and_inverse_positions(self): + await super().test_get_empty_linear_and_inverse_positions() diff --git a/bin/.gitignore b/bin/.gitignore new file mode 100644 index 0000000000..2211df63dd --- /dev/null +++ b/bin/.gitignore @@ -0,0 +1 @@ +*.txt diff --git a/bin/favicon.ico b/bin/favicon.ico new file mode 100644 index 0000000000..d521fd3d53 Binary files /dev/null and b/bin/favicon.ico differ diff --git a/bin/start.spec b/bin/start.spec new file mode 100644 index 0000000000..a4dfa8c62c --- /dev/null +++ b/bin/start.spec @@ -0,0 +1,57 @@ +# -*- mode: python -*- + +block_cipher = None + +OCTOBOT_PACKAGES_FILES = REQUIRED = [s.strip() for s in open('bin/octobot_packages_files.txt').readlines()] +# hiddenimports=['numpy.core._dtype_ctypes'] from https://github.com/pyinstaller/pyinstaller/issues/3982 +a = Analysis( + ['../start.py'], + pathex=['../'], + datas=[ + ('../octobot/config', 'octobot/config'), + ('../octobot/strategy_optimizer/optimizer_data_files', 'octobot/strategy_optimizer/optimizer_data_files') + ], + hiddenimports=[ + "colorlog", "numpy.core._dtype_ctypes", "dotenv", + "pgpy", "imghdr", + "aiosqlite", "aiohttp", + "pyarrow", "pyiceberg", + "psutil", + "telegram", "telegram.ext", "telethon", "jsonschema", + "tulipy", + "asyncpraw", "simplifiedpytrends", "simplifiedpytrends.exceptions", "simplifiedpytrends.request", + "pyngrok", "pyngrok.ngrok", "openai", + "flask", "flask_login", "flask_wtf", "flask_caching", "flask_compress", "flask_socketio", "flask_cors", + "wtforms", "wtforms.fields", "gevent", "geventwebsocket", + "vaderSentiment", "vaderSentiment.vaderSentiment", + "coingecko_openapi_client", + "certifi", + "aiofiles", + "pydantic", "mcp", + "dbos", "fastapi", "passlib", "fastapi.staticfiles", + "web3", + "ccxt", "ccxt.async_support", "ccxt.pro", "order_book", "cmath", "cryptography", "websockets", "yarl", "idna", "sortedcontainers", + "websockets.legacy", "websockets.legacy.auth", "websockets.legacy.client", "websockets.legacy.compatibility", + "websockets.legacy.framing", "websockets.legacy.handshake", "websockets.legacy.http", "websockets.legacy.protocol", + "websockets.legacy.server" + ] + OCTOBOT_PACKAGES_FILES, + excludes=["tentacles", "logs", "user"], + hookspath=[], + runtime_hooks=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher +) +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) +exe = EXE(pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + name='OctoBot', + debug=False, + strip=False, + icon="favicon.ico", + upx=True, + runtime_tmpdir=None, + console=True ) diff --git a/dev_requirements.txt b/dev_requirements.txt index 63d95a05ae..4b65f4bb08 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -2,12 +2,10 @@ pytest>=9.0.0 pytest-asyncio>=1.3.0 pytest-cov pytest-timeout +pytest-xdist mock>=4.0.1 -coverage -coveralls - twine pip setuptools diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh index dbf4ac357d..1f242c2dd3 100755 --- a/docker/docker-entrypoint.sh +++ b/docker/docker-entrypoint.sh @@ -14,5 +14,5 @@ bash tunnel.sh # Disable set -e set +e -# Start OctoBot -./OctoBot +# Start OctoBot using the installed console script +OctoBot "$@" diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..1334fabdba --- /dev/null +++ b/docs/README.md @@ -0,0 +1,76 @@ +# OctoBot Documentation + +Built with [Docusaurus 3](https://docusaurus.io/). Supports English and French (i18n). + +## Setup + +```bash +cd docs +npm install +``` + +Requires Node.js >= 20. + +## Development + +```bash +npm start +``` + +Runs the pre-collect step (generates tentacle docs, syncs root docs, generates `llms.txt`), then starts the dev server at `http://localhost:3000`. + +## Build + +```bash +npm run build +``` + +Output goes to `docs/build/`. The pre-collect step runs automatically before the build. + +### Collect step (manual) + +The collect step generates content from tentacles and syncs a few root-level files. It runs automatically on `start` and `build`, but you can run it manually: + +```bash +npm run collect +``` + +Generated files are gitignored — never commit them: + +- `content/guides/exchanges.md` and `content/guides/exchanges/` +- `content/guides/strategies/` +- `content/creators/` +- `content/developers/contributing.md` and `content/developers/changelog.md` +- `static/llms.txt` + +## Images and Git LFS + +Images in `docs/static/` are stored in [Git LFS](https://git-lfs.com/) — the repo only contains LFS pointers, not the actual image blobs. + +### First-time setup + +Install Git LFS once per machine: + +```bash +brew install git-lfs # macOS +git lfs install +``` + +After cloning or pulling, fetch the actual image files: + +```bash +git lfs pull +``` + +### Adding new images + +Drop images into `docs/static/images/`. Git will automatically track them via LFS (configured in `.gitattributes` at the repo root). No extra steps needed — just `git add` and commit as usual. + +Supported formats: `.png`, `.jpg`, `.jpeg`, `.webp`. + +### Checking LFS status + +```bash +git lfs ls-files # list all LFS-tracked files +git lfs status # show pending LFS changes +``` diff --git a/docs/blog/2022-06-18-trading-strategy.md b/docs/blog/2022-06-18-trading-strategy.md new file mode 100644 index 0000000000..b3add89436 --- /dev/null +++ b/docs/blog/2022-06-18-trading-strategy.md @@ -0,0 +1,41 @@ +--- +title: "What is a trading strategy" +description: "Discover what is a trading strategy and why you should automate it." +slug: "trading-strategy" +date: "2022-06-18" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Strategy", "Automation", "Educational"] +image: "/images/blog/trading-strategy-automation/cover.png" +--- + + + +# Trading Strategies + +<div style={{textAlign: "center"}}> + ![cover](/images/blog/trading-strategy-automation/cover.png) +</div> + +A trading strategy is a set of rules or guidelines that traders use to determine when to buy and sell assets in the financial markets. While there are many different strategies that traders can employ, some common elements include the use of technical analysis, risk management, and market psychology. In this article, we will take a look at some of the most popular trading strategies and how they can be used to improve your own trading. + +<!--truncate--> + +## What is a trading strategy + +A trading strategy is a plan that outlines how you will trade cryptocurrencies, stocks, options and more. It includes when you will buy and sell, what you will buy and sell, and how much you are willing to risk. A trading strategy should be based on your investment goals and risk tolerance. + +## Why using a trading strategy + +If you are new to trading, or even if you have some experience, you may be wondering why using a trading strategy is so important. There are many different reasons why having a good trading strategy can be beneficial. + +First of all, having a trading strategy can help to keep you disciplined. It can be very easy to get caught up in the excitement of trading and make decisions based on emotions rather than logic. A trading strategy can help to take the emotion out of decision making by providing clear rules to follow. This can help to prevent impulsive decisions that might lead to losses. + +Another reason why using a trading strategy is important is that it can help you to stay focused. There are so many different things that you need to keep track of when trading that it can be easy to get distracted. Having a clear strategy can help you to stay focused on what is important and ignore everything else. This can lead to better decision making and improved results. + +Finally, having a good trading strategy can help you to manage your risk. Risk management is an essential part of successful trading, and a good strategy will allow you to control your risk while still giving you the opportunity to make profits. Without proper risk management, it becomes difficult to protect your capital and can lead to significant losses. + +## Automate your own trading strategy with OctoBot + +In the blog section, we will discuss how you can use OctoBot to automate your own trading strategy. OctoBot is a powerful tool that can help you take your trading to the next level. By automating your trading strategy, you can free up your time to focus on other important aspects of your life. OctoBot can help you stay disciplined with your trading and ensure that you are always following your predetermined rules. In addition, OctoBot can also help you manage your risk by automatically adjusting your position size according to your risk profile. + +[Let's start automating your strategy](/) diff --git a/docs/blog/2022-07-01-hollaex-partnership.md b/docs/blog/2022-07-01-hollaex-partnership.md new file mode 100644 index 0000000000..990b1d3bf5 --- /dev/null +++ b/docs/blog/2022-07-01-hollaex-partnership.md @@ -0,0 +1,32 @@ +--- +title: "OctoBot is now supporting Hollaex" +description: "OctoBot partners with Hollaex, the first decentralized crypto exchange platform." +slug: "hollaex-partnership" +date: "2022-07-01" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "Hollaex"] +image: "/images/blog/hollaex-partnership/cover.jpg" +--- + + + +# OctoBot is now supporting Hollaex + +![cover](/images/blog/hollaex-partnership/cover.jpg) + +OctoBot is excited to announce that it has joined Hollaex, the world’s first decentralized cryptocurrency exchange platform, in its list of partner trading bots! + +<!--truncate--> + +## What is Hollaex + +<a href="https://www.hollaex.com?utm_source=octobot" rel="nofollow">HollaEx</a> is more than just a white-label crypto software, it's +your go to tool kit that connects your business to the blockchain world. HollaEx +let's you start an exchange, with your own markets and assets on your domain. +Hollaex is an open source exchange software kit that allows anyone to start a +crypto business. OctoBot is now the first crypto trading bot to integrate +Hollaex support! + +## How to use Hollaex in OctoBot + +OctoBot supports Hollaex trading through a dedicated module. A simple configuration guide is available at on [the HollaEx account setup](/guides/exchanges/hollaex/account-setup). diff --git a/docs/blog/2022-09-30-profile-sharing-in-octobot-cloud.md b/docs/blog/2022-09-30-profile-sharing-in-octobot-cloud.md new file mode 100644 index 0000000000..88a77dd469 --- /dev/null +++ b/docs/blog/2022-09-30-profile-sharing-in-octobot-cloud.md @@ -0,0 +1,69 @@ +--- +title: "Profile sharing in OctoBot cloud" +description: "You can now share your OctoBot profiles with the community" +slug: "profile-sharing-in-octobot-cloud" +date: "2022-09-30" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Exchange", "OctoBot cloud"] +image: "/images/blog/profile-sharing-in-octobot-cloud/cover.jpg" +--- + + + +# Profile sharing in OctoBot cloud + +![cover](/images/blog/profile-sharing-in-octobot-cloud/cover.jpg) + +## Why sharing an OctoBot profile ? + +In OctoBot, your configuration is stored as a profile. A profile contains: + +<!--truncate--> + +- Your activated trading mode and evaluators alongside their configuration +- Your traded pairs +- The exchanges you are currently using (without authentication credentials) + +Sharing your profile means sharing your current trading strategy configuration. Anyone using your profile will be able to trade in the exact same way as you do with your OctoBot. + +## How to share a profile ? + +1. Select and download it from your OctoBot + ![Profile-sharing-from-octobot](/images/blog/profile-sharing-in-octobot-cloud/bot-share.jpg) +2. Login on [OctoBot cloud](/), go to `Editor` and `Publish a new strategy` + ![Profile-sharing-octobot-cloud-editor](/images/blog/profile-sharing-in-octobot-cloud/editor.jpg) +3. Enter your profile name, description and logo + ![Profile-sharing-octobot-cloud-publish](/images/blog/profile-sharing-in-octobot-cloud/publish.jpg) +4. Upload the profile as downloaded from your OctoBot + ![Profile-sharing-octobot-cloud-publish-profile](/images/blog/profile-sharing-in-octobot-cloud/publish-profile.jpg) +5. Submit your profile to make it available to everyone on OctoBot cloud + +Note: For now, we are manually checking profiles, therefore there will be a short delay before your profile will be available to everyone on OctoBot cloud. + +## How to use a profile from OctoBot cloud ? + +1. Go yo the profile you want to use and click `Subscribe` + ![Profile-sharing-octobot-cloud-subscribe](/images/blog/profile-sharing-in-octobot-cloud/sub.jpg) +2. Now that you are subscribing to the profile, click `Copy download url` + ![Profile-sharing-octobot-cloud-copy](/images/blog/profile-sharing-in-octobot-cloud/copy.jpg) +3. From your OctoBot, click `Import a profile` + ![Profile-sharing-from-octobot-import](/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg) +4. Paste the download url (that was copied from step 2) and click `Import` + ![Profile-sharing-from-octobot-import-url](/images/blog/profile-sharing-in-octobot-cloud/bot-import-link.jpg) +5. The new profile is now available in your OctoBot + ![Profile-sharing-from-octobot-importe](/images/blog/profile-sharing-in-octobot-cloud/bot-imported.jpg) + +## Next steps + +Sharing profiles is the first step towards [OctoBot cloud](/) as a platform where OctoBot users can share whole trading strategies. Profiles are merely strategy configurations and we will soon add the possibility to share whole strategies. + +With OctoBot cloud, you will be able to: + +- Use and share any strategy with our without sharing its code and configuration +- Check out past performances of available strategies +- Create and use paid or free strategies that are made by the community: creators are incentivized to create strategies with the best results to earn money from paid subscribers that use the strategies + +## Join the beta + +Sharing profiles will first be available on the [beta OctoBot cloud](https://beta.octobot.cloud/). +To join the OctoBot beta program, [have a look our beta program](/guides/octobot-advanced-usage/beta-program) diff --git a/docs/blog/2022-10-25-what-is-future-trading.md b/docs/blog/2022-10-25-what-is-future-trading.md new file mode 100644 index 0000000000..6a9eec4941 --- /dev/null +++ b/docs/blog/2022-10-25-what-is-future-trading.md @@ -0,0 +1,94 @@ +--- +title: "What is future trading" +slug: "what-is-future-trading" +date: "2022-10-25" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Exchange", "Future", "Strategy", "Educational"] +image: "/images/blog/what-is-future-trading/cover.png" +--- + +# What is future trading ? + +![cover](/images/blog/what-is-future-trading/cover.png) + +## What is cryptocurrency future trading + +Cryptocurrency future trading is a process of buying and selling digital assets with the aim of making a profit from the difference in prices. It is one of the most popular forms of cryptocurrency trading, as it allows traders to speculate on the future price movements of their chosen asset. In this article, we will take a look at what cryptocurrency future trading is, how it works, and whether it is right for you. + +<!--truncate--> + +## What is future trading + +Cryptocurrency future trading is an innovative way to trade digital assets. It allows traders to speculate on the future price of a cryptocurrency. By correctly predicting the future price, traders can make a profit. + +However, cryptocurrency future trading is also risky. If a trader makes a wrong prediction, they can lose all of their investment. For this reason, it is important to carefully research the market before trading. + +The future of cryptocurrency trading is exciting. With the right approach, it can be profitable and rewarding. + +### What is short selling + +Short selling is a trading strategy where a trader sells an asset, hoping to buy it back at a lower price so they can profit from the difference. Short selling is sometimes also called "shorting" or "going short". + +Short selling can be used in any market, but it is particularly popular in the cryptocurrency market. This is because cryptocurrencies are often volatile, which can create opportunities for traders to make profits. + +However, short selling is risky. If the price of the asset goes up instead of down, the trader will lose money. For this reason, it is important to carefully research the market before short selling. + +## Why trading with future contracts + +When it comes to trading cryptocurrencies, there are a few different options available. One popular option is trading with future contracts. In this article, we'll take a look at what exactly cryptocurrency future trading is and why it might be a good option for you. + +Cryptocurrency future trading is essentially betting on the future price of a particular coin. For example, let's say you think the price of [Bitcoin](https://www.octobot.cloud/what-is-bitcoin) is going to increase in the next month. You could buy a Bitcoin future contract that expires in one month and allows you to buy Bitcoin at the current price. If the price of Bitcoin does indeed increase in the next month, you would make a profit on your contract. + +There are a few reasons why cryptocurrency future trading can be a good option. First, it allows you to get exposure to the price movement of a particular coin without actually having to own any of the coins. This can be helpful if you don't want to tie up your capital in a particular coin but still want to benefit from its price movement. + +Another reason why cryptocurrency future trading can be attractive is that it often provides leverage. This means that you can control a larger position than if you were just buying the coins outright. + +## Some future trading strategies + +When trading cryptocurrencies, it's important to have a strategy. Here are some future trading strategies to consider: + +1. Buy and hold: This strategy involves buying a currency and holding it for a long period of time, regardless of market conditions. + +2. Buy and sell: This strategy involves buying a currency and selling it when the price increases. + +3. Sell and buy back: This strategy involves selling a currency and then buying it back at a lower price. + +4. Short selling: This strategy involves selling a currency in the hopes that the price will fall so that it can be bought back at a lower price. + +5. [Arbitrage](https://www.octobot.cloud/tools/triangular-arbitrage-crypto): This strategy involves taking advantage of price differences between exchanges. + +6. Hedging: This strategy involves taking both long and short positions in different currencies to offset risk. + +## Automating future trading with OctoBot + +OctoBot is an open-source software project that automates cryptocurrency trading. It is designed to be easily extensible and adaptable, allowing it to be used with a wide range of exchanges and strategies. + +OctoBot has a number of features that make it well suited for future trading. First, it supports multiple exchanges, allowing you to trade on multiple platforms simultaneously. Second, it includes a risk management system that can automatically adjust your position size to limit your losses. Third, it features a number of built-in strategies that you can use or customize to suit your own trading style. + +Finally, and perhaps most importantly, OctoBot is constantly being updated with new features and improvements. This means that it will continue to get better over time, making it an ideal tool for long-term future trading. + +### How to use OctoBot for future trading + +There are two ways to use OctoBot for future trading: + +1. Use the built-in strategies + +OctoBot includes a number of built-in trading strategies that you can use out-of-the-box. To access these strategies, go to the "Strategies" tab in the OctoBot interface. + +From here, you can view a list of all the available strategies, as well as their performance over time. You can also backtest each strategy to see how it would have performed in the past. + +To start using a strategy, simply click on its name and then click "Enable". OctoBot will then begin using the strategy on your behalf. + +2. Create your own strategy + +If you want more control over your future trading, you can create your own custom strategy with OctoBot's Strategy Builder. This is a powerful tool that allows you to customize every aspect of your trading strategy, from the entry and exit conditions to the position sizing and risk management rules. + +From here, you'll be able to give your strategy a name and description. You can then start adding rules and conditions to your strategy. OctoBot's Strategy Builder includes a wide range of options, so you'll be able to create a strategy that suits your own trading style. + +Once you're happy with your strategy, click "Save" and OctoBot will begin using it on your behalf. + +## Conclusion + +OctoBot is a powerful tool that can help you automate your future trading. It includes a number of built-in strategies that you can use out-of-the-box, as well as a powerful Strategy Builder that allows you to create your own custom strategies. + +If you're looking for a tool to help you automate your future trading, OctoBot is definitely worth considering. diff --git a/docs/blog/2022-11-following-strategies-in-octobot-cloud.md b/docs/blog/2022-11-following-strategies-in-octobot-cloud.md new file mode 100644 index 0000000000..ec2090e24e --- /dev/null +++ b/docs/blog/2022-11-following-strategies-in-octobot-cloud.md @@ -0,0 +1,64 @@ +--- +title: "Following strategies in OctoBot cloud" +description: "You can now follow trading strategies of the community" +slug: "following-strategies-in-octobot-cloud" +date: "2022-11" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Strategy", "Exchange", "OctoBot cloud"] +image: "/images/blog/following-strategies-in-octobot-cloud/cover.png" +--- + + + +# Follow the best strategies + +![cover](/images/blog/following-strategies-in-octobot-cloud/cover.png) + +On OctoBot cloud, you can subscribe to trading strategies. Subscribing to a strategy allows you to easily trade using a strategy made by someone else from the OctoBot community. + +<!--truncate--> + +When subscribed to a strategy, you can use the strategy profile directly from your OctoBot. When you do so, your OctoBot will follow the strategy by coping any trade made by this strategy. Order amounts will be adapted to your current portfolio. + +## How to use a followed strategy ? + +1. Login on [OctoBot cloud](/) and go to the desired strategy page +2. Click `Subscribe` + ![Following-strategies-pre-sub](/images/blog/following-strategies-in-octobot-cloud/pre-sub.png) +3. Now that you are subscribing to the strategy, click `Copy download url` +4. From your OctoBot, login to your OctoBot cloud account + ![Following-strategies-community](/images/blog/following-strategies-in-octobot-cloud/community.png) +5. Go to the `Profile` tab and click on the name of the current profile, click `Import a profile` + ![Following-strategies-import](/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg) +6. Paste the download url (that was copied from step 3) and click `Import` + ![Following-strategies-imported](/images/blog/following-strategies-in-octobot-cloud/imported.png) +7. Use the imported profile and restart your OctoBot + +## How does it work ? + +When following a strategy, a user gets access to the trading signals of the strategy. Trading signals are emitted at each order created or cancelled by the followed strategy. This way followers of a strategy benefit from trades of the desired strategy in real time directly from their OctoBot. Strategies can be applied to any exchange as long as the strategy trading pairs are supported. You can follow a strategy with real or simulated trading. + +Trading through strategy signals is achieved by using the RemoteTradingSignalsTradingMode configured to follow the strategy you selected. When importing a strategy profile, you are importing an already configured profile that enables this trading mode with the right strategy identifier and the strategy traded pairs and default exchange. + +![Following-strategies-mode-config](/images/blog/following-strategies-in-octobot-cloud/mode-config.png) + +As following a strategy is only possible through OctoBot cloud, you need to login to your OctoBot cloud account from your OctoBot to be able to follow a strategy. + +## How to publish a strategy on OctoBot cloud ? + +Trading strategies are published on [OctoBot cloud](/) by the OctoBot community. +When a user wants to share a trading strategy, the only thing to do is to: + +1. Create a strategy on [OctoBot cloud](/) +2. Setup the desired OctoBot trading mode to emit trading signals to this strategy + ![Following-strategies-config](/images/blog/following-strategies-in-octobot-cloud/config.png) + +Note: the identifier of the strategy to emit signal on can be found on the strategy page, by clicking on this button +![Following-strategies-id-button](/images/blog/following-strategies-in-octobot-cloud/id-button.png) + +Please note that configuration and content of a published strategy is not uploaded to OctoBot cloud and followers can't access the code or configuration of the strategy. They will only get trading signals when the OctoBot that is actually running the strategy will create or cancel orders. + +## Join the beta + +Following strategies will first be available on the [beta OctoBot cloud](https://beta.octobot.cloud/). +To join the OctoBot beta program, [have a look at our beta program](/guides/octobot-advanced-usage/beta-program) diff --git a/docs/blog/2022-12-11-octobots-in-octobot-cloud.md b/docs/blog/2022-12-11-octobots-in-octobot-cloud.md new file mode 100644 index 0000000000..a092d8ea67 --- /dev/null +++ b/docs/blog/2022-12-11-octobots-in-octobot-cloud.md @@ -0,0 +1,50 @@ +--- +title: "Easily deploy your OctoBot in OctoBot cloud" +description: "You can now easily deploy your OctoBot directly in OctoBot cloud" +slug: "octobots-in-octobot-cloud" +date: "2022-12-11" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Strategy", "OctoBot cloud"] +image: "/images/blog/octobots-in-octobot-cloud/cover.png" +--- + + + +# Easily deploy your OctoBot in OctoBot cloud + +![cover](/images/blog/octobots-in-octobot-cloud/cover.png) + +## Your OctoBot, always online and reachable + +The main benefit of hosting OctoBot in the cloud are that it will be remain online, no need to install OctoBot or have it running your computer anymore. +Moreover, your OctoBots can be accessed from anywhere, as long as you have an internet connection. + +<!--truncate--> + +This can be especially useful for traders who need to monitor their OctoBot from different locations or devices. + +## How to deploy your OctoBot on OctoBot cloud ? + +1. Login on [OctoBot cloud](/) and go to `My bots`. + ![my-bots-button](/images/blog/octobots-in-octobot-cloud/my-bots-button.jpg) +2. Click on `Deploy now` from the `Discover` card. + ![deploy-now](/images/blog/octobots-in-octobot-cloud/deploy-now.jpg) +3. Wait until your OctoBot is available. This may take a few minutes. + ![deploying](/images/blog/octobots-in-octobot-cloud/deploying.png) +4. Access your personal OctoBot using the `Open Interface` button. + ![open-interface](/images/blog/octobots-in-octobot-cloud/open-interface.png) +5. Unlock your OctoBot with your OctoBot cloud account password. + ![login](/images/blog/octobots-in-octobot-cloud/login.png) +6. Enjoy your OctoBot from anywhere. + +Note: running a cloud OctoBot requires OctoBot cloud credits. We will soon add ways to get those credits. Fow now please ask the OctoBot team if you need beta credits. + +## Join the beta + +If you're interested in participating, be sure to follow any guidelines and instructions provided by the OctoBot team, and take the time to thoroughly test the application and provide useful feedback. + +Cloud OctoBots will first be available on the [beta OctoBot cloud](https://beta.octobot.cloud/). + +To participate in the beta testing of OctoBot's cloud hosting, you'll need to sign up to be a beta tester. + +Once you've been accepted into the program, please follow [these instructions](/guides/octobot-advanced-usage/beta-program) to access the beta version of the application and any other necessary information. diff --git a/docs/blog/2023-05-17-trading-using-tradingview.md b/docs/blog/2023-05-17-trading-using-tradingview.md new file mode 100644 index 0000000000..293f55627b --- /dev/null +++ b/docs/blog/2023-05-17-trading-using-tradingview.md @@ -0,0 +1,53 @@ +--- +title: "Trading using TradingView" +description: "Automate your trades using any TradingView indicator" +slug: "trading-using-tradingview" +date: "2023-05-17" +authors: ["paul"] +tags: ["Tradingview", "Pine Script", "Webhook", "Strategy", "OctoBot cloud", "Educational"] +image: "/images/blog/trading-using-tradingview/cover.png" +--- + + + +# Trading using TradingView + +![cover](/images/blog/trading-using-tradingview/cover.png) + +## Trading using your favorite TradingView strategies + +You love using TradingView indicators and strategies ? With OctoBot, you can take it to the next level and trade using TradingView strategies and indicators directly the exchange you want. + +<!--truncate--> + +This means that you can use all the OctoBot features according to your TradingView tools, this includes: + +- Trading on your favorite exchange(s) using your TradingView strategy +- Test your TradingView strategy in real time with simulated funds +- Get real time notifications when your TradingView strategy sends a buy or sell signal + +## TradingView strategies in your OctoBot + +When following a TradingView strategy, your OctoBot will listen for TradingView signals and when signals are received, it will react instantly by creating the associated alert and order(s), which can be simulated or real, on any supported exchange. + +<div style={{textAlign: "center"}}> + ![plan-display](/images/blog/trading-using-tradingview/telegram.png) +</div> + +You can send details on the order to create directly from the TradingView signal such as the type of order, the take profit and stop loss prices and much more. View the full details of orders signals on [the TradingView signals guide](/guides/octobot-interfaces/tradingview/#alert-format). + +## How to bind your TradingView account to your OctoBot + +### Using a Cloud OctoBot + +When using [OctoBot cloud](/), all you need to do is to [create TradingView alerts](/guides/octobot-interfaces/tradingview#create-an-alert) on any event, directly from Pine Script or from a custom alert. + +Cloud OctoBots' webhook configuration is done automatically and does not require any work. + +### Using a self hosted OctoBot + +When using a self hosted OctoBot, you will have to configure a way to make your OctoBot reachable from a webhook. This is required for TradingView to send signals to your OctoBot and might require an external paid software. + +Please have a look at the [webhook manual configuration](/guides/octobot-interfaces/tradingview/using-a-webhook). + +Once your webhook setup, you can [create TradingView alerts](/guides/octobot-interfaces/tradingview#create-an-alert) on any event, directly from Pine Script or from a custom alert. diff --git a/docs/blog/2023-09-19-introducing-the-new-octobot-cloud.mdx b/docs/blog/2023-09-19-introducing-the-new-octobot-cloud.mdx new file mode 100644 index 0000000000..e96c57b9d9 --- /dev/null +++ b/docs/blog/2023-09-19-introducing-the-new-octobot-cloud.mdx @@ -0,0 +1,103 @@ +--- +title: "Introducing the new OctoBot cloud" +description: "OctoBot cloud, a new way to profit from trading strategies" +slug: "introducing-the-new-octobot-cloud" +date: "2023-09-19" +authors: ["guillaume"] +tags: ["Free", "Cryptocurrency", "Trading", "Strategy", "Exchange", "OctoBot cloud"] +image: "/images/blog/introducing-the-new-octobot-cloud/cover.png" +--- + + + +# Introducing the new OctoBot cloud + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="0rbUDySkIyg" title="Introducing OctoBot cloud" /> + +The [new Investor plans](new-octobot-cloud-plans-and-trading-bots) allow you to enjoy trading strategies in a very easy yet powerful way. These plans allow you to benefit from OctoBot based strategies without the technicalities of OctoBot. + +<!--truncate--> + +## Why a new OctoBot cloud ? + +At OctoBot, we realized that the current state of OctoBot is suitable for users with a technical background but is too complex for the majority of people. + +The goal of OctoBot has always been to bring automated trading strategies to any crypto investor. This includes tech savvy users as well as all the others. Up until now, OctoBot failed to be usable by any crypto investor and [the new OctoBot cloud](/) is built for them. + +**While the full OctoBot is designed to create and customize strategies, the new OctoBot cloud makes it very easy to use those strategies** + +We are splitting OctoBot plans into 2 different kinds: + +1. Strategy based plans +2. Full OctoBot plans + +## Choose a strategy, not a robot: strategy based plans + +OctoBot can be complicated to use and setup, finding your suitable strategy can be even more difficult. That's why we make strategy based plans as simple and clear as possible + +### OctoBot, but simple + +Ideally, when you want to use a strategy and not create one, you want to: + +1. Explore and compare available strategies +2. Understand potential profits and risks of the strategy of your choice +3. Apply this strategy on your exchange account + +It shouldn't be more complicated than this. Making these steps as easy as possible is our goal with the Investor, Investor Plus and Pro plans. + +Therefore, using those plans, you don't need to care about your OctoBot, we are doing it for you. You just need to: + +1. Select the strategy of your choice + ![strategies](/images/blog/introducing-the-new-octobot-cloud/strategies.png) + +2. follow your gains directly from [OctoBot cloud](/) + ![bot](/images/blog/introducing-the-new-octobot-cloud/bot.png) + +Besides simplicity, making strategies financially accessible is also important to us. For this reason, we designed the Investor plan to be completely free and unlimited. + +### No cost, no monthly fees, it just works + +At OctoBot, we believe that making a free plan will help a lot of people accessing automated trading strategies. + +That's why the Investor plan enables everyone to simply use trading strategies for free. + +This not a free trial: when you use a strategy with the Investor plan, we are not asking for your payment information, there is **no monthly subscription fee, no % taken on gains, no hidden fees**. + +> How is this possible ? When using the Investor plan, we rely on exchange partnerships to pay for our running costs. This means that as long as you use an exchange account from an OctoBot official partner, exchanges reward us and you are free to use the Investor plan forever. + +![plans](/images/blog/introducing-the-new-octobot-cloud/plans.png) + +### Transparency is key + +Of couse, each strategy on [OctoBot cloud](/) is built, run and tested using OctoBot. This means that each strategy past performance is evaluated on a regular basis using historical data and OctoBot's [backtesting engine](/guides/octobot-usage/backtesting). This ensures that displayed statistics are real as strategy based plans are also using OctoBot under the hood. + +![dca](/images/blog/introducing-the-new-octobot-cloud/dca.png) + +At OctoBot we believe in transparency. This means that sometimes strategies can turn unprofitable as profits depends on so many different factors including market conditions. **If a strategy is not making profits during a given period, you will see it before using it.** + +## Create your own strategy: trading bots plans + +![cover](/images/blog/introducing-the-new-octobot-cloud/cover.png) + +[OctoBot trading bots](https://www.octobot.cloud/trading-bot) enable you to run OctoBots adds a few features such as the [strategy designer](/guides/octobot-usage/strategy-designer), [free TradingView webhooks](trading-using-tradingview) and [ChatGPT integrations](trading-using-chat-gpt). + +> Those offers will now be explicitely targeting users who want to create or customize strategies. + +**[Start your OctoBot](https://www.octobot.cloud)** + +## The open source OctoBot + +The current OctoBot (available <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">on github</a>) will stay open source, we will keep updating it as it is the backbone of everything on [OctoBot cloud](/). + +Each plan uses it and will help support the development of the free open source trading robot. + +Basically nothing changed for the open source OctoBot, it will continue to grow and improve. + +## Beta phase + +At the time of writing, the [new OctoBot cloud](/) is in beta stage. While the technical part is fully fonctionnal, we are working on the last user interface improvements before completely replacing the [current octobot.cloud](/). + +Feel free to use, experiment and give us your feedback on the [new OctoBot cloud](/), we are looking forward to knowing what your think of it and our new strategy-based plans. +Join us on the [beta dedicated telegram channel here](/) diff --git a/docs/blog/2023-09-30-shape-the-future-with-our-roadmap.md b/docs/blog/2023-09-30-shape-the-future-with-our-roadmap.md new file mode 100644 index 0000000000..b16c6bdf24 --- /dev/null +++ b/docs/blog/2023-09-30-shape-the-future-with-our-roadmap.md @@ -0,0 +1,79 @@ +--- +title: "Shape the future of our roadmap" +description: "Vote for what matters to you and influence the future of OctoBot" +slug: "shape-the-future-with-our-roadmap" +date: "2023-09-30" +authors: ["guillaume"] +tags: ["Roadmap", "Vote", "Share", "Cryptocurrency", "OctoBot cloud"] +image: "/images/blog/shape-the-future-with-our-roadmap/banner-dark.png" +--- + + + +# Shape the future of our roadmap + +![cover](/images/blog/shape-the-future-with-our-roadmap/banner-dark.png) + +## Your ideas first + +### Quick context + +Simplicity and transparency are among the most important values to us as we explaned on [our previous article](introducing-the-new-octobot-cloud) regarding the [new OctoBot cloud](/). + +<!--truncate--> + +With OctoBot cloud, we are comitted in creating the best strategy automation system possible. Of course, "best" always depends on what is used as comparison criteria. +For us it means that you, our users, can: + +1. Clearly identify the investment strategies you want to use +2. Easily start the trading strategies you choose +3. Quickly access and understand all the data to follow your investment +4. Adjust things whenever you want to, in a very simple way + +This represents many challenges as each of those 4 steps can become very complex and end up being unsable. We want to avoid that at all cost. + +That is why are building tools to make it easy for you to share your ideas on how to improve each of those steps, according to your own experience. + +### The current system + +Up until the day of writing this article, the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> grew based on user feedbacks according to a mix of ideas pushed by the user community on: + +- <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a> +- <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a>{' '} +- <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">Feedback website</a>{' '} + +### What we want to achieve + +Our goal is, and has always been, to shape the whole OctoBot ecosystem according to its whole community best ideas and needs. + +As the community is growing and we are now releasing new features at a much faster pace, we will give a greater weight to our public feedback and roadmap system and split it into two part: + +- The <a href="https://feedback.octobot.cloud/cloud" rel="nofollow">OctoBot cloud section</a> + +![octobot_cloud](/images/blog/shape-the-future-with-our-roadmap/octobot_cloud.png) + +- The <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">open source OctoBot section</a> + +![open_source_octobot](/images/blog/shape-the-future-with-our-roadmap/open_source_octobot.png) + +## How does it work ? + +On <a href="https://feedback.octobot.cloud/" rel="nofollow">feedback.octobot.cloud</a> you will find our public roadmap showing: + +- What we are currently working on +- What we are planning to do next +- Ideas on the following things to work on based on your and our inputs + +![dca](/images/blog/shape-the-future-with-our-roadmap/roadmap.png) + +What you can do to improve OctoBot: + +- Vote for features that you would like to see added to the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> or [OctoBot cloud](/) +- Share new ideas +- Spread the word about <a href="https://feedback.octobot.cloud/" rel="nofollow">feedback.octobot.cloud</a> to encourage people to share ideas and vote for what matters the most + +What happens next ? + +- We update this roadmap on a regular basis to reflect our current work +- You get notified when things change to an idea you submitted or are following +- Both the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> and [OctoBot cloud](/) become better and better thanks you and we are really greateful for this. Thank you diff --git a/docs/blog/2023-10-07-trading-with-ai-introduction.md b/docs/blog/2023-10-07-trading-with-ai-introduction.md new file mode 100644 index 0000000000..a92e9e5fe1 --- /dev/null +++ b/docs/blog/2023-10-07-trading-with-ai-introduction.md @@ -0,0 +1,70 @@ +--- +title: "Crypto trading using artificial intelligence" +description: "Learn how to automate your crypto trading with AI in 5 steps" +slug: "trading-with-ai-introduction" +date: "2023-10-07" +authors: ["paul"] +tags: ["AI", "Deep learning", "Trading", "Cryptocurrency", "OctoBot", "Educational"] +image: "/images/blog/trading-with-ai-introduction/cover.png" +--- + + + +# How to automate crypto trading with AI + +Dive into the future of cryptocurrency trading using the power of AI with [OctoBot script](/guides/octobot-script)! +We'll walk you through 5 simple steps to automate your crypto trading using artificial intelligence. +No matter your experience level, this guide is designed to provide a step-by-step process for setting up and executing your first automated cryptocurrency trade using AI. + +<!--truncate--> + +## AI in trading + +Artificial intelligence (AI) has revolutionized how we trade. It helps in analyzing massive amounts of data, predicting market trends, and executing trades at lightning speed. To trade using AI, you need to choose a reliable AI trading software, set your trading parameters, and let the system do the rest. + +![trading](/images/blog/trading-with-ai-introduction/trading.jpg) + +## Understanding reinforcement learning + +Reinforcement Learning is a type of machine learning (itself a type of AI) where an agent learns to make decisions by taking actions in an environment to maximize some notion of cumulative reward. An 'agent' in this context refers to the algorithm or program that is making the decisions. It operates by interacting with its environment (in this case, the trading market), taking actions (such as buying or selling stocks), and receiving rewards or penalties based on the outcome. The goal of this agent is to learn over time which actions lead to the best outcomes, in this case, the most profitable trades. +In trading, we can use reinforcement learning to understand market dynamics, make accurate predictions, and execute profitable trades. + +![brain](/images/blog/trading-with-ai-introduction/brain.jpeg) + +## OctoBot script + +[OctoBot script](/guides/octobot-script) is engineered to provide traders with a framework for crafting and testing crypto trading strategies. + +It offers a suite of keywords (Python methods) which simplifies the process of creating trades and calculating TA indicators like RSI, thus facilitating users to design their unique trading strategies. + +OctoBot script also allows users to test their strategies using past data through the [backtesting](/guides/octobot-usage/backtesting) feature. With the generation of an advanced report at the end of each backtesting, users gain valuable insights into the performance of their strategies, enabling a comprehensive understanding of their effectiveness. + +## How to use OctoBot script to trade with AI + +- Install OctoBot script by following the get started guide on <a href="https://github.com/Drakkar-Software/OctoBot-Script" rel="nofollow">github</a> +- Install AI requirements with + +``` +pip install -r requirements-ai.txt +``` + +- Install the necessary dependencies to be able to run the script on your GPU by following <a href="https://gretel.ai/blog/install-tensorflow-with-cuda-cdnn-and-gpu-support-in-4-easy-steps" rel="nofollow">this tutorial</a> +- Start to train your own model (model = the "brain" of your AI) on ETH/BTC using + +``` +python3 ai-example.py -t -s ETH/BTC -e 10 +``` + +- Once done your AI model will be saved in the weights folder. Find its name and add it in the end of the following command to run a backtesting using your new AI model + +``` +python3 ai-example.py -p -s ETH/USDT -w weights/202310050722-final-dqn.h5 +``` + +_202310050722-final-dqn.h5 is an example of weight, update it with your own_ + +- Here is an example of a backtesting using an AI model built using OctoBot script AI. There is no human action behind it, all the trades have been triggered by the AI. + +![strategy-ouput](/images/blog/trading-with-ai-introduction/strategy-output.png) + +If you found this content helpful, please give us feedback in our community <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> and <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a>! Your support will encourage us to create a series of detailed guides exploring more strategies and insights into AI trading. diff --git a/docs/blog/2023-10-18-open-source-trading-software.md b/docs/blog/2023-10-18-open-source-trading-software.md new file mode 100644 index 0000000000..7f9fa8f731 --- /dev/null +++ b/docs/blog/2023-10-18-open-source-trading-software.md @@ -0,0 +1,83 @@ +--- +title: "The Power of Open Source Trading Software" +description: "Understand the benefits of open source trading software." +slug: "open-source-trading-software" +date: "2023-10-18" +authors: ["guillaume"] +tags: ["Open source", "Cryptocurrency", "Trading", "Software", "Educational"] +image: "/images/blog/open-source-trading-software/cover.png" +--- + + + +# The Power of Open Source Trading Software + +![cover](/images/blog/open-source-trading-software/cover.png) + +Welcome to your guide to open source trading software. After going through this blog post, you'll have a thorough understanding of open source trading platforms, with a specific focus on open source crypto trading bots. We'll explore the main advantages they offer and how they use community feedback to optimize the trading experience. + +<!--truncate--> + +## Table of contents + +- [Definition of Terms](#definition-of-terms) +- [Benefits of Open Source Software](#benefits-of-open-source-software) +- [The open source community](#the-open-source-community) +- [OctoBot: an open source trading software](#octobot-an-open-source-trading-software) +- [Conclusion](#conclusion) + +## Definition of terms + +To fully understand the notion of open source trading software, it's essential to know the individual terms: open source, trading, and crypto trading. + +- **Open Source**: Open source refers to something that is publicly accessible and can be modified or shared. In the context of software, it means the source code is freely available for users to inspect, modify, or enhance according to their needs. Most open source software are available on <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">github</a>. + +- **Trading**: Trading is a fundamental economic concept involving the buying and selling of goods and services, with compensation paid by a buyer to a seller, or the exchange of goods or services between parties. + +- **Crypto Trading**: This refers to the act of buying or selling a cryptocurrency via a trading exchange like <a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">Binance</a>. + +- **Trading Platform Open Source**: This is a publicly accessible software where cryptocurrencies and other forms of assets are bought and sold. Since it is open source, users can modify it to suit their specific trading needs. + +- **Open Source Crypto Trading Bot**: This is a specific type of trading platform which is open source and that uses algorithms to buy and sell cryptocurrencies on behalf of the user, based on parameters set by the user. + +![crypto](/images/blog/open-source-trading-software/crypto.png) + +## Benefits of open source software + +When it comes to trading software, going open source comes with several benefits for the user. + +- **Flexibility**: Open source software allows users to customize and modify the software to fit their specific needs. + +- **Cost Efficiency**: They are generally free, reducing the cost of trading operations. + +- **Community Support**: Open source software often has a supportive community that can provide assistance and share innovative ideas. + +- **Transparency**: Open source software allows users to scrutinize, audit and improve upon the code, promoting trust and security. + +## The open source community + +Open source software thrives on community involvement. The collective experience of the community helps to: + +- **Improve the software**: Users can spot bugs, suggest improvements, and contribute to the development of the software. + +- **Foster innovation**: Different users bring diverse perspectives, leading to novel solutions and features. For example, OctoBot encourages users to share feedback on the software with a <a href="https://feedback.octobot.cloud/" rel="nofollow">dedicated website</a>. + +- **Provide support**: The community can offer assistance and share knowledge, making it easier for new users to navigate the software. + +## OctoBot: an open source trading software + +![A man relaxing in his couch while OctoBot is making money by automating cryptocurrency strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +OctoBot is a top recommendation when it comes to open source crypto trading bots. The reasons are: + +- **Customization**: Each OctoBot strategy can be customized to create your own trading strategy. A step by step guide is available in our [customization guide](/guides/octobot-configuration/profile-configuration). + +- **Community**: OctoBot has a robust community that continually contributes to its development and offers support to new users. You can join it on <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> and <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a>. + +- **Transparency**: OctoBot's open-source nature ensures transparency, allowing users to verify its security and fairness. + +- **Diversity**: OctoBot supports most major exchanges and each of their cryptocurrencies, find your exchange on the [detailed exchanges list](/guides/exchanges). + +## Conclusion + +In essence, open source trading platforms, such as OctoBot, offer a cost-effective, flexible and transparent trading experience. Stay engaged with the community, keep your software updated, and embrace the journey of successful trading ahead. diff --git a/docs/blog/2023-10-30-octobot-1-0-2-whats-new.md b/docs/blog/2023-10-30-octobot-1-0-2-whats-new.md new file mode 100644 index 0000000000..8d81129bfb --- /dev/null +++ b/docs/blog/2023-10-30-octobot-1-0-2-whats-new.md @@ -0,0 +1,49 @@ +--- +title: "OctoBot 1.0.2 - What's new" +slug: "octobot-1-0-2-whats-new" +date: "2023-10-30" +authors: ["guillaume"] +tags: ["Tradingview", "Chatgpt", "Release", "DCA", "Backtesting"] +image: "/images/blog/octobot-1-0-2-whats-new/cover.png" +--- + + + +# OctoBot 1.0.2 - What's new + +![cover](/images/blog/octobot-1-0-2-whats-new/cover.png) + +## Introducing OctoBot 1.0.2 + +We're thrilled to announce the release of OctoBot 1.0.2, an upgraded version with many improved features, thanks to the great feedback we received from you all. + +<!--truncate--> + +## Revamped ChatGPT strategy + +In OctoBot 1.0.2, we've revamped the ChatGPT strategy. Until now, you couldn't run a [backtesting](/guides/octobot-usage/backtesting) on a chatgpt profile due to the excessive prompt, costing around $2 for 6 months history, hence we disallowed it. +However, with the new update, you can run backtesting on some gpt settings because we've already computed the prompt against some exchanges pairs historical data which are downloaded from our servers. + +We've also shifted from Daily Trading mode to a smart DCA trading mode in the chatgpt profile. The previous mode was no longer suited to the current market, hence we updated it to DCA trading mode to develop more accurate sell orders following a chatgpt entry signal. + +Additionally, we've introduced a new prompt setting. You can now ask chatgpt with pure candle history (without any TA indicator) and include the number of candles you want. + +![chatgpt settings](/images/blog/octobot-1-0-2-whats-new/gpt-evaluator-settings.png) + +## Improved TradingView connection + +We've also made noteworthy improvements to the TradingView connection, thanks to some valuable feedback from our OctoBot users who use the TradingView integration. +It's now possible to send a cancel order signal to cancel all current open orders for a symbol, or only to cancel an open order on a specific side using the param SIDE. More details on this can be found on [the alert format guide](/guides/octobot-interfaces/tradingview/alert-format#canceling-orders). + +Special thanks to @KidCharlemagne, an active member of our OctoBot <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord community</a>, for helping with the complete refactor of the TradingView [configuration guide](/guides/octobot-interfaces/tradingview). It's clearer now, with ample examples. + +![TradingView guide](/images/blog/octobot-1-0-2-whats-new/tv-guides.png) + +## Some bugfix + +We've also squashed some bugs in this release. After careful checks, we discovered an issue in the OctoBot [backtesting engine](/guides/octobot-usage/backtesting) that allowed for premature filling of open orders. + +## Conclusion + +We can't wait to hear your thoughts on this new version. +Please use this <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">feedback link</a> to share your suggestions and what you'd like to see in our next release. diff --git a/docs/blog/2023-11-01-smart-dca-making-of.mdx b/docs/blog/2023-11-01-smart-dca-making-of.mdx new file mode 100644 index 0000000000..713f458326 --- /dev/null +++ b/docs/blog/2023-11-01-smart-dca-making-of.mdx @@ -0,0 +1,166 @@ +--- +title: "Smart DCA trading strategies making of" +slug: "smart-dca-making-of" +date: "2023-11-01" +authors: ["guillaume"] +tags: ["DCA", "Strategy designer", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Educational"] +image: "/images/blog/smart-dca-making-of/cover.png" +--- + + + +# Smart DCA trading strategies making of + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="519pwSV1uwE" title="Smart DCA with OctoBot" /> + +At OctoBot, we are always trying to find new ways to trade. After experimenting with many types of strategies, we realized that sometimes, keeping it simple just works. + +<!--truncate--> + +Our idea was to take the concept of Dollar Cost Averaging and adapt it to smaller scale investments. + +## Introducing Smart DCA + +<a href="https://www.investopedia.com/terms/d/dollarcostaveraging.asp" rel="nofollow">Dollar Cost Averaging (or DCA)</a> is a very well known investment strategy where you buy on +a regular basis in order to profit from local price drops. It allows investors +to reduce their overal buying costs. + +The DCA concept can also be applied to selling an asset. Selling something over a long period of time, when the price is going up, is allowing to profit from the whole range of prices and increase your average selling price. + +Buying and selling using DCA is a way to maximise profits when investing in and out of a coin. + +> After running tons of tests with historical market data, we realized that this idea also works very well on shorter term trades. + +We also had the surprise to observe an interesting side effect of smaller term DCA: it's a great way to profit from markets that are not moving synchronously. In other words, it can allow to profit from a rising ETH in the morning, take profits at noon and profit from a rising SOL in the afternoon, as long as ETH and SOL are not moving at the same time. + +## Video summary + +### Pro and cons of the strategy + +1. Advantages of Smart DCA: + +- It works very well in sideway markets and uptrends. +- It does not require big market moves, simple + or minus 0.5 to 1% are enough to make profits. +- It never sells at a loss. + +2. Drawbacks of Smart DCA: + +- It should not be used in downward markets as it would lock your funds in sell orders (since it's not selling at a loss). + +### Finding the right coins to trade + +In order to multiply trades, trading assets that are moving up and down at different times is optimal. We call such assets **complementary**. + +In the end, to optimize profits, the most important part is to include smartly chosen traded assets, so that the strategy trades as much as possible while lowering risk by investing in multiple coins. + +## When to use the Smart DCA strategy ? + +The Smart DCA strategy is adapted to sideways or upwards markets. In order to be able to quickly fill its sell orders, it relies on the market not being in a pure downtrend. + +![cover](/images/blog/smart-dca-making-of/cover.png) + +Using the Smart DCA strategy in a downwards market might not allow sell order to be filled and therefore lock funds in open sell orders. While this is not a selling at a loss, it is still non optimal and can prevent generating profits from other cryptocurrencies. + +## Deep dive in the strategy technicals + +Let's now explore the very technical aspects of the Smart DCA strategy. + +After the initial step of identifying complementary coins to trade, the next part is to optimize the way Smart DCA will trade those coins. + +This comes down to how entries and exits should be traded, how much to assign to each entry signal, how to configure take profits, all of this while limiting inherent risks associated with the traded assets. + +We will split this into 3 mains topics: + +1. Configuring entries and exits. +2. Taking multiple markets into account. +3. Going beyond [backtesting](/guides/octobot-usage/backtesting) results. + +### Optimizing position entries and exits + +Profits in Smart DCA come from the difference between the sell and buy prices. + +The higher the difference, the bigger the profits. However the bigger the risks of not selling the asset. + +In a ideal world, your Smart DCA configuration is so that each entry quickly finds its exit because the exit price is configured according to your traded assets typical behavior. However in reality this is not always true. + +Therefore the goal of the strategy's entry and exit configuration is to find the sweet point for your traded assets where the large majority of your exit orders end up filled within the next hours or days at maximum. This allows to quickly free up funds and jump to the next opportunity. We don't want to be waiting for a fill that might take weeks to happen and prevent you from making money with this trade funds using other traded markets. + +![profitable results with 0.8 percent take profit](/images/blog/smart-dca-making-of/profitable-results-with-0.8-percent-take-profit.png) +_Steady portfolio growth and regular trades using 0.8% take profit targets_ + +At OctoBot cloud, we realized that for the top 50 altcoins, this point is usually around 0.8% profits. This configuration allows to make profits even after exchange fees while quickly freeing funds to multiply trade opportunities and limit asset exposure. + +![profitable but risky results with 2 percent take profit](/images/blog/smart-dca-making-of/risky-results-with-2-percent-take-profit.png) +_Unoptized portfolio growth: missed trades and higher volatily using 2% take profit targets_ + +Of course this number is highly correlated to the volatility of the traded pairs. If you are trading pairs from top 100 to 200 ranks, it's possible that a 1.5% take profit target would be more profitable as those pairs are much more volatile. + +### Trading with multiple pairs + +A key concept to optimize your returns using Smart DCA is to trade complementary coins. This allows to multiply trades while reducing risk by spreading funds between different assets. + +**But how many coins should be traded ?** + +Overal, the more the better providing 3 conditions: + +1. All assets must remain complementary (not making the same moves at the same time), otherwise profits are not increased. +2. Assets should display a similar volatility. +3. Having enough initial funds to create orders on every market. + +#### Complementary assets + +As explained on the video, the best way we found to identify complementary assets is to select assets from different naratives. This means coins that serve different purposes and therefore won't be moving from the same market events or trends. + +Here are examples of coins naratives: + +- Value transfer/storage coins (BTC, XRP) +- Blockchain coins (ETH, ADA, SOL) +- Privacy coins (XMR, ZEC) +- Oracle coins (LINK, BAND) +- Exchange coins (BNB, UNI) +- Meme coins ([DOGE](https://www.octobot.cloud/what-is-dogecoin), SHIB) +- Supply chain coins (VET) + +There are many more naratives such as gaming, metaverse, NFTs and others. + +An alternative way to explore coin narative is to use coin explorers categories: +![coingecko top coin categories](/images/blog/smart-dca-making-of/coingecko-coin-categories.png) +_CoinGecko's <a href="https://www.coingecko.com/en/categories" rel="nofollow">top coin categories</a>_ + +#### Volatility + +As the goal of the strategy is to quickly go in and out of each asset, it is important that each asset displays overall the same volatily. This allows to fine tune entries and exits goals in an efficient manner. + +Using markets with different volatility present the following risks: + +- Exiting the market too early and missing on profits from more volatile assets. +- Not exiting the market when an opportunity arises due to targets adapted for a higher volatility market. + +#### Initial funds + +According to our tests, the ideal way to size orders on DCA is to use a small percent of your total traded portfolio value on each order. Here the meaning of _small_ can vary depending on your context and goals but overal the idea is the following: + +- Using a `%t` order amount settings to size orders according to the total value of traded assets holdings and keep order sizes consistent. +- Sizing `%t` in a manner that complies with the exchange minimal order size rules. For example this is usually $5 or $10 (or USD equivalent) on Binance. Please note that the current version of backtesting is very permissive on this topic and it's better to use the [live trading simulator](/guides/octobot-usage/simulator) or manually check order sizes if you are unsure about minimal order sizes +- Keeping the order amount smaller as you increase the number of traded pairs to profit of each pair and reduce chances of having a large part of your portfolio being stuck in sell orders of a particular asset when your exits did not yet trigger. + +![binance trading rules min funds for each market](/images/blog/smart-dca-making-of/binance-trading-rules-min-funds.png) +_Binance's <a href="https://www.binance.com/en/trade-rule" rel="nofollow">trading rules and minimal order size for each market</a>_ + +On OctoBot cloud, strategies usually trade with between 5% and 8% of the portfolio in each order. This allows to benefit from multiple pairs while allowing for minimum initial portfolios in the range of 100 to 200 USD-equivalent. + +### Beyond backtesting results + +When creating a trading strategy, it's always important to test it with [backtesting](/guides/octobot-usage/backtesting) to make sure the strategy behaves as expected. Backtesting can also be used to optimize a strategy settings. This is what we do at OctoBot when we create a new strategy. + +However, it's important to keep in mind that backtesting is only using past data. Therefore there are a few key points to pay attention to: + +1. Never over-optimize a strategy for a single backtesting context as the future is very rarely the exact repetition of the past. Prefer finding settings that work good (but not necessarily perfect) in most relevant historical range of your traded assets. +2. Carefully identify areas with no buy trades when there should be some. This usually means that your portfolio is completely invested and probably that you are missing a few opportunities. Your settings can most likely be improved for the selected market. +3. Assets that look complementary only based on their past price chart doesn't mean they will keep doing it. That's why having clear fundamental reasons to explain their price complementary (such as the narative) is better than just relying on price charts. + +## Disclaimer + +Please be advised that the contents of this article are intended FOR GENERAL INFORMATION PURPOSES and not financial advice. The information contained herein is for informational purposes only. Nothing herein shall be construed to be financial legal or tax advice. The content of this article is solely the opinions of the author and/or the OctoBot team. None of those are licensed financial advisors or registered investment advisors. Purchasing cryptocurrencies poses considerable risk of loss. The author and/or the OctoBot team does not guarantee any particular outcome. Past performance does not indicate future results. diff --git a/docs/blog/2023-11-02-trading-using-chat-gpt.mdx b/docs/blog/2023-11-02-trading-using-chat-gpt.mdx new file mode 100644 index 0000000000..a39717d8b0 --- /dev/null +++ b/docs/blog/2023-11-02-trading-using-chat-gpt.mdx @@ -0,0 +1,92 @@ +--- +title: "Trading using ChatGPT" +slug: "trading-using-chat-gpt" +date: "2023-11-02" +authors: ["paul"] +tags: ["ChatGPT", "AI", "Cryptocurrency", "Trading", "OctoBot cloud"] +image: "/images/blog/trading-using-chat-gpt/cover.png" +--- + + + +# Trading using ChatGPT + +![cover](/images/blog/trading-using-chat-gpt/cover.png) + +## Ask ChatGPT about the future of the market + +Wouldn't it be great to know what <a href="https://chat.openai.com/" rel="nofollow">ChatGPT</a> thinks about a particular market direction ? + +While ChatGPT can't directly answer question such as "Will BTC/USD go up tomorrow ?", it can answer many related questions including forecasting trends. + +This is a great opportunity to take advantage of the extremely powerful AI that is ChatGPT (in version 3 or later) and integrate it directly in your trading strategies. + +## A ChatGPT Trading strategy + +In this video, we are showcasing a ChatGPT trading strategy we created on [OctoBot cloud](/). We show what it looks like and how it performs. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="BV4ZHQrIpRQ" title="ChatGPT Crypto trading strategies using OctoBot" /> + +<!--truncate--> + +If you are insterested in knowing more about this ChatGPT trading strategy, we created this [ChatGPT strategy deep dive](chatgpt-strategy-deep-dive) where we cover in details: + +- The buy and sells of the strategy +- Its performances using paper trading and historical data ([backtesting](/guides/octobot-usage/backtesting)) +- The performances of an OctoBot trading using this strategy for a few weeks now + +**[Invest with ChatGPT](https://www.octobot.cloud)** + +## ChatGPT in your OctoBot + +Starting from OctoBot 0.4.47, we added a new evaluator: the `GPTEvaluator`. This evaluator can be used directly with the profile `GPT Trading`. + +When enabled, it will automatically ask ChatGPT about is opinion on every traded pair, on each time frame. + +The type of data given to ChatGPT can be configured: the value of the asset price or different kind of technical evaluator can be sent to ChatGPT for it to make its opinion. + +Once enabled, `GPTEvaluator` behaves like any other technical evaluator, this means that it can combined with others, used to trade, to create trading signals or to notify you when the situation changes. + +You can therefore: + +- Automatically trade based only on ChatGPT's predictions +- Trade combining ChatGPT's predictions with other evaluators +- Get ChatGPT's view on the market at anytime from the web interface and market status and get notified on any change + +Lean more details on how to trade with ChatGPT on OctoBot on the [ChatGPT trading guide](/guides/octobot-trading-modes/chatgpt-trading). + +## Follow ChatGPT trading signals + +We've also set up a [ChatGPT dashboard](https://www.octobot.cloud/tools/crypto-prediction) where you can follow the latest ChatGPT crypto trading signals. +The goal of this new page is simple. It's to show you the latest crypto trading signals from our ChatGPT strategy. This way, you can see how it works and confirms that it actually works. + +<div style={{textAlign: "center"}}> + <div> + ![ChatGPT trading signals](/images/blog/trading-using-chat-gpt/gpt-free-tool.png) + _The latest ChatGPT trading signals_ + </div> +</div> + +For more real-time information, we've also created a <a href="https://twitter.com/OctoBotGPT" rel="nofollow">Twitter / X account</a> and <a href="https://t.me/octobotgpt" rel="nofollow">Telegram channel</a> publishing new signals. By following these accounts, you can get immediate notifications on the latest signals, making it easier to stay informed about the crypto market. + +Check out our [Trading with ChatGPT signals](introducing-chatgpt-trading-tool) blog article for all the details about those trading signals. + +<div style={{textAlign: "center"}}> + **[View ChatGPT strategies](https://www.octobot.cloud/explore?category=strategies)** +</div> + +## How to use ChatGPT in your OctoBot + +As ChatGPT's automated calls is a paid feature from <a href="https://openai.com/" rel="nofollow">openai.com</a>, there are 3 ways to use it in OctoBot and to [trade with ChatGPT](/guides/octobot-trading-modes/chatgpt-trading). + +### 1. Use a ChatGPT-based trading strategy on OctoBot cloud + +Using [OctoBot cloud](https://www.octobot.cloud/), you can use ChatGPT trading strategies in a very simply way for free. Use them with your exchange account or risk free with [paper trading](/investing/paper-trading-a-strategy). + +### 2. ChatGPT from your OctoBot trading bot + +To use ChatGPT from your [OctoBot trading bot](https://www.octobot.cloud/trading-bot), just provide your own OpenAI API key and store it into your OctoBot configuration. The regular OpenAI pricing will then be applied. + +More details on how to setup ChatGPT and how to estimate the cost of its requests on the [ChatGPT guide](/guides/octobot-interfaces/chatgpt) diff --git a/docs/blog/2023-11-06-introducing-chatgpt-trading-tool.mdx b/docs/blog/2023-11-06-introducing-chatgpt-trading-tool.mdx new file mode 100644 index 0000000000..8e47faa5fa --- /dev/null +++ b/docs/blog/2023-11-06-introducing-chatgpt-trading-tool.mdx @@ -0,0 +1,141 @@ +--- +title: "Introducing ChatGPT crypto predictions" +description: "Discover how to use ChatGPT to predict the next crypto price movement. This article highlights our tool for tracking the latest predictions and our Twitter and Telegram accounts for real-time notifications." +slug: "introducing-chatgpt-trading-tool" +date: "2023-11-06" +authors: ["paul"] +tags: ["Chatgpt", "Cryptocurrency", "Trading", "Strategy", "AI", "GPT"] +image: "/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png" +--- + + + +# Trading with ChatGPT signals + +We're excited to introduce our latest tool, designed to provides straightforward BUY or SELL signals for various cryptocurrencies based on AI predictions. + +## The power of ChatGPT + +### What's ChatGPT + +Our new tool harnesses the power of ChatGPT, an artificial intelligence model developed by <a href="https://openai.com/" rel="nofollow">OpenAI</a>. + +<a href="https://openai.com/chatgpt" rel="nofollow">ChatGPT</a> has been trained on diverse internet text, and it uses +this knowledge to generate text that is relevant to the input it receives. In +the context of our tool, it uses its knowledge of market price history, +specifically candlestick price patterns, to make predictions about +cryptocurrency trends. + +<div style={{textAlign: "center"}}> + <div> + ![chatgpt-logo](/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png) + *ChatGPT logo* + </div> +</div> + +### Why ChatGPT + +Candlestick patterns, which display the high, low, opening and closing prices for a specific period, are a fundamental part of technical analysis in trading. +ChatGPT uses its <a href="https://openai.com/blog/chatgpt" rel="nofollow">extensive knowledge base</a> to identify these patterns in the historical price data of various cryptocurrencies. + +By recognizing these patterns, it can then predict potential future trends by comparing current market data with similar historical situations. +This analysis forms the basis of the BUY or SELL signals that our tool generates. + +## How it works + +We feed ChatGPT the current prices of different cryptocurrencies and ask it to determine the likelihood of these prices going up or down. +Currently, our tool uses only candlestick prices for its predictions. However, in the future, we plan to include technical analysis indicators for a more detailed and accurate prediction, giving you a more complete perspective on potential market movements. + +![prompt](/images/blog/introducing-chatgpt-trading-tool/prompt.png) + +Here is how it works: + +1. We ask ChatGPT to predict the potential price direction (either up or down) based on the most recent cryptocurrency price data, as illustrated in the following screenshot. +2. Then, we use ChatGPT's response to create a trading signal, which depends on the predicted price direction and how confident ChatGPT is in that prediction. + +More details on how to trade with ChatGPT on OctoBot on the [ChatGPT trading guide](/guides/octobot-trading-modes/chatgpt-trading). + +**[Invest with ChatGPT](https://www.octobot.cloud)** + +## The new dashboard + +We've created this [free ChatGPT crypto dashboard](https://www.octobot.cloud/tools/crypto-prediction) featuring the most recent cryptocurrency trading signals derived from ChatGPT. + +<div style={{textAlign: "center"}}> + ![shib-prediction](/images/blog/introducing-chatgpt-trading-tool/tool-screenshot.png) + *Screenshot of the ChatGPT trading signals showcase page* +</div> + +This feature can be used by anyone who wants to keep track of what ChatGPT thinks is going to happen with different cryptocurrencies. +It's like having a quick, up-to-date overview of the AI's predictions, which can aid in making investment decisions. + +## Receiving notifications + +We've taken it a step further by introducing dedicated Twitter and Telegram accounts that post a signal each time a new prediction with high confidence is made by ChatGPT. +By following these accounts, you can stay updated with the most interesting cryptocurrency trading signals predicted by ChatGPT. +This way, you'll never miss an opportunity. + +### Twitter / X account + +Follow <a href="https://twitter.com/OctoBotGPT" rel="nofollow">the OctoBotGPT X account</a> to be notified for each new ChatGPT prediction via X / Twitter. + +<div style={{textAlign: "center"}}> + <div> + ![sol Solana Twitter notification ChatGPT predicts SOL going up with 90% + change](/images/blog/introducing-chatgpt-trading-tool/sol-tweet.png) + *OctoBotGPT SOL/USDT prediction tweet* + </div> +</div> + +### Telegram account + +Join the <a href="https://t.me/octobotgpt" rel="nofollow">the OctoBotGPT Telegram</a> to be notified for each new ChatGPT prediction via Telegram. + +<div style={{textAlign: "center"}}> + <div> + ![sol Solana Telegram notification ChatGPT predicts SOL going up with 90% + change](/images/blog/introducing-chatgpt-trading-tool/sol-telegram.png) + *OctoBotGPT SOL/USDT Telegram prediction* + </div> +</div> + +## ChatGPT predictions performances + +Here is an example of a recent tweet published by <a href="https://twitter.com/OctoBotGPT" rel="nofollow">OctoBotGPT twitter account</a>: + +<div style={{textAlign: "center"}}> + <div> + ![shib Shiba Inu Twitter notification ChatGPT predicts SHIB going up with + 90% change](/images/blog/introducing-chatgpt-trading-tool/shib-tweet.png) + *OctoBotGPT SHIB/USDT prediction tweet at 9 PM* + </div> +</div> + +<div style={{textAlign: "center"}}> + ![shib Shiba Inu SHIB/USDT price going up after ChatGPT prediction on + twitter](/images/blog/introducing-chatgpt-trading-tool/shib-prediction.png) + *SHIB/USDT price on Binance at 9 PM* +</div> + +As an illustration, this tweet was published at 9pm, predicting a high probability of the price of SHIB increasing. +This turned out to be a great prediction, as just a few minutes after the tweet was posted, the price of SHIB indeed started to rise. + +## ChatGPT trading strategies + +To automate trading according to ChatGPT signals, we created strategies to automatically trade upon a new ChatGPT signal. + +We are showcasing those strategies in this video. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="BV4ZHQrIpRQ" title="ChatGPT Crypto trading strategies using OctoBot" /> + +<!--truncate--> + +You will also find more information on ChatGPT-based trading strategies on our dedicated article: [Trading using ChatGPT](trading-using-chat-gpt). + +<div style={{textAlign: "center"}}> + **[View ChatGPT strategies](https://www.octobot.cloud/explore?category=strategies)** +</div> + +Got a question or request regarding GPT trading signals? Don't hesitate! Reach out to the OctoBot community on <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> and <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a>. diff --git a/docs/blog/2023-11-10-introducing-the-new-octobot-mobile-app.md b/docs/blog/2023-11-10-introducing-the-new-octobot-mobile-app.md new file mode 100644 index 0000000000..f9ae4f51a8 --- /dev/null +++ b/docs/blog/2023-11-10-introducing-the-new-octobot-mobile-app.md @@ -0,0 +1,51 @@ +--- +title: "Introducing the new OctoBot App" +description: "Discover the new OctoBot Android App. Track your OctoBots profits, assets, and balance in real-time from your mobile." +slug: "introducing-the-new-octobot-mobile-app" +date: "2023-11-10" +authors: ["paul"] +tags: ["Android", "Mobile", "OctoBot cloud"] +image: "/images/blog/introducing-the-new-octobot-mobile-app/cover.png" +--- + + + +# Introducing the new OctoBot Android App + +<div style={{textAlign: "center"}}> + ![octobot-android-app](/images/blog/introducing-the-new-octobot-mobile-app/cover.png) +</div> + +We are thrilled to introduce the OctoBot Android app. This new application brings the ability to follow your OctoBot, whether self-hosted or cloud ones, right from your mobile. + +<!--truncate--> + +## What is the OctoBot Android App? + +The <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=introducing-mobile-app" rel="nofollow">OctoBot Android App</a> is a mobile application designed to provide easy access to your OctoBot trading bots. It allows you to log in using your OctoBot cloud account and keep track of your trading bots directly from your Android device. + +## Features of the OctoBot Android App + +For now, the OctoBot Android app allows you to follow bot profit, assets, and balance. + +<div> + <div> + ![login-view](/images/blog/introducing-the-new-octobot-mobile-app/app-signin.webp) + </div> + <div> + ![bots-view](/images/blog/introducing-the-new-octobot-mobile-app/app-bots.webp) + </div> +</div> + +## Our Commitment to User Feedback + +We believe that the best way to create a tool that truly meets the needs of our users is by listening to what they have to say. This is why we are inviting users to provide feedback on the OctoBot Android app. + +We have set up a <a href="https://feedback.octobot.cloud/octobot-mobile-app" rel="nofollow">feedback website</a> where you can share your thoughts, suggestions, and experiences with the app. + +## In Conclusion + +With the OctoBot Android app you can now manage your trading bots from anywhere, with just a few taps on your mobile device. +While we are proud of this achievement, we know there is still much to be done. Your feedback is crucial, we encourage you to share your experience on our <a href="https://feedback.octobot.cloud/octobot-mobile-app" rel="nofollow">feedback website</a>. + +So, why wait until tomorrow? Download the OctoBot Android app now: search "OctoBot" in your <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=introducing-mobile-app" rel="nofollow">Play Store</a>. diff --git a/docs/blog/2023-11-17-paper-trading-with-octobot.md b/docs/blog/2023-11-17-paper-trading-with-octobot.md new file mode 100644 index 0000000000..9c59edfa33 --- /dev/null +++ b/docs/blog/2023-11-17-paper-trading-with-octobot.md @@ -0,0 +1,54 @@ +--- +title: "Paper trading with OctoBot" +slug: "paper-trading-with-octobot" +date: "2023-11-17" +authors: ["guillaume"] +tags: ["Trading", "Strategy", "OctoBot cloud"] +image: "/images/blog/paper-trading-with-octobot/cover.jpg" +--- + + + +# Paper trading with OctoBot + +<div style={{textAlign: "center"}}> + ![cover](/images/blog/paper-trading-with-octobot/cover.jpg) +</div> + +## Introducing paper trading + +Paper trading is service allowing to test trading strategies in live conditions using a virtual portfolio. + +<!--truncate--> + +By using paper trading, you can easily: + +- **Test** the trading strategies you are interested in before using your real funds +- **Experiment** all the trading strategies you are curious to know more about + +## OctoBot's stance on paper trading + +At OctoBot, transparency is one of our core values. It is the reason why OctoBot is [open source](/guides/octobot) and why each strategy comes with its historical results. + +In order to bring this one step further, we decided to make [paper trading](/investing/paper-trading-a-strategy) of trading strategies **free and unlimited**. + +![trading account type choice real or paper trading](/images/guides/trading-account-type-choice-real-or-paper-trading.png) + +While most trading robot services choose to charge for paper trading or limit its duration, we decided to stay true to our ethos and include paper trading in our free services for all users. + +This means that when using OctoBot cloud, you can: + +1. **Explore**: Test the trading strategy (or strategies !) you are interested in using paper trading +2. **Invest**: Once you find a trading strategy you like, start an OctoBot with your real funds +3. **Optimize**: Keep experimenting other trading strategies risk free with your paper trading OctoBot + +## How to start a paper trading OctoBot ? + +As paper trading is using virtual funds, OctoBot cloud doesn't need your exchange credentials to run a paper trading OctoBot. +This means that you can very quickly experiment with any strategy on OctoBot cloud in just 3 steps: + +1. Create your [OctoBot](https://www.octobot.cloud) account +2. Select the strategies your want to use +3. Choose paper trading and the simulated portfolio you want to trade with + ![paper trading virtual portfolio configuration](/images/guides/paper-trading-virtual-portfolio-configuration.png) +4. Follow your new OctoBot either from <a href="https://www.octobot.cloud/bots" rel="nofollow">OctoBot cloud</a> or the <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=paper-trading" rel="nofollow">OctoBot app</a> diff --git a/docs/blog/2023-11-29-octobot-pro-plan-early-access.md b/docs/blog/2023-11-29-octobot-pro-plan-early-access.md new file mode 100644 index 0000000000..be3e3294ce --- /dev/null +++ b/docs/blog/2023-11-29-octobot-pro-plan-early-access.md @@ -0,0 +1,92 @@ +--- +title: "OctoBot cloud trading bots early access" +slug: "octobot-pro-plan-early-access" +date: "2023-11-29" +authors: ["guillaume"] +tags: ["Strategy designer", "AI", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png" +--- + + + +# OctoBot cloud trading bots plan early access + +![octobot cloud trading bots plan early access announcement](/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png) + +## OctoBot cloud trading bots + +With [OctoBot cloud trading bots](https://www.octobot.cloud/trading-bot), you get access to the full power of OctoBot. + +<!--truncate--> + +- Your OctoBot will always be live and up to date: the OctoBot team takes care of all the technicals +- Enjoy Pro-exclusive extensions such as the [Strategy Designer](/guides/octobot-usage/strategy-designer) to create and optimize your best trading strategies +- You get personalized support to be sure that you have everything you need to start your OctoBot properly according to the strategy you want +- Easily use and customize OctoBot cloud strategies +- Simply and securely automate your TradingView strategies trades without the need for custom configuration +- Enjoy the full potential of ChatGPT based strategies without having to pay for an OpenAI subscription + +**[Start your OctoBot](https://www.octobot.cloud)** + +## Your improved OctoBot + +OctoBot cloud trading bots give you access to an improved version of the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a>. But unlike the open source version where you have to make sure that your OctoBot remains online, pay for server costs and manually handle updates, when using a cloud OctoBot trading bot, you OctoBot is always online and operates in the best conditions. + +This means that using the cloud OctoBots, you can: + +- Create your own strategies using technical indicators, [TradingView](#easy-tradingview-automation), Reddit or even Google Trends +- Use and customize existing strategies such as, [Dollar Cost Averaging](smart-dca-making-of), Grid or [ChatGPT](#chatgpt-evaluations) +- Trade Spot and Futures markets on any [supported exchange](/guides/exchanges). There is no restriction to the exchange you can use +- Use [simulated money (paper trading)](/guides/octobot-usage/simulator) or your real exchange account funds +- Optimize your strategies by running unlimited backtestings on any type of market and exchange using both the [open source backtesting](/guides/octobot-usage/backtesting) or the [Strategy Designer](#the-strategy-designer) +- Access your OctoBot directly from [your browser](/guides/octobot-interfaces/web), <a href="https://www.octobot.cloud/bots" rel="nofollow">the bots dedicated page on OctoBot cloud</a>, [Telegram](/guides/octobot-interfaces/telegram) or the <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp" rel="nofollow">OctoBot app</a> at anytime + +Let's explore the exclusive benefits of using the [cloud OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +## The Strategy Designer + +[Cloud OctoBot trading bots](https://www.octobot.cloud/trading-bot) have acccess to the exclusive [Strategy Designer](/guides/octobot-usage/strategy-designer), which is the most advanced trading strategy design tool we created. + +![Strategy Designer preview with bitcoin, ethereum and polygon historical charts](/images/blog/octobot-pro-plan-early-access/strategy-designer-preview.png) + +Using the Strategy Designer, you can easily customize, test and compare the performances of every strategy. + +Get in-depth insights on your trading strategy historical Profit and Loss and portfolio, explore each trade, compare results across runs and much more. + +Learn more about the Strategy Designer [on our dedicated guide](/guides/octobot-usage/strategy-designer). + +## Get started with a personalised session with the OctoBot team + +We know that starting a trading strategy can be complicated. That's why the OctoBot team offers to spend up to 30-minutes with you on a personalized one to one session to get your OctoBot up and running according to your ideas. + +This way you are sure to be able to use your OctoBot at its full potential. + +## Access & customize OctoBot cloud strategies + +One of the great benefits of using the cloud OctoBot trading bots is to be able to configure <a href="https://www.octobot.cloud/explore" rel="nofollow">OctoBot cloud strategies</a>. + +![cloud strategies](/images/blog/octobot-pro-plan-early-access/cloud-strategies.png) + +This means that you can: + +- Use them on your OctoBot, on the exchange you want +- Understand how they work and make changes to their configuration +- Use [Backtesting](/guides/octobot-usage/backtesting) and the [Strategy Designer](/guides/octobot-usage/strategy-designer) to get the best results on the trading pairs of your choice + +## Easy TradingView automation + +Using the open source version of OctoBot, you can already [automate TradingView signals](/guides/octobot-interfaces/tradingview). However this requires a [special configuration](/guides/octobot-interfaces/tradingview/using-a-webhook) and usually requires a paid ngrok subscription. + +When using a cloud OctoBot trading bot, everything is already configured and there is no need for any external account such as ngrok to receive your TradingView signals. + +## ChatGPT evaluations + +Accessing ChatGPT automatically is a <a href="https://openai.com/pricing" rel="nofollow">paid service by OpenAI</a>, the company behind ChatGPT. This means that when using the open source version of OctoBot, to use any ChatGPT strategy, you need a configured OpenAI account and you'll be charged for each use. + +With a cloud OctoBot trading bot in [standard or ultra plans](https://www.octobot.cloud/trading-bot), the OctoBot team is covering those costs. Therefore there is no need for an OpenAI account, everything is already ready to use. + +## Register for early access + +We are now opening registrations for cloud OctoBot trading bots early acccess. If you are interested in benefiting from the many advantages of cloud OctoBot trading bots, start your OctoBot on [OctoBot cloud trading bots](https://www.octobot.cloud/trading-bot). + +**[Start your OctoBot](https://www.octobot.cloud)** diff --git a/docs/blog/2023-12-02-how-does-trading-bot-work.md b/docs/blog/2023-12-02-how-does-trading-bot-work.md new file mode 100644 index 0000000000..a50897e7da --- /dev/null +++ b/docs/blog/2023-12-02-how-does-trading-bot-work.md @@ -0,0 +1,82 @@ +--- +title: "How does trading bot work" +description: "Discover how does trading bot work, their benefits and drawbacks, and learn how to select the right bot for your trading needs." +slug: "how-does-trading-bot-work" +date: "2023-12-02" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Backtesting", "Educational"] +image: "/images/blog/how-does-trading-bot-work/cover.png" +--- + +# How does trading bot work + +Explore crypto trading bots, how they work, their benefits and drawbacks, and learn how to select the right bot for your trading needs. + +<!--truncate--> + +## What are crypto trading bots? + +Crypto trading bots are automated software designed to handle cryptocurrency trading on behalf of users. +By using various algorithms, these bots can execute trades based on a set of predefined settings and strategies, making them a great way to invest or trade cryptocurrencies. + +<div style={{textAlign: "center"}}> + <div> + ![a crypto trading bot sitting behind a + desk](/images/blog/how-does-trading-bot-work/cover.png) + </div> +</div> + +## How do crypto trading bots work? + +Understanding how crypto trading bots work is essential for any crypto trader. +This section explains some of the inputs that enable these bots to perform. + +1. Technical Indicators: These bots often uses a range of technical indicators such as moving averages, Relative Strength Index (RSI), and others to inform their trading decisions. +2. Crypto Predictions: Advanced bots may incorporate machine learning algorithms, like [OctoBot GPT](trading-using-chat-gpt), to predict future price movements based on historical data analysis. +3. Crypto Signals: Utilizing signals from platforms like [TradingView](trading-using-tradingview), these bots can execute trades based on market trends and expert insights. + +## Pros of using crypto trading bots + +Let's explore some advantages of employing crypto trading bots. + +- Efficiency and speed: Bots are capable of processing large datasets and executing trades at a speed unmatchable by human traders. +- Avoid emotional trading: Bots are based on algorithms, thereby removing emotional biases which often lead to trading losses. +- 24/7 trading: Unlike humans, bots can operate 24h a day 7 days a week, trading each opportunities even when its owner is sleeping. + +## Cons of using crypto trading bots + +While advantageous, it's crucial to understand the potential drawbacks and limitations of using crypto trading bots in your investment strategy. + +- Complexity of Use: Understanding and setting up a trading bot can be a complicated task, particularly for beginners in the trading world. +- Security risks: Using bots on unreliable platforms can pose security threats to users crypto or personal data. + +## Is automated trading profitable? + +Profitability in automated trading is not a guaranteed outcome. This section highlights the need to analyze past strategy performance and the value of [paper trading](paper-trading-with-octobot) for strategy testing. + +- Paper trading and backtesting: Before employing a bot in real trading, it’s crucial to test its strategy using historical data ([backtesting](/guides/octobot-usage/backtesting)) and simulate trading in a risk-free environment (paper trading). +- Analyzing past performance: While past performance is not a sure indicator of future success, it provides valuable insights into how a bot may perform under certain market conditions. + +## How to choose a crypto trading bot + +Here are essential aspects to consider when choosing a crypto trading bot in order to align with your trading goals and trading experience. + +- Ease of use: It's important to select a bot that matches your technical expertise to ensure smooth use. +- Strategy variety: The bot should offer a range of strategies that align with your trading objectives and style. +- Cost consideration: Evaluate the price of the bot in relation to your budget and the potential returns it can offer. + +![A man relaxing in his couch while OctoBot is making money by automating cryptocurrency strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +### OctoBot: the trading bot for you + +OctoBot, created in 2018, is known for its transparency and customizable features in crypto trading. + +- Open Source with free strategies: As an [open-source project](open-source-trading-software), it offers multiple trading strategies for free, giving users control and flexibility. +- OctoBot cloud: [The cloud plateform of OctoBot](introducing-the-new-octobot-cloud) simplifies OctoBot setup, making it accessible even for novices, and is free of charge. +- Performance transparency: OctoBot maintains an [history of the performance](/investing/find-your-strategy#strategies-details) of each strategies, aiding users in making informed decisions. + +## Conclusion + +Crypto trading bots, like OctoBot, offers investment efficiency and emotion-free trading. +However, it's essential to take into account their complexities, potential risks, and the need for testing before use. +By choosing the right bot, traders can significantly improve their trading strategies and potentially increase their chances of success. diff --git a/docs/blog/2023-12-05-safu-meaning.md b/docs/blog/2023-12-05-safu-meaning.md new file mode 100644 index 0000000000..df33c60331 --- /dev/null +++ b/docs/blog/2023-12-05-safu-meaning.md @@ -0,0 +1,38 @@ +--- +title: "Understanding the SAFU meaning" +slug: "safu-meaning" +date: "2023-12-05" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/safu-meaning/cover.png" +--- + + + +# Understanding the SAFU meaning + +In the world of cryptocurrency, where security and trust are really important, the concept of the Secure Asset Fund for Users (SAFU) stands as a major innovation. This article describes the origins, purpose, and cultural impact of SAFU, particularly in its role in safeguarding user funds. + +<!--truncate--> + +## Origin and purpose of SAFU + +The acronym SAFU refers to the "Secure Asset Fund for Users," an emergency insurance fund established by the cryptocurrency exchange <a href="https://www.binance.com" rel="nofollow">Binance</a>. In July 2018, Binance created this <a href="https://www.binance.com/en/support/announcement/binance-secure-asset-fund-for-users-safu-valued-at-1bn-9c513d91af3f497b99da2962322fb3c3" rel="nofollow">fund</a>, committing 10% of all trading fees to ensure user funds are protected in extreme situations. + +## SAFU's financial aspect + +By January 29, 2022, the Secure Asset Fund for Users was valued at one billion dollars, showcasing its robust nature. SAFU primarily contains three types of assets: BNB (Binance Coin), BTC ([Bitcoin](https://www.octobot.cloud/what-is-bitcoin)), and BSC-USD (Binance's [stablecoin](what-are-stablecoins)), all held in separate cold wallets to maximize security. + +## The sentence "Funds are SAFU" + +The term "SAFU" gained popularity from a humorous play on the word "safe," originating from a <a href="https://www.youtube.com/watch?v=DelF6zEHXpE" rel="nofollow">YouTube video</a> by the content creator Bizonacci in 2018. This video, titled "Funds are Safu," was a response to a <a href="https://twitter.com/cz_binance/status/1326458569974181891" rel="nofollow">tweet by Changpeng Zhao</a>, the CEO of Binance at that time, who assured users that their "funds are safe" during a period of unscheduled maintenance. + +<div style={{textAlign: "center"}}> + <div> + ![Changpeng Zhao "SAFU" tweet](/images/blog/safu-meaning/safu-tweet.png) + </div> +</div> + +## The birth of the SAFU meme + +The phrase "Funds are SAFU" traces its roots back to an incident on March 7, 2018. [Binance](https://www.octobot.cloud/binance-trading-bot) users encountered an unexpected maintenance period due to a malfunction in the SYS/BTC trading pair. This event, which included attempts by hackers to exploit the system, led to Changpeng Zhao's reassuring tweet. The subsequent viral video and Zhao's embrace of the term cemented "Funds are SAFU" as a staple in crypto slang, often used during similar situations to reassure users. diff --git a/docs/blog/2023-12-07-fud-meaning.md b/docs/blog/2023-12-07-fud-meaning.md new file mode 100644 index 0000000000..f78fa9bbb2 --- /dev/null +++ b/docs/blog/2023-12-07-fud-meaning.md @@ -0,0 +1,49 @@ +--- +title: "Understanding the FUD meaning" +description: "Explore the FUD meaning in crypto with this concise guide to understanding its influence on markets and investment decisions" +slug: "fud-meaning" +date: "2023-12-07" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/fud-meaning/cover.png" +--- + + + +# Understanding FUD meaning: fear, uncertainty, and doubt in investing + +FUD, short for "fear, uncertainty, and doubt" is a term that resonates deeply in the world of finance and cryptocurrency. It represents a state of mind that can heavily influence investment decisions, often leading to irrational market behavior. + +<!--truncate--> + +## Meaning of FUD in investing + +In investing, FUD refers to a pervasive sense of pessimism or negativity that can affect market sentiment. It goes beyond the normal concerns of investors, touching on rumors and hype that can drive impulsive, often irrational decisions. An example of this is the meme stock craze, where FUD played a significant role. + +## FUD vs FOMO: understanding the difference + +FUD can be seen as the opposite of [FOMO](fomo-meaning). While FOMO drives investors to jump on the bandwagon due to a fear of missing out on gains, FUD spreads a collective negative sentiment, often exacerbated by social media, causing people to panic and sell. + +## The history of FUD + +FUD's history dates back to the 1920s, becoming more widely recognized as an acronym in the 1970s. Initially a marketing and sales tactic, FUD evolved into a key concept in investing, particularly with the advent of highly volatile markets like cryptocurrency. + +<div style={{textAlign: "center"}}> + <div> + ![A person looks anxiously at a computer with a falling crypto market graph, + illustrating FUD in cryptocurrency.](/images/blog/fud-meaning/cover.png) *An + illustration of the FUD sentiment.* + </div> +</div> + +## FUD in cryptocurrency + +In the crypto world, FUD takes on two main forms: spreading doubt to manipulate market prices and general skepticism about the legitimacy of cryptocurrencies. The volatile nature of crypto markets makes them especially susceptible to FUD. + +## Examples of FUD in crypto + +Several instances demonstrate FUD's impact on the crypto market. Notable examples include rumors of <a href="https://www.bbc.com/news/technology-58678907" rel="nofollow">China banning Bitcoin</a> and fears around government regulation. These instances show how exaggerated negative news can lead to hysteria and market fluctuations. + +## The impact of FUD + +FUD can lead to significant market movements, as investors react to perceived threats or negative news. In regulated markets, spreading FUD with the intention of <a href="https://www.binance.com/en-NG/feed/post/1279693" rel="nofollow">manipulating prices is illegal</a> and considered market manipulation. diff --git a/docs/blog/2023-12-07-hodl-meaning.md b/docs/blog/2023-12-07-hodl-meaning.md new file mode 100644 index 0000000000..befaa8269d --- /dev/null +++ b/docs/blog/2023-12-07-hodl-meaning.md @@ -0,0 +1,44 @@ +--- +title: "Understanding the HODL meaning" +description: "Explore this famous word origins, meaning and cultural impact." +slug: "hodl-meaning" +date: "2023-12-07" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/hodl-meaning/cover.png" +--- + + + +# Understanding HODL meaning in crypto: more than just a misspelling + +HODL is a famous word in the crypto world. It's short for “hold on for dear life,” and means to keep your crypto even when prices go up and down a lot. + +<!--truncate--> + +## What is HODL? + +HODL is like saying, "Don't sell your crypto, even when things get tough." It started as a funny way to spell "hold" but now means a lot more. It's like the old idea of keeping stocks for a long time, but for crypto. + +## HODL in crypto culture + +In online crypto chats, you'll see HODL and other words like [SAFU](safu-meaning), [FUD](fud-meaning), [FOMO](fomo-meaning), Moon, and Sats. These words have become a big part of talking about crypto. Even people who invest in other things use HODL now. + +## The origin of HODL + +The story of HODL began with a spelling mistake. On December 18, 2013, when [Bitcoin](https://www.octobot.cloud/bitcoin-prediction)'s price fell a lot, a trader named GameKyuubi wrote <a href="https://bitcointalk.org/index.php?topic=375643.0" rel="nofollow">"I AM HODLING"</a> in an online forum. He was saying that, even though he wasn't good at trading, he would keep his Bitcoin. + +<div style={{textAlign: "center"}}> + <div> + ![A man hodling crypto](/images/blog/hodl-meaning/cover.png) *A man + "HODLING" crypto* + </div> +</div> + +## Understanding the HODL strategy + +HODL means to really stick with your crypto, even when prices drop. It's different from day trading, where people buy and sell often to make small profits. + +## Knowing when to HODL + +Deciding to HODL or sell is up to you. You should research and think about it carefully. Some people HODL all their crypto, while others trade some and keep others. diff --git a/docs/blog/2023-12-10-octobot-1-0-4-whats-new.md b/docs/blog/2023-12-10-octobot-1-0-4-whats-new.md new file mode 100644 index 0000000000..a80f13bc20 --- /dev/null +++ b/docs/blog/2023-12-10-octobot-1-0-4-whats-new.md @@ -0,0 +1,51 @@ +--- +title: "OctoBot 1.0.4 - What's new" +slug: "octobot-1-0-4-whats-new" +date: "2023-12-10" +authors: ["guillaume"] +tags: ["OctoBot cloud", "Release", "DCA", "Exchanges"] +image: "/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png" +--- + + + +# OctoBot 1.0.4 - What's new + +![use octobot cloud strategies and trade on bingx](/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png) + +## Introducing OctoBot 1.0.4 + +We are glad to announce the new release of OctoBot. 1.0.4 is a updated version adding the very anticipated download of OctoBot cloud strategies directly in your OctoBot, the addition of the <a href="https://bingx.com/en-us/invite/Z4UUVX/" rel="nofollow">BingX exchange</a> to the [OctoBot partner exchanges](/guides/exchanges#partner-exchanges---support-octobot) and many improvements. + +<!--truncate--> + +## Downloading OctoBot cloud strategies + +Starting from OctoBot 1.0.4, you can now profit from <a href="https://www.octobot.cloud/explore" rel="nofollow">OctoBot cloud strategies</a> directly from your [OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +![download octobot cloud strategies in open source bot](/images/blog/octobot-1-0-4-whats-new/download-octobot-cloud-strategies-in-open-source-bot.png) + +Directly from your OctoBot, download OctoBot cloud strategies and: + +- Use them with simulated or real funds +- Configure them to trade differently, on other exchanges or other pairs +- Backtest them using the OctoBot [backtesting engine](/guides/octobot-usage/backtesting) or the [Strategy Designer](/guides/octobot-usage/strategy-designer) available on [OctoBot trading bots](https://www.octobot.cloud/trading-bot) to optimize them according to your ideas + +## BingX is now available on OctoBot + +At OctoBot, we work on making trading as accessible as possible. This comes with the support of most major exchanges. Following this philosophy, we just added support for <a href="https://bingx.com/en-us/invite/Z4UUVX/" rel="nofollow">BingX</a>. We hope that this addition will help many of our users. + +## Exchanges bugfixes + +In OctoBot 1.0.4, fixed many exchange related issues, especially regarding futures trading and take profit / stop loss orders. Special thanks to Nes, Grr, Gerhard and Artem from our community who helped us a lot finding those issues. + +## Other improvements and bugfixes + +In this release, we also added parameters to make the DCA and Daily trading mode more customizable and trade closer to your ideas. + +Many bugs have been fixed as well, especially regarding the web interface currency selector update, exchange connection issues, Ngrok configuration and more. + +## Conclusion + +We can't wait to know what you think about this new version. +Please use this <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">feedback link</a> to share your suggestions and what you'd like to see in our next release. diff --git a/docs/blog/2023-12-12-fomo-meaning.md b/docs/blog/2023-12-12-fomo-meaning.md new file mode 100644 index 0000000000..c6ef9278f8 --- /dev/null +++ b/docs/blog/2023-12-12-fomo-meaning.md @@ -0,0 +1,54 @@ +--- +title: "Understanding the FOMO meaning" +description: "Explore FOMO meaning in crypto. Understand how Fear of Missing Out influences trading decisions and learn effective strategies to combat it." +slug: "fomo-meaning" +date: "2023-12-12" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/fomo-meaning/cover.png" +--- + + + +# Understanding FOMO meaning + +FOMO, short for "fear of missing out" is when traders worry they might miss a chance to make money, especially in the fast-changing crypto market. + +<!--truncate--> + +## FOMO in crypto context + +In the crypto world, FOMO is particularly intense. FOMO can be seen as the opposite of [FUD](fud-meaning). Traders often feel they're missing out on the next big thing, leading them to make impulsive choices based on unverified information or social media speculation. + +## Psychology behind FOMO + +FOMO happens when people are afraid of missing out on making money, especially when market prices are going up. This fear can make people anxious, too focused on market news, and quick to make decisions based on changing prices. + +<div style={{textAlign: "center"}}> + <div> + ![A person with an excited expression watches a rising crypto market graph + on their computer, symbolizing FOMO in + cryptocurrency.](/images/blog/fomo-meaning/cover.png) *An illustration of + the FOMO sentiment.* + </div> +</div> + +## Negative consequences of FOMO + +FOMO can have serious drawbacks. It's a tool in pump-and-dump schemes, where investors are misled into making poor choices. A notable example is the <a href="https://www.wired.com/story/squid-game-coin-crypto-scam/" rel="nofollow">SQUID Coin scheme</a>, fueled by social media hype and investor FOMO. + +## Dealing with FOMO in crypto + +To combat FOMO, traders should keep a detailed trading journal, adhere to a solid trading plan, and have a clear risk management strategy. It's crucial to research independently and not base decisions solely on social media buzz. + +## Examples of FOMO in crypto + +[Dogecoin](https://www.octobot.cloud/what-is-dogecoin)'s price fluctuations, often <a href="https://www.cbc.ca/news/business/dogecoin-1.6020408" rel="nofollow">influenced by Elon Musk's tweets</a>, illustrates FOMO in practice. Motivated by the fear of missing out, traders quickly respond to these high-profit opportunities. + +## Identifying causes of FOMO + +Knowing why FOMO sentiment exists is important. It can come from wanting to be part of a big market change or not wanting to lose out, having lots of information, and being attracted to stories of people who succeeded early in investing. + +## Signs of FOMO + +Signs of FOMO are often related to an urgent need to invest based on recent popularity or price jumps. To avoid FOMO, thorough research and reliance on trusted information sources are essential. diff --git a/docs/blog/2023-12-15-best-crypto-trading-bots.md b/docs/blog/2023-12-15-best-crypto-trading-bots.md new file mode 100644 index 0000000000..cb8b39248d --- /dev/null +++ b/docs/blog/2023-12-15-best-crypto-trading-bots.md @@ -0,0 +1,346 @@ +--- +title: "10 Best Paid and Free Crypto Trading Bots" +description: "Discover the best crypto trading bots. Compare top free and paid bots, their features, ease of use, and pricing. Ideal for both beginners and experts in cryptocurrency trading." +slug: "best-crypto-trading-bots" +date: "2023-12-15" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-crypto-trading-bots/cover.png" +--- + + + +# 10 Best Paid and Free Crypto Trading Bots + +Choosing the right crypto trading bot from the many options available can be tough. This article is here to help you find the best one for your needs. + +<!--truncate--> + +## What is a crypto trading bot? + +Crypto trading bots are like your digital assistants for cryptocurrency trading. +They work automatically, following your set strategies, to buy and sell crypto for you. +This means you don't have to watch the markets all the time. + +With many traders using them today, they're a popular choice for both beginners and experts. +For those new to crypto trading, these bots often come with user-friendly strategies to help you get started easily. + +<div> + Now that we have a clear understanding of what a crypto trading bot is, let's + explore the various types of trading bots available in the market. +</div> + +## 1. OctoBot + +<div style={{textAlign: "center"}}> + <div> + ![A man relaxing in his couch while OctoBot is making money by automating + cryptocurrency + strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +[OctoBot](/) is a flexible and easy-to-use trading bot that offers a variety of strategies for free, including [AI-based](https://www.octobot.cloud/features/ai-trading-bot), smart [DCA](smart-dca-making-of), and GRID strategies. +It's [open-source](open-source-trading-software). With its focus on transparency, users can backtest strategies or use paper trading and track performance. +OctoBot supports most major crypto exchanges and offers premium plans for advanced users, making it suitable for both beginners and experienced crypto investors. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="User-friendly with AI-based and various pre-made strategies for beginners and pros" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Multiple free offers with options for advanced premium plans" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Open-source, supports major exchanges, variety of trading strategies, backtesting, and performance tracking" + /> + </div> +</Card> + +## 2. Bitsgap + +Bitsgap is known for its unique grid bots which make trading accessible to more traders. +These bots work within a set range, placing buy and sell orders to capitalize on market swings. + +Bitsgap also offers a Futures trading bot for handling multiple small positions daily, aiming for frequent, smaller returns while minimizing risks. +The platform is cloud-based for ease of use, and includes [paper trading](/investing/paper-trading-a-strategy) and backtested strategies to help traders start quickly and safely. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="Cloud-based platform with accessible grid bot trading" + /> + <Rating title="Price" level={1} h="14px" tooltipText="Paid service" /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Offers Futures trading bot and paper trading" + /> + </div> +</Card> + +## 3. 3Commas + +<div style={{textAlign: "center"}}> + <div> + ![3commas-logo](/images/blog/best-crypto-trading-bots/3commas.png) + </div> +</div> + +3Commas is a paid crypto trading bot, offering GRID, DCA and Signal bots. +Known for its user-friendly interface, 3Commas supports multiple trading strategies and technical indicators. + +It also features a community for support and learning, and a marketplace for third-party crypto signals. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="Easy-to-navigate interface with multiple trading strategies" + /> + <Rating + title="Price" + level={1} + h="14px" + tooltipText="Expensive paid service" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Supports GRID, DCA, and Signal bots with a marketplace" + /> + </div> +</Card> + +## 4. Cryptohopper + +Cryptohopper is a paid crypto trading bot, offering a 3 days free trial for new users. +It stands out for its market-making bot and the ability for users to create custom trading strategies or copy others' from its marketplace. + +The platform also supports automated trading via a telegram bot and offers additional services like crypto signals, strategy templates, and paper trading. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Challenging for beginners, offers market-making bot" + /> + <Rating + title="Price" + level={2} + h="14px" + tooltipText="Free and paid plans, suitable for various budgets" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Custom strategy creation, telegram bot trading, and paper trading" + /> + </div> +</Card> + +## 5. CoinRule + +Coinrule is a no-code crypto trading bot. +It features an easy "if-this-then-that" rule setup, over 150 pre-set trading rules, and a risk-free demo exchange. + +The bot is available via a web platform, supports all major tokens, and offers various subscription plans, including a free option. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Complex 'if-this-then-that' rule setup for beginners" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Free and paid plans, suitable for various budgets" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Over 150 pre-set rules and a risk-free demo exchange" + /> + </div> +</Card> + +## 6. Binance trading bot + +Binance trading bots are automated tools designed to execute cryptocurrency trades based on predefined parameters, allowing users to trade 24/7 without constant monitoring. +These bots enhance trading efficiency by analyzing market data and making split-second decisions, which can help capitalize on market volatility. +Binance offers a variety of bots, including the popular Spot Grid bot, which is particularly effective in sideways markets by buying low and selling high within a set price range. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Complex bot exploration and creation" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Exchanges fees only" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Great variety of pre-made bots" + /> + </div> +</Card> + +## 7. Altrady + +Altrady is a crypto trading platform that integrates with over 17 exchanges, offering automated GRID and Signal bots for effective trading. +It includes advanced features like Take Profit, Stop Loss, and risk management tools. + +Its interface is complemented by real-time analytics and portfolio management tools. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Moderately easy with advanced feature set" + /> + <Rating title="Price" level={2} h="14px" tooltipText="Balanced pricing" /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Some advanced tools like Take Profit, Stop Loss, and portfolio management" + /> + </div> +</Card> + +## 8. Pionex + +<div style={{textAlign: "center"}}> + <div> + ![pionex-logo](/images/blog/best-crypto-trading-bots/pionex.jpg) + </div> +</div> + +[Pionex](https://www.pionex.com/en/signUp?r=octobot) ([Pionex.us](https://accounts.pionex.us/en/signup?ref=octobot) for US citizens) is a cutting-edge trading platform known for its user-friendly automated trading bots, allowing traders to execute strategies effortlessly. +It offers a variety of customizable bots tailored to different trading styles, making it ideal for both novice and experienced traders seeking flexibility. +With advanced backtesting and performance monitoring tools, Pionex empowers users to optimize their strategies and manage risk effectively. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Easy to use copy bot but complex bot creation" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Low exchanges fees" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Great variety of pre-made bots" + /> + </div> +</Card> + +## 9. Cornix + +Cornix is an automated crypto trading platform, renowned for beeing the largest crypto signals provider marketplace. +It offers DCA bots, a dedicated mobile app and integration with Telegram for easy trade automation. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Moderate ease with mobile app and Telegram integration" + /> + <Rating + title="Price" + level={1} + h="14px" + tooltipText="Cost-effective with other signals provider marketplace" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="DCA bots and integration with major trading platforms" + /> + </div> +</Card> + +## 10. TradeSanta + +<div style={{textAlign: "center"}}> + <div> + ![tradesanta-logo](/images/blog/best-crypto-trading-bots/tradesanta.png) + </div> +</div> + +TradeSanta is a cloud-based bot automating trading with technical indicators, and risk management tools such as stop loss. +It also offers demo trading, real-time Telegram notifications, and an option to quickly convert assets during market fluctuations. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="User-friendly, with technical indicators and demo trading" + /> + <Rating + title="Price" + level={1} + h="14px" + tooltipText="Paid options for essential trading tools" + /> + <Rating + title="Features" + level={1} + h="14px" + tooltipText="Basic, includes stop loss and real-time Telegram notifications" + /> + </div> +</Card> + +## Conclusion + +In summary, whether you're a beginner or a seasoned trader, there's a crypto trading bot out there that can meet your specific needs. +From bots offering a wide range of trading strategies and pairs to those that allow deep customization and scripting, each bot has its own unique set of features that cater to different trading styles and preferences. diff --git a/docs/blog/2023-12-17-best-open-source-crypto-trading-bots.md b/docs/blog/2023-12-17-best-open-source-crypto-trading-bots.md new file mode 100644 index 0000000000..6f5cd4590b --- /dev/null +++ b/docs/blog/2023-12-17-best-open-source-crypto-trading-bots.md @@ -0,0 +1,196 @@ +--- +title: "5 Best Open source Crypto Trading Bots" +description: "Discover the top-performing open-source crypto trading bots. Find the best solutions for efficient cryptocurrency trading." +slug: "best-open-source-crypto-trading-bots" +date: "2023-12-17" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-open-source-crypto-trading-bots/cover.png" +--- + + + +# 5 Best Open source Crypto Trading Bots + +Finding the perfect crypto trading bot can be challenging with so many choices. This guide aims to simplify your search about open source options. +In this article, we will introduce you to the top 5 open source crypto trading bots. + +<!--truncate--> + +## What is an open source crypto trading bot? + +An [open-source crypto trading bot](open-source-trading-software) is a software tool that automates the buying and selling of cryptocurrencies on exchanges. +These bots connect to exchanges via APIs to gather market data and execute trades based on a [trading strategy](/investing/find-your-strategy). + +Open-source bots are unique because their code is freely available for anyone to view, [modify, and improve](/guides/developers). +This transparency and customizability make them a favorite among traders who want to tailor their trading strategies. +They're suitable for both beginners who are just starting and experienced traders who want more control over their trading strategies. +Plus, their open-source nature often makes them more affordable. + +## 1. OctoBot + +![A man relaxing in his couch while OctoBot is making money by automating cryptocurrency strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +[OctoBot](/) is a flexible and easy-to-use trading bot that offers a variety of strategies for free, including [AI-based](https://www.octobot.cloud/features/ai-trading-bot), smart [DCA](smart-dca-making-of), and GRID strategies. +Its active community support make it an attractive choice for traders looking for a balance between functionality and ease of use. +With its focus on transparency, users can backtest strategies and track performance. +OctoBot supports most major crypto exchanges and offers professional features for advanced users, making it suitable for both beginners and experienced crypto investors. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="A user-friendly web interface that makes it easy for beginners to navigate along with a cloud version for hassle-free hosting" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Offers a variety of trading strategies and features including AI trading and strategy optimization" + /> + <Rating + title="Docs & Community" + level={3} + h="14px" + tooltipText="A very detailed documentation and an active telegram and discord community" + /> + </div> +</Card> + +## 2. FreqTrade + +<a href="https://www.freqtrade.io/en/stable/" rel="nofollow">Freqtrade</a> +, written in Python, is known for its ease of use and Telegram integration. It +allows for extensive strategy testing and simultaneous running of multiple bots, +making it a flexible choice for varied trading styles. The bot's active +development community constantly works on new features and improvements. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="Known for its user-friendly telegram or web interface making it accessible for beginners" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Offers a solid range of features including strategy testing and multiple bot options" + /> + <Rating + title="Docs & Community" + level={3} + h="14px" + tooltipText="A very detailed documentation and a very active and supportive community" + /> + </div> +</Card> + +## 3. HummingBot + +<a href="https://hummingbot.org/" rel="nofollow">Hummingbot</a> is renowned for its automated trading strategies and +compatibility with both centralized and decentralized exchanges. Its +market-making feature is ideal for spread trading enthusiasts. The platform also +supports liquidity mining, offering additional earning opportunities. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="While powerful, it might require some learning curve for new users" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Offers a wide range of features including market making, liquidity mining, and support for both centralized and decentralized exchanges" + /> + <Rating + title="Docs & Community" + level={3} + h="14px" + tooltipText="Has comprehensive documentation and a robust community" + /> + </div> +</Card> + +## 4. Jesse + +<div style={{textAlign: "center"}}> + <div> + ![jesse-logo](/images/blog/best-open-source-crypto-trading-bots/jesse.png) + </div> +</div> + +<a href="https://jesse.trade/" rel="nofollow">Jesse</a> is a lesser-known but highly effective open-source +trading bot, designed for simplicity and efficiency in strategy development. It +supports various cryptocurrencies and offers a streamlined +[backtesting](/guides/octobot-usage/backtesting) environment. Jesse is +particularly appreciated for its clean coding structure, making it easy for +developers to customize and extend its capabilities. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Streamlined for strategy development, but might require some technical knowledge" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Good range of features for strategy development and backtesting" + /> + <Rating + title="Docs & Community" + level={2} + h="14px" + tooltipText="Adequate documentation and a growing community" + /> + </div> +</Card> + +## 5. Superalgos + +<a href="https://superalgos.org/" rel="nofollow">Superalgos</a> provides a comprehensive trading suite with visual +strategy design and extensive data tools, suitable for individual and +professional traders. Its scalability for larger operations and decentralized +approach allows collaborative strategy development. The platform's integration +of data mining and machine learning tools makes it a powerful option for +advanced traders. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Offers a lot of functionality which can be overwhelming for beginners, but very powerful for experienced users" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Comprehensive suite of tools including visual strategy design, data analysis, and scalability for professional use" + /> + <Rating + title="Docs & Community" + level={3} + h="14px" + tooltipText="Strong community and extensive documentation available" + /> + </div> +</Card> + +## Conclusion + +In conclusion, there's a crypto trading bot for every type of trader. +Whether you need a wide range of strategies or prefer customizable options, each bot has unique features to suit different trading styles and needs, making your crypto trading more efficient and tailored to you. diff --git a/docs/blog/2023-12-20-chatgpt-strategy-deep-dive.mdx b/docs/blog/2023-12-20-chatgpt-strategy-deep-dive.mdx new file mode 100644 index 0000000000..1fca4d4bb6 --- /dev/null +++ b/docs/blog/2023-12-20-chatgpt-strategy-deep-dive.mdx @@ -0,0 +1,81 @@ +--- +title: "ChatGPT trading strategy deep dive" +description: "Discover how we created the ChatGPT based OctoBot trading strategy by combining the benefits of AI and smart DCA strategies" +slug: "chatgpt-strategy-deep-dive" +date: "2023-12-20" +authors: ["paul"] +tags: ["ChatGPT", "AI", "DCA", "Strategy designer", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Educational"] +image: "/images/blog/chatgpt-strategy-deep-dive/cover.png" +--- + + + +# ChatGPT trading strategy deep dive + +For a few months now, we have been experimenting with <a href="https://chat.openai.com/" rel="nofollow">ChatGPT</a> based trading strategies. We started by adding the [possibility to use ChatGPT as an indicator on OctoBot](/guides/octobot-trading-modes/chatgpt-trading) and kept building. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="P23oiE8gW4Y" title="Deep Dive into the ChatGPT Trading Strategy" /> + +<!--truncate--> + +In this video, we are explaining how the strategy works in details and go through its historical and live performances by following a 4 weeks old live trading OctoBot. + +We are now introducing our latest ChatGPT strategy that is taking advantage of both [Smart DCA (Dollar Cost Averaging)](smart-dca-making-of) optimisations and artificial intelligence predictions. + +## The ChatGPT strategy concept + +The concept behind this strategy is to identify market entries when ChatGPT predicts that the market will go up. + +For this, we provide ChatGPT with context of the market: we give it recent market data and ask for a prediction of the following candle: "Will the market go up or down ?" + +Once an entry is identified, the DCA part of the strategy kicks in to optimize the buy and sell steps. + +More details on how to trade with ChatGPT on OctoBot on the [ChatGPT trading guide](/guides/octobot-trading-modes/chatgpt-trading). + +## Video summary + +### Creating orders + +Buy orders are created when ChatGPT predicts the market will go up with a sufficient degree of confidence. + +Each buy signal is then triggering a quick DCA strategy where multiple buy orders at different prices are created. Using multiple buy prices allows to take advantage of smaller market dips to lower the average buying price + +Once filled, each buy order is replaced by a sell order of the same amount. This sell order is at an optimized profit target and is designed to be filled within the next hours or days, therefore generating profits. + +### Backtesting the strategy + +To design the ChatGPT strategy, we ran extensive [backtesting](/guides/octobot-usage/backtesting) campaigns in order to find the best markets and settings to trade on. + +![strategy designer with bitcoin and ChatGPT strategy](/images/blog/chatgpt-strategy-deep-dive/strategy-designer-bitcoin-with-chat-gpt-strategy.png) + +Similarly to [Smart DCA complementary assets](smart-dca-making-of#complementary-assets), the ChatGPT strategy benefits from trading multiple complementary assets at the same time, therefore it is important to identify the right markets to optimize your profits using this strategy. + +### Live results + +We are testing the ChatGPT strategy with live trading bots for a few weeks now and we are very happy to see that it behaves as expected: + +- It buys and sells quickly +- It takes profits +- It doesn't get stuck in open sell orders + +We even made some profit ! + +## When to use the ChatGPT strategy + +Similarly to DCA strategies, the ChatGPT strategy relies on sideways or upward markets to make profits. It is therefore important to always carefully choose the traded assets for the strategy to be able to sell those assets quickly and avoid locking funds in sell orders. + +Using the ChatGPT strategy on a downwards market might lock funds is sell orders, while this is not selling at a loss, it is not optimal and can prevent profits from other cryptocurrencies. + +## Other resources + +If you are interested in knowing more about how to run you how ChatGPT trading strategy on OctoBot, checkout our [Trading using ChatGPT](trading-using-chat-gpt) article which covers the technical details on how to use ChatGPT the way you want, directly from your OctoBot. + +![ChatGPT crypto predictions](/images/blog/chatgpt-strategy-deep-dive/chatgpt-crypto-price-predictions.png) + +We make ChatGPT trading signals available for free on our [ChatGPT crypto predictions](https://www.octobot.cloud/tools/crypto-prediction), for which you can find more details on our [Trading with ChatGPT signals](introducing-chatgpt-trading-tool) article. + +## Warning + +Please be advised that the contents of this article are intended FOR GENERAL INFORMATION PURPOSES and not financial advice. The information contained herein is for informational purposes only. Nothing herein shall be construed to be financial legal or tax advice. The content of this article is solely the opinions of the author and/or the OctoBot team. None of those are licensed financial advisors or registered investment advisors. Purchasing cryptocurrencies poses considerable risk of loss. The author and/or the OctoBot team does not guarantee any particular outcome. Past performance does not indicate future results. diff --git a/docs/blog/2024-01-01-cloud-octobot-plans.md b/docs/blog/2024-01-01-cloud-octobot-plans.md new file mode 100644 index 0000000000..f8cddae419 --- /dev/null +++ b/docs/blog/2024-01-01-cloud-octobot-plans.md @@ -0,0 +1,61 @@ +--- +title: "cloud octobot plans" +slug: "cloud-octobot-plans" +date: "2024-01-01" +--- +--- +title: OctoBot cloud plans +description: Explore the different OctoBot plans designed to suit every need, whether you seek simplicity or customization in your trading strategies. +tags: Cryptocurrency, Trading, Plans, OctoBot cloud +image: /images/blog/introducing-cloud-octobot-plans/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2023-10-23 +searchable: false +difficulty_level: 1 +reading_time: 3 +author: guillaume +--- + + +# OctoBot cloud plans + +![cover](/images/blog/introducing-cloud-octobot-plans/cover.png) + +## An OctoBot plan for each need + +In the OctoBot team, we want everyone to be able to use OctoBot and enjoy great trading strategies. + +<!--truncate--> + +Our goal is that whether you want to get a free and pre-configured strategy or infinite customization to create your very own strategy, you should be able to do it using OctoBot. + +That's why we created different plans that are designed to fit every user. + +## The plans + +### Investor +The Investor plan is perfect for investors who look for simplicity. Automate a trading strategy out of the box on a few trading pairs of your choice. + +The Investor plan is free. It uses the simplest version of OctoBot. + +### The Investor Plus plan + +The [Investor Plus plan](introducing-the-investor-plus-plan) gives access to each and every investment startegy on OctoBot cloud as well as the possibility to run as many OctoBots as you wish. + +### The Pro plan + +The [Pro plan](introducing-the-pro-plan) includes the Investor Plus plan, allows to automate TradingView alerts and add the possibility to fine tune your OctoBots and directly configure how they trade. + +### Self hosting + +The Self hosting plan allows you to run your OctoBot directly from home, on the device of your choice. It is free and relies on you to install, run and maintain your OctoBot. + +If you prefer to run OctoBot at home or on your own server, the Self hosting plan is made for you. + +## How to start investing on OctoBot cloud? + +Select a strategy on [OctoBot cloud](/) and choose Investor to start investing for free. + +You will find all the plans details on the [pricing page](https://www.octobot.cloud/pricing). diff --git a/docs/blog/2024-01-01-first-blog-post.md b/docs/blog/2024-01-01-first-blog-post.md new file mode 100644 index 0000000000..2533023e08 --- /dev/null +++ b/docs/blog/2024-01-01-first-blog-post.md @@ -0,0 +1,51 @@ +--- +title: "first blog post" +slug: "first-blog-post" +date: "2024-01-01" +--- +--- +title: Welcome to the OctoBot blog +description: +tags: Crypto, Trading, Python, OctoBot +image: /images/blog/welcome-to-octobot-blog/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2022-06 +searchable: false +archived: true +author: guillaume +--- + +# Welcome to the OctoBot blog + +![cover](/images/blog/welcome-to-octobot-blog/cover.png) + +Since 2018, OctoBot is an open source cryptocurrency trading bot that allow hundreds of users to trade easily with professional features. + +<!--truncate--> + +The new OctoBot blog + +The OctoBot blog is a great place to learn about the open source OctoBot project. You can find out about the latest news, features, and releases. You can also learn about the people behind the project and how they are working to make OctoBot the best it can be. If you're using OctoBot, then we'd like to hear from you! Tell us what you think of the project, what you would like to see changed or added, and any other comments you have. + +## What's cryptocurrency trading + +Cryptocurrency trading is the process of buying and selling cryptocurrencies. Cryptocurrencies are digital or virtual tokens that use cryptography to secure their transactions and to control the creation of new units. Cryptocurrencies are decentralized, meaning they are not subject to government or financial institution control. [Bitcoin](https://www.octobot.cloud/what-is-bitcoin), the first and most well-known cryptocurrency, was created in 2009. + +Cryptocurrency trading is a relatively new practice, but it has grown in popularity as more people become interested in cryptocurrencies. In 2018, Bitcoin's value increased by over 1000%, leading to a surge in interest in other cryptocurrencies. Cryptocurrency trading can be done through online exchanges, and many people trade cryptocurrencies as a way to make money. + +If you're interested in trading cryptocurrencies, there are a few things you should know. First, you need to understand how the market works and what factors can affect cryptocurrency prices. You also need to choose an exchange and wallet that you trust, and have a good understanding of the risks involved. Finally, don't invest more than you can afford to lose, and always remember that cryptocurrency prices can fluctuate wildly. + +## Why using a trading bot? + +There are many reasons why you might want to use a trading bot. Perhaps you want to free up your time so that you can focus on other things, or maybe you want to take advantage of the 24-hour trading cycle by having a bot that can trade for you around the clock. Whatever your reason, a trading bot can be a helpful tool in your trading arsenal. + +One of the main advantages of using a trading bot is that it can help to take emotion out of the equation. When you are manually trading, it can be easy to let emotions like greed or fear influence your decisions. This can lead to bad trades that lose you money. A trading bot, on the other hand, is not influenced by emotions and will only execute trades based on pre-determined rules that you have set. This can help you to stick to your trading plan and make better, more informed decisions. + +Another benefit of using a trading bot is that it can help you to diversify your portfolio. If you are only manually trading one or two assets, you are missing out on the opportunity to profit from the moves in other markets. By using a bot, you can trade multiple assets simultaneously and capitalize on opportunities as they arise. + +## What's open source? + +In the world of software development, open source is a term used to describe a project or program that is available for anyone to use or modify. Open source projects are usually developed by a community of developers who work together to create and improve the code. OctoBot is an open source project that anyone can contribute to. We welcome new contributors! + diff --git a/docs/blog/2024-01-01-introducing-the-strategy-designer.md b/docs/blog/2024-01-01-introducing-the-strategy-designer.md new file mode 100644 index 0000000000..a60b32c436 --- /dev/null +++ b/docs/blog/2024-01-01-introducing-the-strategy-designer.md @@ -0,0 +1,70 @@ +--- +title: "introducing the strategy designer" +slug: "introducing-the-strategy-designer" +date: "2024-01-01" +--- +--- +title: Introducing the Strategy Designer +description: Meet the new Strategy Designer and get the best out of your strategies + introducing-the-strategy-designer +tags: Strategy designer, Backtesting, Cryptocurrency, Trading, OctoBot cloud +image: /images/blog/introducing-the-strategy-designer/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2023-03-23 +searchable: false +difficulty_level: 3 +reading_time: 5 +author: guillaume +--- + + +# Introducing the Strategy Designer + +![cover](/images/blog/introducing-the-strategy-designer/cover.png) + + +## Visualize exactly what happened +When creating a trading strategy, the hardest part is often to test the strategy in order to find the best configuration for the target market. + +<!--truncate--> + +With the [Strategy Designer](/guides/octobot-usage/strategy-designer), you will find many tools to experiment, analyse, compare and optimize your strategies and their configurations to their full potential. + +![full-page](/images/blog/introducing-the-strategy-designer/full-page.png) + +Using the Strategy Designer, for any trading strategy, you can visualize each backtesting run: +- Trades +- Positions (when trading futures) +- PNL +- Portfolio value +- Configuration +- Other insightful metrics + +## Compare backtesting runs +Being able to see the exact behavior of your strategy through time in each backtest is good, comparing those values across [backtesting](/guides/octobot-usage/backtesting) results is better. + +![graph-comparison](/images/blog/introducing-the-strategy-designer/comparison.png) + +Each analysis tool can be used to compare run results. + +![trades-comparison](/images/blog/introducing-the-strategy-designer/trades-comp.png) + +## Store results + +When running the Strategy Designer backtesting, results are always stored. This way you can always come back to a previous setup and easily compare it with others. + +![history](/images/blog/introducing-the-strategy-designer/history.png) + +## More to come + +This article is introducing the first version of the Strategy Designer. More exciting features will be added to the designer allowing you to be even more efficient when creating your best trading strategies. + +![optmizer-preview](/images/blog/introducing-the-strategy-designer/preview.png) + +## How to get the Strategy Designer ? + +The [Strategy Designer](/guides/octobot-usage/strategy-designer) is available to users using [the OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +**[Start your OctoBot](https://www.octobot.cloud)** diff --git a/docs/blog/2024-01-01-strategy-designer-revamp.md b/docs/blog/2024-01-01-strategy-designer-revamp.md new file mode 100644 index 0000000000..ab810ecbc6 --- /dev/null +++ b/docs/blog/2024-01-01-strategy-designer-revamp.md @@ -0,0 +1,74 @@ +--- +title: "strategy designer revamp" +slug: "strategy-designer-revamp" +date: "2024-01-01" +--- +--- +title: Strategy Designer Revamp +description: Enjoy the revamped Strategy Designer and created your best trading strategies + strategy-designer-revamp +tags: Strategy designer, Configuration, Backtesting, Cryptocurrency, OctoBot cloud +image: /images/blog/strategy-designer-revamp/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2023-04-25 +searchable: false +difficulty_level: 3 +reading_time: 5 +author: guillaume +--- + + +# Strategy Designer Revamp +![cover](/images/blog/strategy-designer-revamp/cover.png) + +## A clear display of your backtesting runs +Based on your feedback, we have been working a lot making the [Strategy Designer](/guides/octobot-usage/strategy-designer) much easier to use and understand. The strategy designer is now split into 2 parts: + +<!--truncate--> +1. The Strategy Viewer: where you can quickly analyze and compare trading strategies based on their performances +2. The Strategy Creator: where you can easily create a new strategy in order to test it in [backtesting](/guides/octobot-usage/backtesting). + +The Strategy Designer is now our most advanced interface to create, test and compare strategies, we hope you will enjoy it. + +## The Strategy Viewer +Quickly view, analyze and compare how your strategies behave and are more or less profitable or risky through time. +![viewer](/images/blog/strategy-designer-revamp/viewer.png) +Clearly identify key metrics of your strategy performances +![summary](/images/blog/strategy-designer-revamp/summary.png) +View each trade details +![trades](/images/blog/strategy-designer-revamp/trades.png) + +## The Strategy Creator + +1. Select the markets to trade on as well as the starting portfolio of your OctoBot for this strategy. +![step1.1](/images/blog/strategy-designer-revamp/stepper1.1.png) +![step1.2](/images/blog/strategy-designer-revamp/stepper1.2.png) + +2. Select the trading mode to use for your strategy. You can also change its configuration. +![step2](/images/blog/strategy-designer-revamp/stepper2.png) + +3. Select the strategy and evaluators to use. You can also change their configuration. +![step3](/images/blog/strategy-designer-revamp/stepper3.png) + +4. Configure the final touch: the type of exchange and backtesting time window. You can also give a name to your backtesting run to quickly identify it later on. +![step4](/images/blog/strategy-designer-revamp/stepper4.png) + +5. Start your backtesting: if necessary, OctoBot will fetch the required historical data from your exchange and will then launch a backtesting using your strategy. Results of the backtesting run will be stored in your history and available for later views and comparisons. + +## From backtesting to live trading + +Strategies created within the Strategy Designer remain in the Strategy Designer until converted into a live profile. They are independent and not affecting your live OctoBot. This means that you can test a Daily Trading strategy on the Strategy Designer that should be trading MATIC while having a live Dip Analyzer strategy that is trading BTC and ETH. + +Once you are satisfied with your trading strategy created in the Strategy Designer, you can instantly convert it into a live OctoBot profile + +![convert to live profile](/images/blog/strategy-designer-revamp/use-as-live.png) + +This profile will then be usable as any profile, which means that you can also share and edit it just the way you want. + +## How to get the Strategy Designer ? + +The [Strategy Designer](/guides/octobot-usage/strategy-designer) is available to [OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +**[Start your OctoBot trading bot](https://www.octobot.cloud)** diff --git a/docs/blog/2024-01-01-trading-orders.md b/docs/blog/2024-01-01-trading-orders.md new file mode 100644 index 0000000000..6db790bf0b --- /dev/null +++ b/docs/blog/2024-01-01-trading-orders.md @@ -0,0 +1,63 @@ +--- +title: "trading orders" +slug: "trading-orders" +date: "2024-01-01" +--- +--- +title: The basics of trading - The orders +description: Read this article and get a basic understanding of the orders available. + trading-orders +tags: Crypto, Trading, Automation, Orders, Educational +image: /images/blog/trading-orders/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2022-08-10 +searchable: false +difficulty_level: 1 +reading_time: 3 +author: guillaume +--- + +# The basics of trading - The orders + +<div style={{textAlign: "center"}}> +![cover](/images/blog/trading-orders/cover.png) +</div> + +Trading cryptocurrencies can be exciting, but also take a lot of time if you don’t know how to do it. There are many different types of orders you can make to buy or sell on your exchange according to what you want. Read our introductory article and get a basic understanding of the orders available. + +<!--truncate--> + +## Market order, the easiest + +When you place a market order, you're telling your broker (the exchange) to buy or sell shares or crypto-assets at the best available price. If you're buying, your broker will try to get the lowest possible price. If you're selling, it will try to get the highest. + +That's it! You don't have to worry about timing the market perfectly or anything like that. Just remember that if you're buying, you might not get the exact price you want, and if you're selling, you might not get the highest price possible. + +There are other types of orders, but market orders are the easiest to understand and use. + +## Limit order, the most valuable + +When it comes to trading, there are different types of orders that can be placed in order to execute a trade. Among these, the limit order is often considered the most valuable due to its ability to help traders get the best possible price for their desired asset. +A limit order is a type of trade that gives you exact instructions on when and how much to buy or sell an asset at any time in future. Your trade will be executed either once your price is met. + +## Stop order, securing your money + +When you're ready to start trading, you need to know the different types of orders that you can place. One important type of order is the stop order. A stop order is an order to buy or sell an asset at a specified price. This type of order becomes active only when the asset's price reaches the specified price. At that point, the order is executed at the next available price. + +Stop orders are often used to limit losses or protect profits. They can also be used to enter or exit a trade. For example, if you wanted to buy a crypto at $50 but it is currently trading at $49, you could place a stop order at $50. If the crypto reaches $50, your order will be executed and you will buy the crypto at $50. Or, if you wanted to sell a crypto if its price reaches $40 (in order to protect your gains) while this asset is currently trading at $51, you could place a stop order at $40. If the asset reaches $40, your order will be executed and you will sell the crypto at $40. This would prevent you from holding the asset at lower prices. + +Remember, stop orders are not guaranteed to be executed at the specified price. This is because once the stop price is reached, there may not be enough buyers or sellers. Regular stop orders are executed at the best available price at the time of their trigger. Stop loss limit orders can be used to create a limit order when the stop order is trigger and avoid this risk. However since those are creating a limit order, if the price of the limit order is not met, you will end up with an unfilled open limit order. + +## Take profit order, cash out before it's too late + +When it comes to trading, one of the most important things to remember is to take profit before it's too late. That's why a take profit order is such an important tool for traders. A take profit order is an order to buy or sell an asset at a certain price once it reaches a certain level of profit. This ensures that you lock in your profits and don't let them slip away. + +One of the biggest mistakes traders make is waiting too long to take profits. They think that they can ride the wave of a rising crypto and make even more money. But the reality is that the market can turn on a dime and all those profits can evaporate just as quickly. That's why it's important to take profits when you have them. + +Another thing to remember is that you can always cash out before it's too late. If you're ever in doubt about a trade, or if you start to see signs that the market is about to turn, don't be afraid to cash out and take your profits. It's better to be safe than sorry when it comes to trading. + +## OctoBot automates any trading strategy + +OctoBot is an automated crypto trading bot that can implement any trading strategy using any type of order. It is simple to use and you can be up and running in minutes. There is no need to be a expert trader to use OctoBot, it will do all the work for you. diff --git a/docs/blog/2024-01-01-what-are-stablecoins.md b/docs/blog/2024-01-01-what-are-stablecoins.md new file mode 100644 index 0000000000..4f16a54cc6 --- /dev/null +++ b/docs/blog/2024-01-01-what-are-stablecoins.md @@ -0,0 +1,101 @@ +--- +title: "what are stablecoins" +slug: "what-are-stablecoins" +date: "2024-01-01" +--- +--- +title: What are stablecoins +description: Explore the stablecoins, the crypto designed for stability in a volatile market. Understand the different types, their benefits, risks, and potential future impact on global finance. + what-are-stablecoins +tags: Cryptocurrency, Trading, Educational +image: /images/blog/what-are-stablecoins/cover.png +sidebar: false +breadcrumb: false +date: 2023-11-05 +searchable: false +difficulty_level: 1 +reading_time: 10 +author: guillaume +--- + +# What are stablecoins + +<div style={{textAlign: "center"}}> +![cover](/images/blog/what-are-stablecoins/cover.png) +</div> + +Stablecoins are a type of cryptocurrency designed to minimize price volatility. While traditional cryptocurrencies like [Bitcoin](https://www.octobot.cloud/what-is-bitcoin) and [Ethereum](https://www.octobot.cloud/what-is-ethereum) experience wild price swings, stablecoins aim to maintain a consistent value over time. + +<!--truncate--> + +Stablecoins play a critical role in the cryptocurrency ecosystem. They provide stability in a volatile market, enabling businesses and individuals to transact using crypto without worrying about price fluctuations. + +## Understanding Stablecoins + +### Definition of Stablecoins + +Stablecoins are digital tokens designed to maintain a stable value against a specific asset or a pool of assets. They are often pegged to traditional fiat currencies like the US Dollar, Euro, or Gold. + +### Types of Stablecoins + +There are three main types of stablecoins: fiat-collateralized, crypto-collateralized, and non-collateralized. Each type has its own mechanism to maintain price stability. + +#### Fiat-Collateralized Stablecoins + +These are stablecoins that are directly backed by fiat currencies like the US Dollar or Euro. For every stablecoin issued, there is a corresponding unit of real-world currency stored in a bank or other regulated financial institution. This physical backing provides a 1:1 value ratio, maintaining the stablecoin's price stability. + +<div style={{textAlign: "center"}}> +![usdt-price](/images/blog/what-are-stablecoins/usdt.png) +*USDT price history* +</div> + +#### Crypto-Collateralized Stablecoins + +Unlike fiat-collateralized stablecoins, these are backed by other cryptocurrencies. Due to the volatile nature of cryptocurrencies, these stablecoins are typically over-collateralized, meaning that the total value of the cryptocurrency collateral is higher than the value of the stablecoins issued, providing a buffer against market fluctuations. + +#### Non-Collateralized Stablecoins + +These stablecoins are not backed by any collateral, whether it's fiat or crypto. Instead, they use algorithms and smart contracts to automatically adjust the supply of the stablecoin in response to changes in demand, aiming to keep the stablecoin's price close to a specific target value. + +### How Stablecoins Work + +Stablecoins maintain their value by holding reserves of a stable asset, using smart contracts to maintain price stability, or through an algorithmic supply mechanism. + +## The History of Stablecoins + +Stablecoins were introduced to tackle the problem of high volatility in the crypto market. The first stablecoin, Tether (USDT), was launched in 2014. +Since the launch of Tether, the stablecoin market has seen significant growth and evolution, with many new stablecoins being introduced, each with unique features and mechanisms. + +<div style={{textAlign: "center"}}> +![usdt-logo](/images/blog/what-are-stablecoins/usdt-logo.png) +*USDT (Tether) logo* +</div> + +## The Benefits of Stablecoins + +- Stablecoins offer a safe haven during market volatility. Traders can convert their volatile cryptocurrencies into stablecoins to avoid market downturns. +- Stablecoins provide a stable medium of exchange, making them ideal for transactions, payments, and remittances. +- Stablecoins can play a crucial role in financial inclusion, providing access to digital currencies to the unbanked and underbanked populations. + +## Risks and Challenges of Stablecoins + +- Stablecoins face regulatory scrutiny worldwide, as they could potentially disrupt the traditional financial system. Managing collateral reserves for fiat-collateralized stablecoins can be complex and challenging. +- There's always a possibility that a stablecoin could fail to maintain its peg, leading to a sharp drop in value. This could have wider implications for the cryptocurrency market as a whole. + +## The Future of Stablecoins + +Beyond just providing stability in the crypto market, stablecoins could be used for a wide range of purposes, including remittances, payments, smart contracts, and more. + +Stablecoins could have a significant impact on global finance by enabling faster and cheaper cross-border transactions, improving access to financial services, and potentially reshaping the global monetary system. + +There's a lot of potential for stablecoins in the future, particularly as blockchain technology continues to evolve and mature. However, the path forward will depend on a range of factors, including regulatory developments, technological advancements, and market dynamics. + +<div style={{textAlign: "center"}}> +![future](/images/blog/what-are-stablecoins/future.png) +</div> + +## Conlusion + +In conclusion, stablecoins play a critical role in the cryptocurrency market by providing stability in a volatile environment. However, they also come with their own set of challenges and risks. + +Despite the challenges, the potential benefits of stablecoins are significant, and they could play a key role in the future of finance. As the technology continues to evolve, it will be interesting to see how the role of stablecoins develops in the coming years. diff --git a/docs/blog/2024-01-01-what-is-dca.md b/docs/blog/2024-01-01-what-is-dca.md new file mode 100644 index 0000000000..e50116e61a --- /dev/null +++ b/docs/blog/2024-01-01-what-is-dca.md @@ -0,0 +1,62 @@ +--- +title: "what is dca" +slug: "what-is-dca" +date: "2024-01-01" +--- +--- +title: DCA - One of the most profitable strategies +description: Dollar cost averaging is one of the most profitable strategies for a long term investment + what-is-dca +tags: Cryptocurrency, Trading, Strategy, How-to, DCA, Educational +image: /images/blog/what-is-dca/cover.png +sidebar: false +breadcrumb: false +date: 2022-09-20 +searchable: false +difficulty_level: 1 +reading_time: 5 +author: guillaume +--- + +# DCA - One of the most profitable strategies + +![cover](/images/blog/what-is-dca/cover.png) + + +## Why dollar cost averaging is one of the most profitable strategies for a long term investment + +When it comes to investing your money, there are a lot of different strategies that you can choose from. But if you're looking for a strategy that is reliable and profitable in the long term, dollar cost averaging is definitely one to consider. In this article, we'll explain exactly what dollar cost averaging is and why it's such a great strategy for long-term investors! + +<!--truncate--> + +## What is dollar cost averaging strategy + +Dollar cost averaging is an investing technique whereby an investor purchases a fixed dollar amount of a particular investment on a regular schedule, regardless of the cryptocurrency price. By buying the same dollar amount each time, more cryptocurrency are purchased when prices are low and fewer cryptocurrency are bought when prices are high. Over time, this technique can help reduce the effects of volatility and risk on an investment portfolio. + +One of the main advantages of dollar cost averaging is that it takes the emotion out of investing. By buying into an investment on a regular schedule, investors can avoid the temptation to "time the market" by trying to predict when prices will go up or down. This strategy can also help investors stick to their long-term goals, even when markets are going through short-term ups and downs. + +## Why dollar cost averaging is even more interesting in the cryptocurrencies world + +As we all know, cryptocurrencies are a volatile market. The prices of [Bitcoin](https://www.octobot.cloud/bitcoin-prediction), [Ethereum](https://www.octobot.cloud/what-is-ethereum), [Litecoin](https://www.octobot.cloud/what-is-litecoin) and other altcoins can fluctuate wildly from day to day, and even from hour to hour. This makes investing in cryptocurrencies a risky proposition. + +However, there is a way to mitigate this risk somewhat, and that is by dollar cost averaging (DCA). DCA is an investing strategy where you spread your investment into multiple smaller investments over time, rather than investing all at once. + +For example, let's say you want to invest $100,000 in [Bitcoin](https://www.octobot.cloud/what-is-bitcoin). You could do this by buying 4 Bitcoins all at once at the current market price. However, if the price of Bitcoin falls tomorrow, you will have lost 10% of your investment. + +Instead, you could spread your investment out over a period of time using DCA. For example, you could invest $1,000 in Bitcoin every week for 100 weeks. This way, if the price of Bitcoin falls one week, you will only lose $1,000 instead of $100,000. + +DCA is a great way to mitigate risk in the volatile world of cryptocurrencies. It is also a great long-term strategy for building up your investment portfolio + +## What are the drawbacks of dollar cost averaging + +Dollar cost averaging is a technique that can be used when investing in order to minimize risk. The idea is to spread your investment into equal increments and invest at regular intervals. This technique smooths out the effects of market volatility and can help you avoid the effects of buying high and selling low. + +However, there are few drawbacks to using dollar cost averaging as your investment strategy. One of the biggest drawbacks is that it can take a long time to see results from this type of investing. This is because you are investing a set amount of money at regular intervals, regardless of what the market is doing. This means that it may take years for your investment to grow to its full potential. + +Another drawback of dollar cost averaging is that you may end up paying more for your investment than if you had invested all at once. This is because you are buying more cryptocurrency when the price is high and fewer cryptocurrency when the price is low. Over time, this can average out to a higher overall cost for your investment. + +Finally, dollar cost averaging does not guarantee that you will make a profit on your investment. It simply helps to minimize the risk of losses in a volatile market. If the market crashes, you could still lose money. + +## How to use DCA with OctoBot + +OctoBot can help you take advantage of dollar cost averaging by automatically buying a fixed dollar amount of your chosen investment on a regular schedule. This way, you can focus on your long-term goals and let OctoBot handle the day-to-day fluctuations in the market. diff --git a/docs/blog/2024-01-03-introducing-trading-modes-guides.md b/docs/blog/2024-01-03-introducing-trading-modes-guides.md new file mode 100644 index 0000000000..70def4945d --- /dev/null +++ b/docs/blog/2024-01-03-introducing-trading-modes-guides.md @@ -0,0 +1,41 @@ +--- +title: "Introducing trading modes guides" +description: "Discover the multiple ways to trade with with OctoBot using trading modes based on DCA, grid trading, AI and TradingView" +slug: "introducing-trading-modes-guides" +date: "2024-01-03" +authors: ["guillaume"] +tags: ["Trading", "Educational"] +image: "/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg" +--- + +# Introducing trading modes guides + +When trading with OctoBot, you use a trading mode. [Trading modes](/guides/octobot-trading-modes/trading-modes) are responsible for how to create, maintain and cancel orders. + +<!--truncate--> + +Trading modes are a key component of any trading strategy and are compatible with each [supported exchange](/guides/exchanges). + +<div style={{textAlign: "center"}}> + <div> + ![Person looking at his screens using many trading strategies](/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg) *A trader using many trading strategies.* + </div> +</div> + +Based on your feedback, we created [guides for each trading modes](/guides/octobot-trading-modes/trading-modes) to make clear what they are made for and how to use them. We are looking forward to getting your feedback on those guides. + +## Breakdown of an OctoBot strategy + +An OctoBot strategy is usually split in 2 parts: +1. The trading mode: it decides how to create ordres on exchange, how much to put in each order, when to cancel them +2. The evaluators: they are sending signals to the trading mode so activate it when necessary. We could say they "wake up" the trading mode when something happens + +Note: Some trading mode, such as grid-based ones or TradingView automations are not using any evaluator, they are "waken up" automatically either when an order is filled or when receiving a notification fron TradingView. + +## Types of trading modes + +When using the [OctoBot trading bot](https://www.octobot.cloud/trading-bot), you have access to [many types of trading modes](/guides/octobot-trading-modes/trading-modes#built-in-trading-modes). Here are the main trading modes categories: + +- **Statistics-based trading modes**: Entries (and possibly exits) are computed using statistics. It might be from technical evaluators, AI, social medias, price events or many other things. +- **Low-risk grid trading modes**: Buy and sell orders are created deterministically according to the trading mode's configuration. There is no probability in those algorithms. +- **Automated TradingView strategies**: Entries and exits are created based on your TradingView signals. In this trading mode, the core of your strategy lies on TradingView and Octobot acts as an automation to synchronize your strategy with any exchange account. diff --git a/docs/blog/2024-02-04-new-octobot-cloud-plans-and-trading-bots.md b/docs/blog/2024-02-04-new-octobot-cloud-plans-and-trading-bots.md new file mode 100644 index 0000000000..061f0ac930 --- /dev/null +++ b/docs/blog/2024-02-04-new-octobot-cloud-plans-and-trading-bots.md @@ -0,0 +1,69 @@ +--- +title: "New OctoBot cloud plans" +description: "OctoBot cloud is releasing updated cloud plans to automate your crypto trading strategies and invest easily." +slug: "new-octobot-cloud-plans-and-trading-bots" +date: "2024-02-04" +authors: ["guillaume"] +tags: ["AI", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png" +--- + + + +# New OctoBot cloud plans and trading bots + +![a man relaxing in his couch while octobot is making money by automating cryptocurrency strategies dark](/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +2024 will be a big year for OctoBot ! + +<!--truncate--> + +In 2023, we [announced the launch of the new Octobot cloud](introducing-the-new-octobot-cloud) where we presented our approach around trading strategies and our will to split OctoBot in 2 worlds: + +Where it is easy to automate crypto investment strategies. This offer is dedicated to users who don't want to create their own trading strategies or are looking for diversification in their own trading systems. We are expanding these offers to include more options and bring you more value when using OctoBot cloud strategies. +It is what is presented on [octobot.cloud](/) + +## Investor plans changes + +We have been thinking a lot about how to add more value to the ready-made trading strategies offers of OctoBot cloud. Our goal is to keep those as simple and accessible as possible to appeal to crypto investors who don't want to create their own strategy but still want to profit from great investment strategies. + +Of course, the current Investor plan remains free and still allows using many OctoBot cloud strategies with [real exchange funds](/investing/invest-with-your-strategy) as well as [vitrual funds](/investing/paper-trading-a-strategy). + +We are now adding 2 other related plans that build on top of the Investor plan. + +### The Investor Plus plan +The [Investor Plus plan](introducing-the-investor-plus-plan) is great to profit from the full OctoBot cloud at an affordable price. + +It allows using each and every OctoBot strategy, whether it is based on crypto basket, AI, DCA or Grid investments. + +It also enables to use up to 10 simultaneous real trading or risk-free vitrual money OctoBots + +**[Try Investor Plus](https://www.octobot.cloud/pricing)** + +Take a look at our [Investor Plus dedicated article](introducing-the-investor-plus-plan) to know more about this plan's details. + +### The Pro plan + +The [Pro plan](introducing-the-pro-plan) is designed for users to want to accurately fine-tune their strategies or even automate their strategies. + +It includes all the Investor Plus plan benefits. + +The Pro plan grants access to [TradingView OctoBots](/investing/tradingview-automated-trading). TradingView OctoBots make it easy to build and automate any type of strategy directly from TradingView on any exchange using real or vitrual funds. + +It also enables you to fine-tune your running OctoBots by offering to easily: +- Cancel or replace any OctoBot orders +- Manually create buy and sell orders whenever you want, on the cryptocurrencies you want + +**[Use Pro](https://www.octobot.cloud/pricing)** + +Learn more on the Pro plan on [our dedicated article](introducing-the-pro-plan). + +## Final words + +We have been thinking about many ways to offer paid plans, and we believe those new plans are great as they leave OctoBot very accessible: +- You can keep using OctoBot cloud strategies free in an unlimited way as you might be already doing when using the Investor plan +- The [free open source version of OctoBot](/guides/octobot-installation/install-octobot-on-your-computer) will benefit from [OctoBot cloud](/) plans subscription revenues, enabling it to keep growing while remaining free + +And on the other hand, if you are looking for additionnal features to simplify and easily monitor your trading, create advanced trading strategies or get personalized support from the OctoBot team, it is now all possible. + +We hope you will like the new OctoBot cloud plans. We are looking forward to receiving your feedback on those ! diff --git a/docs/blog/2024-02-05-trading-on-coinex-with-octobot.md b/docs/blog/2024-02-05-trading-on-coinex-with-octobot.md new file mode 100644 index 0000000000..ace53d2112 --- /dev/null +++ b/docs/blog/2024-02-05-trading-on-coinex-with-octobot.md @@ -0,0 +1,41 @@ +--- +title: "Trading on CoinEx with OctoBot" +description: "OctoBot has just partners with CoinEx to enable crypto traders to easily automate any trading strategy on CoinEx." +slug: "trading-on-coinex-with-octobot" +date: "2024-02-05" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "CoinEx"] +image: "/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png" +--- + + + +# Trading on CoinEx with OctoBot + +![trading on coinex with octobot](/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png) + +As mentioned on <a href="https://twitter.com/coinexcom/status/1750701867867537762" rel="nofollow">this CoinEx announcement</a>, the OctoBot team is proud to announce <a href="https://www.coinex.com/register?refer_code=d6muk" rel="nofollow">CoinEx</a> as latest [OctoBot partner exchange](/guides/exchanges#partner-exchanges---support-octobot). + +<!--truncate--> + +## Use your trading strategy on CoinEx + +Starting from OctoBot 1.0.7, it is now possible to automate any [OctoBot trading strategy](/guides/octobot-trading-modes/trading-modes) on the CoinEx exchange when using the [OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +You can now use OctoBot to trade on CoinEx: + +- Using risk-free [paper trading and simulated money](/guides/octobot-usage/simulator) to test your strategy in live condition +- With [backtesting](/guides/octobot-usage/backtesting) to quickly optimize your strategies performances +- With your real funds on CoinEx to really profit from your strategy + +## The technicals + +Starting from OctoBot 1.0.7, CoinEx SPOT trading is [fully supported](/guides/exchanges/coinex), as well as its websocket connection allowing to speed up market data updates. + +CoinEx has also been integrated within OctoBot's regularly tested exchanges. This means that the OctoBot team makes sure that the CoinEx connection remains stable and we (the OctoBot team) will do everything that is possible to maintain this state to offer you the best possible trading automation on CoinEx. + +## Final words + +At OctoBot, we are trying to make trading strategies automation as accessible as possible to everyone. Following this philosophy, it's important for us to enable automated trading on each exchange that is important to our users. + +If you are trading on an exchange that is not currently supported, please create or upvote the post associated to your exchange on <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">our feedback website</a>. diff --git a/docs/blog/2024-02-23-introducing-the-investor-plus-plan.mdx b/docs/blog/2024-02-23-introducing-the-investor-plus-plan.mdx new file mode 100644 index 0000000000..6cb370db14 --- /dev/null +++ b/docs/blog/2024-02-23-introducing-the-investor-plus-plan.mdx @@ -0,0 +1,115 @@ +--- +title: "Introducing the Investor Plus plan" +description: "Discover the new Investor Plus plan and invest with every OctoBot strategy and crypto baskets you want, use more OctoBots and enjoy other benefits" +slug: "introducing-the-investor-plus-plan" +date: "2024-02-23" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png" +--- + + + +# Introducing the Investor Plus plan + +![octobot investor plus plan announcement with TradingView automations](/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png) + +After months of brainstorming, trial and errors including good and bad ideas, our new plan is finally here. + +We are very pleased to announce the release of the Investor Plus subscription plan on OctoBot cloud ! + +## What is Investor Plus ? + +Investor Plus is OctoBot cloud's first account-wide subscription plan. It will improve your whole OctoBot cloud account. + +By choosing the Investor Plus plan, you will gain immediate access to: + +- Every Crypto basket +- Every AI, smart DCA and Grid strategy +- Up to 10 simultaneous OctoBots +- Bespoke email news recaps _(coming soon)_ + +**[Try Investor Plus](https://www.octobot.cloud/pricing)** + +## Crypto Baskets + +[Crypto baskets](https://www.octobot.cloud/features/crypto-basket) are a simple and efficient way to invest in multiple cryptocurrencies. There are many crypto baskets enabling you to: + +- Instantly get a diversified investment in the crypto market by automatically investing in the best cryptocurrencies +- Invest in multiple cryptocurrencies following a specific theme in which you believe, such as <a href="https://www.coingecko.com/en/categories/artificial-intelligence" rel="nofollow">artificial intelligence</a>, <a href="https://www.coingecko.com/en/categories/decentralized-finance-defi" rel="nofollow">decentralized finance</a>, <a href="https://www.coingecko.com/en/categories/gaming" rel="nofollow">gaming</a> and many more. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="WOgS-VhUyIY" title="Investing in the best crypto" /> + +<!--truncate--> + +Using a [crypto basket](https://www.octobot.cloud/features/crypto-basket) means you don't have to manually choose each cryptocurrency to buy. +You can invest in a theme that interests you and as a result <a href="https://www.investopedia.com/terms/d/diversification.asp" rel="nofollow">diversify your investment</a> to reduce risks. + +## AI, Smart DCA and Grid strategies + +OctoBot comes with many preconfigured strategies. Some of them are available for free and other require the Investor Plus plan. Here is an overview of the different strategies available on OctoBot. + +### AI strategies + +Artificial intelligence is one of the topics OctoBot focuses a lot on, and it also comes with investment strategies. + + +<YouTube id="BV4ZHQrIpRQ" title="ChatGPT Crypto trading strategies using OctoBot" /> + +We have been experimenting with a lot with different AI systems to improve investment strategies, and we created with several AI-powered DCA-based strategies. + +Those AI strategies have the advantage to combine the simplicity of DCA strategies and AI to increase profits. Take a look at our [Trading using ChatGPT](trading-using-chat-gpt) and [ChatGPT strategy deep dive](chatgpt-strategy-deep-dive) to learn more about AI strategies in OctoBot. + +### Smart DCA Strategies + +<a href="https://www.investopedia.com/terms/d/dollarcostaveraging.asp" rel="nofollow">Dollar Cost Averaging (or DCA)</a> is a very well known investment strategy where you buy on +a regular basis in order to profit from local price drops and gradually sell +afterward. + +What we call Smart DCA in OctoBot is a very simple yet powerful trading strategy, as it lowers the risk by spreading trades over different prices while taking profit on a regular basis. + + +<YouTube id="519pwSV1uwE" title="Smart DCA with OctoBot" /> + +DCA strategies can be used to invest on multiple crypto at the same time, therefore reducing risk by diversifying investments and taking advantage of coins volatility. + +Learn more about DCA strategies in OctoBot on our [Smart DCA blog article](smart-dca-making-of). + +### Grid Strategies + +Grids are low risk trading strategies that generate small but constant profits as long as conditions are met. + +<div style={{textAlign: "center"}}> + <div> + ![grid trading illustrated by a man stepping up on green stairs grabbing + coins](/images/blog/introducing-the-investor-plus-plan/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) + </div> +</div> + +When using a Grid strategy, your OctoBot will split your portfolio into the two assets of the grid and place buy and sell orders around the current price at constant price intervals. + +Then, when the price changes and an order is filled, an order of the opposite side is created. Once this order is then filled, profits are generated. + +A Grid strategy is great to profit from markets that are stable within the price window of the grid: the more the price moves within this window, the more profits are earned. + +## More OctoBots + +With the Investor Plus plan, you can use up to 10 simultaneous OctoBots on your account. + +Those OctoBots can: + +- Trade on any supported exchange (see the [guide on using multiple OctoBots on the same account](/investing/having-multiple-octobot-strategies) to use many OctoBots on an exchange account) +- Connect to your exchange account portfolio to invest using your funds with your chosen strategy +- Use [vitrual money](/investing/paper-trading-a-strategy) to risk-free test as many strategies as you wish + +## Free trial + +Are you interested in trying Investor Plus ? We created a free trial for you to try it ! + +**[Try Investor Plus](https://www.octobot.cloud/pricing)** + +## Final words + +We hope you will love the new Investor Plus plan. We designed it to add flexibility to OctoBot cloud and make your crypto investments easier, and we are really excited to know your feedback about it ! diff --git a/docs/blog/2024-02-27-bingx-wheel-of-fortune-event.md b/docs/blog/2024-02-27-bingx-wheel-of-fortune-event.md new file mode 100644 index 0000000000..c14e51f6d5 --- /dev/null +++ b/docs/blog/2024-02-27-bingx-wheel-of-fortune-event.md @@ -0,0 +1,61 @@ +--- +title: "BingX Wheel of Fortune Event" +description: "Join the BingX Wheel of Fortune event to earn up to 1000 USDT simply by trading using OctoBot cloud or your own OctoBot trading robot" +slug: "bingx-wheel-of-fortune-event" +date: "2024-02-27" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "BingX", "Event"] +image: "/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png" +--- + + + +# BingX Wheel of Fortune Event + +![bingx and octobot wheel of fortune event with 1000 usdt to earn](/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png) + +In december, we [announced](octobot-1-0-4-whats-new) the official support of BingX in OctoBot, <a href="https://bingx.com/invite/Z4UUVX/" rel="nofollow">BingX</a> is now also supported on [OctoBot cloud strategies](https://www.octobot.cloud/). + +<!--truncate--> + +Today, we are glad to announce that starting from the 28th of February till the 18th of March, BingX will be hosting a special event for OctoBot users with up to 1000 USDT to win ! + +## How the Wheel of Fortune works + +After <a href="https://bingx.com/invite/Z4UUVX/" rel="nofollow">creating your BingX account</a> (if you don't have one already) and registering on the <a href="https://bingx.com/en-us/act/turntable/8628992176/" rel="nofollow">BingX Wheel of Fortune event</a>, you will earn draws by simply trading on BingX using any of: + +- BingX pre-configured strategies on [OctoBot cloud](https://www.octobot.cloud/). +- The new [TradingView OctoBots](/investing/tradingview-automated-trading) from [OctoBot cloud](https://www.octobot.cloud/) to simply automate trades using price events, indicators or Pine Script strategies directly from TradingView. +- Your own OctoBot strategy using [cloud or self hosted OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +<div style={{textAlign: "center"}}> + **[Join the event](https://bingx.com/en-us/act/turntable/8628992176)** +</div> + +Note: paper trading bots using simulated money are not counted in generating draws, trades must happen on your BingX account. + +## Every OctoBot user can win + +This is the great point of the event: + +- Everyone trading using Octobot on BingX will earn draws. +- Each draw carries an equal chance to instantly win up to 1000 USDT. + +It means that there is no need to be the trader with the largest trading volume or the best PNL to be eligible for rewards. + +OctoBot is designed to help the most people automating their investment startegies and the same goes for the BingX Wheel of Fortune: it is also profiting everyone. + +We can't wait to know the rewards you will win on BingX with this event, join the OctoBot <a href="https://t.me/octobot_trading" rel="nofollow">OctoBot Telegram group</a> to tell us what you earned ! + +## Earn more by spreading the word + +Your might have friends who would like to [easily follow pre-configured investment strategies](/investing/introduction), [automate they trading using TradingView](/investing/tradingview-automated-trading) or [create their own trading bot](https://www.octobot.cloud/trading-bot). + +We believe this BingX event to be a great time to tell your friends about OctoBot as they might the among the lucky winners of the event ! And it won't reduce your own chances to win either. + +Also, did you know that OctoBot has a referral program ? When your friends sign up using your OctoBot referral link: + +- They get a $5 discount on the subscription of their choice. +- You receive 25% of their subscription fees as well as a part of the trading fees OctoBot cloud receives from partner exchanges on their real trading OctoBot. + +Get your referral link on <a href="https://www.octobot.cloud/rewards" rel="nofollow">your referral dashboard</a>. diff --git a/docs/blog/2024-03-20-introducing-the-pro-plan.mdx b/docs/blog/2024-03-20-introducing-the-pro-plan.mdx new file mode 100644 index 0000000000..60ed1532aa --- /dev/null +++ b/docs/blog/2024-03-20-introducing-the-pro-plan.mdx @@ -0,0 +1,212 @@ +--- +title: "Introducing the Pro plan" +description: "Discover the new Pro plan and fine-tune your OctoBot strategies, automate your trades from TradingView, configure your crypto baskets and more." +slug: "introducing-the-pro-plan" +date: "2024-03-20" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png" +--- + + + +# Introducing the Pro plan + +![octobot trading plan announcement with TradingView automations and advanced coins trading](/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png) + +Following the launch of the [Investor Plus plan](introducing-the-investor-plus-plan), we are very pleased to announce the launch of the new Pro plan. + +## What is in the Pro plan ? + +The Pro plan gives you access to the full power of OctoBot cloud. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="LwpDxDwGF0w" title="Trading configuration in OctoBot" /> + +<!--truncate--> + +It naturally includes everything from the Investor and [Investor Plus plan](introducing-the-investor-plus-plan) plans, which means : + +- Investing on all supported exchanges +- Access to all preconfigured strategies and crypto baskets +- Up to 10 real or paper trading OctoBots +- Bespoke email news recaps _(coming soon)_ + +With the Pro plan, you also benefit from : + +- OctoBot fine-tuning to change the way your OctoBots are investing +- Manually create orders from your OctoBots +- TradingView strategies automation +- Custom crypto baskets _(coming soon)_ +- Up to 20 real or paper trading OctoBots +- Priority support + +**[Switch to Pro](https://www.octobot.cloud/pricing)** + +## Fine tune your OctoBots + +Using the Pro plan enables you to [change the way any of your OctoBots buy and sell their crypto](/investing/fine-tune-your-octobots). + +### Manually create orders from your OctoBots + +Make your OctoBots buy or sell their crypto + +<div style={{textAlign: "center"}}> + ![buy and sell crypto directly from your + OctoBot](/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-directly-from-your-octobot.png) +</div> +With the Pro plan, you can from any OctoBot: - Easily trade directly by yourself +on your exchange or your [risk-free paper trading +account](/en/investing/paper-trading-a-strategy) - Create any kind of buy or +sell orders at any time - Rebalance your portfolio - Exclude a part of your +funds from your trading strategies + +### Make your OctoBots trade your way + +You would prefer an OctoBot cloud strategy to buy or sell differently ? + +<div style={{textAlign: "center"}}> + ![cancel orders directly from your + OctoBot](/images/blog/introducing-the-pro-plan/cancel-orders-directly-from-your-octobot.png) +</div> +With the Pro plan, all your OctoBots can: - Cancel any OctoBot cloud or +TradingView strategy order - Replace existing orders by your own + +### Follow your OctoBots' activity + +Profit from a clear history of trades, automations and cancel commands that happened with your Octobot. + +<div style={{textAlign: "center"}}> + ![buy and sell crypto octobot activity + history](/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-octobot-activity-history.png) +</div> + +Learn more on OctoBot fine-tuning on the [Investor guide](/investing/fine-tune-your-octobots). + +## TradingView automation with OctoBots + +> What would be the best way for most crypto investors to create their investment strategies? + +At OctoBot cloud, we spent a lot of time thinking about how to offer customizable strategies in the simplest way possible. While the preconfigured OctoBot cloud strategies are and will remain available for free, it's important for us to enable crypto investors to invest with OctoBot cloud according to their own ideas. + +When thinking about the different possibilities, we realized that what is important for crypto investors when creating an investment strategy is that it should be: + +- **Clear and appealing**: Using a graphical tool to create a strategy and not requiring any coding skill. +- **Adapting to the investor** level of expertise: We want OctoBot cloud to enable the best strategies for you as an investor. It is important that OctoBot adapts to the knowledge of its users. +- **Simple to follow and monitor**: It is necessary to always know what trades are open and what were the previous activities. + + +<YouTube id="TNRMUP6-a_g" title="Automating TradingView" /> + +After testing a lot of possibilities, we realized that integrating <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> is by far the best option as it is: + +- **Very well known**: most crypto investors know how to use TradingView and use it on a regular basis +- **All about visualizing**: TradingView enables to visually analyse prices and use indicators and Pine Script strategies +- **Extendable** : TradingView can be easily and securely connected to other services such as OctoBot cloud. + +So we decided to integrate TradingView strategies into OctoBot cloud following those principles. You can now easily automate your own investment strategies using [TradingView and OctoBot](/investing/tradingview-automated-trading). + +### Create your strategy on TradingView + +With TradingView, it is easy to create automated alerts based on either: + +- Price events, for example "BTC crosses $50 000" +- Any technical indicator (free or paid), such as "RSI is higher than 80" or "the 9 days moving average just crossed the 21 days moving average" +- Pine Script strategies, which are integrated TradingView strategies that can be optimized using backtesting and visually drawn directly on charts. + +Let's take an example with an <a href="https://www.investopedia.com/terms/e/ema.asp" rel="nofollow">Exponential Moving Averages</a> (or EMA) based strategy. The concept is to buy and sell when the 9 and 21 candles EMA are crossing each other. + +The TradingView strategy looks like this: it just takes 2 regular EMA indicators ... +![TradingView ema strategy illustration with 2 buy and 2 sell](/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +... with 2 alerts: a BUY and a SELL. + +<div style={{textAlign: "center"}}> + ![TradingView ema strategy + configuration](/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-configuration.png) +</div> + +This is 100% of the configuration of this TradingView Strategy. Pretty simple ! +Learn more on how to automate TradingView alerts from price events, indicators or Pine Script strategies on our [TradingView alerts automation guide](/investing/tradingview-alerts-automation). + +The next step is to make the strategy create orders on real crypto exchanges to turn it into a fully functional automated investment strategy. + +### Automating trades from TradingView + +To turn TradingView alerts into trades, we created a new system called `Automations` that goes in pair with TradingView OctoBots: an automation is an action such as creating or cancelling orders. Automations can be bound to TradingView alerts to easily automate every aspect of your TradingView strategy trades on exchanges. + +Going back to the EMA strategy we just created on TradingView, here is what it looks like on the OctoBot side. + +![octobot TradingView trading side of ema strategy illustration with 2 buy and 2 sell](/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +You might have noticed the automation identifiers in the `Message` section of the BUY and SELL alerts on TradingView, they are used to tell OctoBot which automation to trigger when the alert is received: `5a6e0e4d-4c8a-4212-881c-3174cd322002` is `Buy BTC - 50% USDT` and `b1791518-b92c-4dc7-8dc1-4905bcaf3165` is `Sell BTC - 100%`. + +<div style={{textAlign: "center"}}> + ![octobot TradingView market buy btc + automation](/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-market-buy-btc-automation.png) +</div> + +The main advantage of this approach is that it is very simple and powerful at the same time: you create your TradingView strategy and then just attach automations to your OctoBot. +Moreover, automations are easy to configure and unlimited: you can make a TradingView OctoBot with very simple buy and sell automations or use an advanced set of automations to invest with multiple coins and different settings if you want to. + +Are you wondering how to use TradingView OctoBots ? Check out our [TradingView trading tutorial](/investing/tradingview-trading-tutorial). + +### Multiple order types + +Automations allow you to further optimize your entry and exit prices by leveraging limit orders using a fixed or relative price. + +<div style={{textAlign: "center"}}> + ![buy sol with 50 usdt at 10 percent discount octobot tradingview + automation](/images/blog/introducing-the-pro-plan/buy-sol-with-50-usdt-at-10-percent-discount-octobot-tradingview-automation.png) +</div> + +Profit from orders fine-tuning to easily check, cancel or replace any order created from your TradingView automations. + +### Advanced order amounts + +Optimize single and multi-coin TradingView strategies by accurately configuring your TradingView automation order amounts. + +<div style={{textAlign: "center"}}> + ![sell 1 avax at 120 usdt octobot tradingview + automation](/images/blog/introducing-the-pro-plan/sell-1-avax-at-120-usdt-octobot-tradingview-automation.png) +</div> + +Configure your automation order amounts according to: + +- A percent of your portfolio funds +- A given amount of the traded coin +- A specific amount of USDT or any quote currency of your traded pair + +Learn more on TradingView OctoBots automations on [the automations guide](/investing/tradingview-automated-trading#automations-to-create-your-strategies). + +**[Switch to Pro](https://www.octobot.cloud/pricing)** + +:::info + We will keep working on adding new types of automations to enable more options + and increase possibilities. Please tell us if you need new types of + automations, and we will our best to integrate it ! +::: + +## Configurable Crypto Baskets + +:::info + Available soon +::: + +<div style={{textAlign: "center"}}> + <div> + ![configured crypto + basket](/images/blog/introducing-the-pro-plan/crypto-basket-landing.png) + </div> +</div> + +Configure the content of your [crypto baskets](https://www.octobot.cloud/features/crypto-basket) directly from your OctoBot. + +- Add cryptocurrencies to your baskets. +- Remove cryptocurrencies from your baskets. +- Configure the ratio of each crypto within your baskets. + +## Priority support + +If you have any questions regarding OctoBot, the investment strategies, TradingView strategies or anything else, it will be our priority to bring you assistance. diff --git a/docs/blog/2024-04-15-invest-with-crypto-baskets.mdx b/docs/blog/2024-04-15-invest-with-crypto-baskets.mdx new file mode 100644 index 0000000000..60e179421d --- /dev/null +++ b/docs/blog/2024-04-15-invest-with-crypto-baskets.mdx @@ -0,0 +1,96 @@ +--- +title: "Invest in crypto easily with crypto baskets" +description: "Discover a new way to invest and easily diversify your crypto portfolio with theme-based baskets" +slug: "invest-with-crypto-baskets" +date: "2024-04-15" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/invest-with-crypto-baskets/crypto-basket.png" +--- + + + +# Invest in crypto easily with crypto baskets + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="WOgS-VhUyIY" title="Investing in the best crypto" /> + +In our goal to make cryptocurrency investment simple and accessible for everyone, we are very pleased to announce the release of [crypto baskets](https://www.octobot.cloud/features/crypto-basket). + +<!--truncate--> + +## What are crypto baskets? + +A crypto basket is a collection of different cryptocurrencies grouped together based on a specific theme. + +<div style={{textAlign: "center"}}> + <div> + ![crypto baskets investment feature + announcement](/images/blog/invest-with-crypto-baskets/crypto-basket.png) + </div> +</div> + +For example, you can find a basket composed of cryptocurrencies related to <a href="https://www.coingecko.com/en/categories/artificial-intelligence" rel="nofollow">artificial intelligence</a>, <a href="https://www.coingecko.com/en/categories/meme-token" rel="nofollow">memes</a>, <a href="https://www.coingecko.com/en/categories/non-fungible-tokens-nft" rel="nofollow">NFTs</a>, <a href="https://www.coingecko.com/en/categories/metaverse" rel="nofollow">metaverse</a>, and many more. + +## Why using a crypto basket? + +Using a crypto basket means you don't have to manually choose each cryptocurrency to buy. +You can invest in a theme that interests you and as a result <a href="https://www.investopedia.com/terms/d/diversification.asp" rel="nofollow">diversify your investment</a> to reduce risks. + +Crypto baskets also allow you to profit from all variations in cryptocurrencies. +As soon as the price of a cryptocurrency in the basket increases, its share in your portfolio will increase and may exceed the percentage allocation of the basket. +In this case, a portfolio rebalancing operation will be triggered. This operation will sell or buy cryptocurrencies to rebalance the portfolio. + +Therefore, if a cryptocurrency in your basket has increased by 50%, during rebalancing you will cash in the profit and reinvest it automatically in other cryptocurrencies in the basket. + +You even have the option to use multiple baskets to invest in different themes simultaneously. +It can help you reduce risk even more and allow you to capitalize on market variations. + +## Can I test crypto baskets with virtual money first? + +Yes, with OctoBot cloud, you can test any crypto basket for free with [virtual money](/investing/paper-trading-a-strategy). +Using virtual money allows you to understand how baskets work and to check their performance before switching to real money. + +## Who creates these crypto baskets? + +Crypto baskets on OctoBot cloud are mainly created using <a href="https://www.coingecko.com/en/categories" rel="nofollow">Coingecko's categories</a>, a platform that ranks different cryptocurrencies and exchanges. +We also use the market capitalization ranking provided by <a href="https://www.coingecko.com/en" rel="nofollow">Coingecko</a> to offer other types of baskets. + +<div style={{textAlign: "center"}}> + <div> + ![An example of a crypto basket with the top 3 cryptocurrencies by market + capitalization](/images/blog/invest-with-crypto-baskets/use-top-market-cap-basket.png) + </div> +</div> + +For example, we have baskets composed of the three largest market capitalizations. It allows you to invest in these three cryptocurrencies simultaneously and benefit from their price fluctuations. + +You can explore some of our baskets on the [crypto baskets page](https://www.octobot.cloud/features/crypto-basket). + +## How are they updated? + +Crypto baskets on OctoBot cloud are automatically updated regularly to follow market changes. The frequency may vary from one basket to another. + +When your crypto portfolio deviates too much from the chosen basket's distribution, OctoBot automatically rebalances it to maintain the same distribution as the basket. + +## How can I invest with a crypto basket? + +With OctoBot cloud, you can invest for free (with a 14-day trial period) in any [crypto basket](https://www.octobot.cloud/features/crypto-basket) by following these steps: + +1. Create an account or log in to your OctoBot cloud account +2. Follow the introduction or go to the <a href="https://www.octobot.cloud/explore" rel="nofollow">strategy explorer</a> to choose the theme of your crypto basket +3. Select a [virtual money](/investing/paper-trading-a-strategy) or a real money account (to use real money, you'll need to [connect your exchange account](/investing/investor-faq#how-can-i-connect-my-exchange-account-to-octobot-)) +4. Launch your OctoBot to start investing + +## Can I create my own basket? + +Yes, you can create your own basket with OctoBot cloud's [Pro](introducing-the-pro-plan) plan. +This plan allows you to customize an existing basket or create your very own basket with the distribution you want. +Warning: if you use custom baskets, you won't benefit from automatic updates of the basket you customized. + +No more spending hours choosing the best cryptocurrencies. With crypto baskets, investing becomes simpler and more efficient. + +<div style={{textAlign: "center"}}> + **[Start investing with a crypto basket](https://www.octobot.cloud)** +</div> diff --git a/docs/blog/2024-04-16-coinbase-and-binance.us-trading-bot.mdx b/docs/blog/2024-04-16-coinbase-and-binance.us-trading-bot.mdx new file mode 100644 index 0000000000..b2eeaa0a0e --- /dev/null +++ b/docs/blog/2024-04-16-coinbase-and-binance.us-trading-bot.mdx @@ -0,0 +1,102 @@ +--- +title: "Coinbase and Binance.us Trading Bots" +description: "Automate your crypto investments using crypto baskets, DCA, AI, Grid or TradingView on Coinbase and Binance.us with OctoBot Cloud." +slug: "coinbase-and-binance.us-trading-bot" +date: "2024-04-16" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Exchange", "Binance.us", "Coinbase"] +image: "/images/blog/coinbase-and-binance.us-trading-bot/binance.us-and-coinbase-support-on-octobot.png" +--- + + + +# Coinbase and Binance.us Trading Bots + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="N6zwmPGvBaw" title="Coinbase Trading Bot" /> + +Coinbase and Binance.us support on OctoBot cloud is our most requested feature by far and we are glad to announce that both exchanges are now integrated within OctoBot cloud! + +<!--truncate--> + +You can now easily automate your crypto investments using OctoBot cloud both on <a href="https://www.coinbase.com/" rel="nofollow">Coinbase</a> and <a href="https://www.binance.us/" rel="nofollow">Binance.us</a> with your exchange account or using [risk-free paper trading](/investing/paper-trading-a-strategy). + +## Invest using Coin Baskets on Coinbase and Binance.us + +[Coin Baskets](invest-with-crypto-baskets) enable you to easily invest in the whole crypto market or in specific themes you believe in. + +<div style={{textAlign: "center"}}> + ![octobot crypto basket featuring bitcoin ethereum solana and + dogecoin](/images/blog/coinbase-and-binance.us-trading-bot/crypto-basket.png) +</div> + +When using Coin Baskets, your OctoBot will split your portfolio holdings among the assets of the basket. Each basket is automatically updated according to <a href="https://www.coingecko.com/" rel="nofollow">CoinGecko</a> rankings and categories and coin availability on each exchange. + +**[Invest with OctoBot](https://www.octobot.cloud)** + +### Top Crypto Baskets + +Top crypto baskets enable you to invest in the best cryptos of the market according to their marketcap on platforms sur as <a href="https://www.coingecko.com/" rel="nofollow">CoinGecko</a>. + +Investing in top crypto baskets is a great way to diversify and profit from the whole crpyto market. Following a top crypto basket will maintain your exchange portfolio balanced among coins of the basket therefore making sure that you profit from price increases of each of those coins. + +### Theme-oriented Crypto Baskets + +Theme-oriented crypto baskets on the other hand enable you to invest in themes as defined in <a href="https://www.coingecko.com/en/categories" rel="nofollow">CoinGecko categories</a>. + +Crypto baskets that are oriented around a theme are useful to profit from global trends and invest topics you believe in such as Artificial Intelligence, NFTs, Real World assets, Meme coins and more. + +### Learn more about Crypto Baskets + +At OctoBot, we believe that investing using crypto baskets is on of the best ways to invest in crypto as it enables to invest: + +- Into all the main crypto of the market or whole themes instead of cherry-picking coins +- While reducing risk by diversifying over a multiple coins +- Using a neutral basis: marketcap & category-based ranking + +If you want to learn more, have a look a our [detailed crypto baskets article and video](invest-with-crypto-baskets) + +## Profit from DCA, AI, Grid strategies on Coinbase and Binance.us + +With OctoBot cloud, you can also invest using pre-configured Dollar Cost Averaging, Artificial Intelligence and Grid strategies. + +<div style={{textAlign: "center"}}> + ![octobot collaborating with chatgpt + light](/images/blog/coinbase-and-binance.us-trading-bot/octobot-collaborating-with-chatgpt-light.png) +</div> + +Those strategies can be used for free on OctoBot cloud and are available alongside their historical performances + +### Smart Dollar Cost Averaging Strategies + +[Smart Dollar Cost Averaging strategies](smart-dca-making-of) are a powerful way to profit from the rise of multiple coins leveraging multiple small buy and sell orders in order to minimize your buying price and maximize your selling price. + +To learn more about smart DCA strategies, check out our [Smart DCA with OctoBot article](smart-dca-making-of) + +### Artificial Intelligence Strategies + +You can also take Smart DCA strategies one step further and integrate AI in the decision making of your DCA strategies entries. + +With OctoBot cloud, you can use Smart DCA strategies using ChatGPT models to analyse market trends and decide to buy when ChatGPT considers the timing right. + +Here is our [ChatGPT trading article](trading-using-chat-gpt) covering AI strategies in details. + +## Automate your TradingView strategies on Coinbase and Binance.us + +Last but not least, OctoBot cloud enables you to easily automate your TradingView strategies directly on your Binance.us and Coinbase accounts. + +<div style={{textAlign: "center"}}> + ![tradingview automation illustrated by tradingview + logo](/images/blog/coinbase-and-binance.us-trading-bot/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +This way, you can automate trades based on: + +- Market price changes +- Built-in or custom indicators +- Pine Script strategies + +Here is our [guide showcasing TradingView OctoBots](/investing/tradingview-automated-trading) with videos and strategies examples to learn more. + +**[Invest with OctoBot](https://www.octobot.cloud)** diff --git a/docs/blog/2024-04-18-one-click-cloud-deployment-with-octobot-1-0-9.md b/docs/blog/2024-04-18-one-click-cloud-deployment-with-octobot-1-0-9.md new file mode 100644 index 0000000000..025f483c4b --- /dev/null +++ b/docs/blog/2024-04-18-one-click-cloud-deployment-with-octobot-1-0-9.md @@ -0,0 +1,97 @@ +--- +title: "One Click Cloud Deployment with OctoBot 1.0.9" +description: "OctoBot 1.0.9 is released ! Deploy your OctoBot from the DigitalOcean marketplace, create your custom crypto baskets, use the improved TradingView Trading Mode" +slug: "one-click-cloud-deployment-with-octobot-1-0-9" +date: "2024-04-18" +authors: ["paul"] +tags: ["Tradingview", "Hosting", "Release"] +image: "/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png" +--- + + + +# One Click Cloud Deployment with OctoBot 1.0.9 + +![octobot 1.0.9 ditigtalocean 1 click deployment custom crypto baskets](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png) + +## One Click Cloud Deployment + +Running your OctoBot trading robot on the cloud has never been **easier and cheaper**! OctoBot is now available as a 1-Click Droplet on the <a href="https://digitalocean.pxf.io/octobot-app" rel="nofollow">official DigitalOcean marketplace</a>. + +<!--truncate--> + +<div style={{textAlign: "center"}}> + ![octobot on the digitalocean + marketplace](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-on-the-digitalocean-marketplace.png) +</div> +Using DigitalOcean, you can now run simply your own OctoBot trading bot on the +cloud and have it available and automating your trading strategies 100% of the +time. + +<div style={{textAlign: "center"}}> + **[Deploy your OctoBot](/guides/octobot-installation/cloud-install-octobot-on-digitalocean)** +</div> + +Having your OctoBot up and running on DigitalOcean only takes **1 click** and starting from as cheap as **$6 per month** when using the minimal setup. + +## Introducing OctoBot 1.0.9 + +We're glad to announce the release of OctoBot 1.0.9. This version notably adds support for the above mentioned [DigitalOcean One Click Deployment](/guides/octobot-installation/cloud-install-octobot-on-digitalocean) and also adds custom crypto baskets into OctoBot and improves the existing trading modes while fixing many issues. + +### Crypto Baskets + +Similarly to [OctoBot cloud crypto baskets](https://www.octobot.cloud/features/crypto-basket), you can now create your own crypto baskets using OctoBot and the new [Index Trading Mode](/guides/octobot-trading-modes/index-trading-mode). + +<div style={{textAlign: "center"}}> + <div> + ![crypto + basket](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/crypto-basket.png) + </div> +</div> + +When using the Index Trading Mode, your OctoBot will split your reference market holdings into the different coins of your traded pairs. You can also define a rebalance interval and threshold to customize the way your OctoBot should behave when coins held in your basket change in value. + +And of course, you can use backtesting to optimize the content of your baskets! + +### Improved trading modes + +Both the DCA and TradingView trading mode have been improved in OctoBot 1.0.9. + +**DCA Trading Mode** + +The [DCA Trading Mode](/guides/octobot-trading-modes/dca-trading-mode) now supports an additional parameter. By setting the `Max asset holding` of your DCA strategies, you can limit the exposure to a given asset. This is especially useful when using a evaluator-based DCA as it prevents your DCA bot from building an excessive exposure to a given asset when buying conditions are repeating. + +**TradingView Trading Mode** + +<div style={{textAlign: "center"}}> + <div> + ![tradingview logo showing octobot tradingview trading + mode](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/tradingview-logo-showing-octobot-tradingview-trading-mode.png) + </div> +</div> + +Limit and stop orders created by the [TradingView Trading Mode](/guides/octobot-trading-modes/tradingview-trading-mode) are now much more flexible. + +The TradingView Trading Mode now supports [relative pricing](/guides/octobot-trading-modes/order-price-syntax) for limit and stop orders. This means that you can configure your TradingView alerts to trigger for example: + +- A BTC/USDT buy order at -10% of the current price +- An ETH/BTC sell order at the current price + 0.01 BTC +- A BTC/USDT stop loss at the price of 35000 USDT + +### Exchanges improvements + +- **Coinbase**: OctoBot now support both the legacy and updated Coinbase API key format +- **MEXC**: Trading on MEXC is now much more stable +- **All exchanges**: The order flow inside OctoBot as been improved. This fixes many issues related to order synchronization as well as errors on order creation. + +<div style={{textAlign: "center"}}> + **[Update your OctoBot](/guides/octobot-installation/install-octobot-on-your-computer)** +</div> + +### Full changelog + +Find the full changelog of OctoBot 1.0.9 on the OctoBot <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/CHANGELOG.md" rel="nofollow">GitHub repository</a>. + +## Final words + +We would like to thank the OctoBot community for their great support and improvement ideas as well as reporting many of the issues that have been fixed in 1.0.9. diff --git a/docs/blog/2024-04-19-mobile-app-revamp.md b/docs/blog/2024-04-19-mobile-app-revamp.md new file mode 100644 index 0000000000..9c3529181d --- /dev/null +++ b/docs/blog/2024-04-19-mobile-app-revamp.md @@ -0,0 +1,75 @@ +--- +title: "New Design of the Mobile App" +description: "A new version of the Android and iPhone OctoBot mobile app is available, allowing you to better track your investments" +slug: "mobile-app-revamp" +date: "2024-04-19" +authors: ["paul"] +tags: ["Mobile", "App", "OctoBot cloud", "Release"] +image: "/images/blog/mobile-app-revamp/thumb.png" +--- + + + +# New Design of the OctoBot Mobile App + +Thank you to everyone who downloaded our mobile app in its experimental version. Your feedback has been very helpful. + +<!--truncate--> + +That's why we have completely redesigned the app to better meet your needs. + +<div style={{textAlign: "center"}}> + **[Download the latest version](https://www.octobot.cloud)** +</div> + +## A new detailed view for each of your OctoBot + +We have also created a new detailed view of your OctoBot. You will be able to precisely follow what your OctoBots are doing. +You will have access to the performance history of your bots, the distribution of their portfolio, ongoing orders, and their latest actions. + +This new view is available whether you are using an OctoBot from OctoBot cloud or have installed it yourself. + +<div style={{textAlign: "center"}}> + <div> + ![new detailed view for your + bots](/images/blog/mobile-app-revamp/bot-view-pf-en.png) + </div> +</div> + +## A new dashboard + +As soon as you log in to the new version, you will discover a new dashboard. This dashboard shows the history of your actual portfolio, including all your exchange accounts. +You will also get an overview of how your portfolio is currently distributed. + +<div style={{textAlign: "center"}}> + <div> + ![new dashboard of the OctoBot mobile + app](/images/blog/mobile-app-revamp/mobile-dashboard-en.png) + </div> +</div> + +This new dashboard is designed so that you can monitor the performance of your crypto portfolios easily from your smartphone. + +## Also available on iPhone + +With this new version of the application, we are also adding a web version of the app available at <a href="https://mobile.octobot.cloud" rel="nofollow">mobile.octobot.cloud</a>. +It makes the OctoBot mobile app installable on iPhone directly from your browser to provide an experience similar to the Android application. This is the first step towards the iPhone version of the OctoBot app. + +To install the app on iPhone it, follow those 4 steps: + +1. With your mobile, go to <a href="https://mobile.octobot.cloud" rel="nofollow">mobile.octobot.cloud</a> +2. From your mobile web browser interface, click the `share` button +3. Scroll down and click `Add to Home Screen` +4. Click `Add` + +🎉 The OctoBot app is now installed on your iPhone! + +## Conclusion + +This is just the beginning. More improvements and new features will further enrich the mobile app. + +We hope you enjoy this new version. If so, do not forget to leave us a review! + +<div style={{textAlign: "center"}}> + <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=mobile-app-revamp" rel="nofollow"><GoogleStoreButton /></a> +</div> diff --git a/docs/blog/2024-05-02-crypto-bubble.md b/docs/blog/2024-05-02-crypto-bubble.md new file mode 100644 index 0000000000..594d245a58 --- /dev/null +++ b/docs/blog/2024-05-02-crypto-bubble.md @@ -0,0 +1,103 @@ +--- +title: "Understanding the Crypto Bubble" +description: "Discover the exciting world of cryptocurrencies! Learn how to identify bubbles and keep your investments safe" +slug: "crypto-bubble" +date: "2024-05-02" +authors: ["guillaume"] +tags: ["Crypto", "Ecosystem", "Finance", "Educational"] +image: "/images/blog/fomo-meaning/cover.png" +--- + + + +# Understanding the Crypto Bubble + +Discover the exciting world of cryptocurrencies! Learn how to identify bubbles and keep your investments safe. + +<!--truncate--> + +## What is a crypto bubble? + +Imagine a balloon that keeps getting bigger as more air is blown into it. +In the world of cryptocurrencies, similar things can happen. +A crypto bubble occurs when the prices of cryptocurrencies like [Bitcoin](https://www.octobot.cloud/what-is-bitcoin) increase very rapidly due to a lot of excitement and investment from people hoping to make quick money. + +However, just like a balloon can only take so much air before it pops, a crypto bubble can burst, resulting in a significant decline in prices. + +## Why do crypto bubbles happen? + +1. <b>New investors</b>: Many people hear stories about others making huge + profits from cryptocurrencies and decide to invest too, hoping for the same. + This rush drives the prices up quickly. +2. <b>[Fear of Missing Out](fomo-meaning) (FOMO)</b>: Fearing they might miss + out on profits, people often rush into buying cryptocurrencies, which can + push prices even higher. +3. <b>Media hype</b>: When the news constantly talks about rising crypto prices, + even more people want to invest, which adds more air to the bubble. +4. <b>Bandwagon effect</b>: People often follow what others are doing. If + everyone is buying crypto, it might seem like a good idea to do the same, + even if the prices are very high. + +<div style={{textAlign: "center"}}> + <div> + ![A person with an excited expression watches a rising crypto market graph + on their computer, symbolizing FOMO in + cryptocurrency.](/images/blog/fomo-meaning/cover.png) *An illustration of + the FOMO sentiment.* + </div> +</div> + +## Historical Crypto Bubbles + +Bitcoin, the first cryptocurrency, has seen several bubbles since it started in 2009. +For example, in 2017, Bitcoin's price skyrocketed to nearly $20 000, followed by a decline to approximately $3 000 a year later. + +## Signs of a Crypto Bubble + +- Quick price increase: When prices of cryptocurrencies rise very fast without a good reason, it might be a bubble. +- High volatility: Prices change dramatically in a short period. +- Big trading volumes: A sudden increase in buying and selling can be a sign. + +## How to Handle a Crypto Bubble + +- <b>Diversify Investments</b>: Don't put all your money in the same + cryptocurrencies, spread your investments across multiple cryptos through + [crypto baskets](https://www.octobot.cloud/features/crypto-basket) to reduce risk. Avoid putting all + your funds into cryptocurrency. Having a mix of different types of investments + can protect you if the crypto market crashes. +- <b>Watch Market Trends Carefully</b>: Keep an eye on how cryptocurrencies are + performing and what the market sentiment is like. Tools like the <a href="https://coinstats.app/fear-and-greed/" rel="nofollow">Fear and Greed Index</a> can give you an idea of whether emotions are driving + the market too much, which is often the case in a bubble. +- <b>Stay Disciplined</b>: Have a solid plan for your investments and stick to + it, no matter what others are doing. Don't make decisions just because prices + suddenly change or because of what other people say. + +<div style={{textAlign: "center"}}> + ![crypto baskets investment helping investors to diversify their crypto + portfolio](/images/blog/invest-with-crypto-baskets/crypto-basket.png) *A + diversified crypto portfolio* +</div> + +## Preparing for After the Bubble Bursts + +If the crypto bubble bursts and you're affected, remember to remain calm. + +Here are a few tips on how to proceed: + +- Assess and rebalance your portfolio: Look at your investments and see if you need to make changes to reduce risk or take advantage of new opportunities. [Crypto baskets](https://www.octobot.cloud/features/crypto-basket) are an easy way to diversify and rebalance your crypto portfolio. +- Learn from experience: Analyze what happened and why, and use this knowledge to make better investment decisions in the future. +- Keep an eye on the market: After a crash, there could be opportunities to buy good cryptocurrencies at low prices, but remember that it's important to do your own research and not just act on impulse. + +## Frequently Asked Questions + +### How can I tell if it's really a bubble? + +Look for signs like extremely rapid price increases, extreme public enthusiasm driven more by media and [FOMO](fomo-meaning). + +### Are cryptocurrencies still a good investment after a bubble bursts? + +Yes, cryptocurrencies can still be good investments after a bubble bursts. Just make sure to do your own research and consider how much risk you're comfortable with before investing. + +### Will there always be crypto bubbles? + +Given the current trends and historical patterns, it's likely that the cryptocurrency market will continue to see bubbles. This is a natural part of a young and evolving market, where enthusiasm and innovation drive significant price movements. Each bubble also brings increased attention and investment, potentially strengthening the market's foundation over time. diff --git a/docs/blog/2024-05-07-best-ai-trading-bots.md b/docs/blog/2024-05-07-best-ai-trading-bots.md new file mode 100644 index 0000000000..0882b0c84b --- /dev/null +++ b/docs/blog/2024-05-07-best-ai-trading-bots.md @@ -0,0 +1,210 @@ +--- +title: "5 Best AI Trading Bots" +description: "Discover the best AI trading bots. Compare top free and paid Artificial intelligence bots, their features, ease of use, and pricing." +slug: "best-ai-trading-bots" +date: "2024-05-07" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-crypto-trading-bots/cover.png" +--- + + + +# 5 Best AI Crypto Trading Bots + +Choosing the right Artificial intelligence crypto trading bot from the many options available can be tough. This article is here to help you find the best one for your needs. + +<!--truncate--> + +## What is a crypto trading bot? + +Crypto trading bots are like your digital assistants for cryptocurrency trading. +They work autonomously, executing trades based on your chosen strategies, which means you don't need to constantly monitor the markets. + +## What is an AI crypto trading bot? + +An AI crypto trading bot utilizes artificial intelligence algorithms to automate trading processes in the cryptocurrency market. +By analyzing vast amounts of data and market trends, these bots can execute trades based on predefined strategies without constant supervision. + +With many traders using them today, they're a popular choice for both beginners and experts. +For those new to crypto trading, these bots often come with user-friendly strategies to help you get started easily. + +<div> + Now that we have a clear understanding of what a crypto trading bot is, let's + explore the various types of trading bots available in the market. +</div> + +## 1. OctoBot + +<div style={{textAlign: "center"}}> + <div> + ![A man relaxing in his couch while OctoBot is making money by automating + cryptocurrency + strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +[OctoBot](/) is a flexible and easy-to-use trading bot that offers a variety of strategies for free, including [AI-based](https://www.octobot.cloud/features/ai-trading-bot), smart [DCA](smart-dca-making-of), and GRID strategies. +It's [open-source](open-source-trading-software). With its focus on transparency, users can backtest strategies or use paper trading and track performance. +OctoBot supports most major crypto exchanges and offers premium plans for advanced users, making it suitable for both beginners and experienced crypto investors. +It also offers crypto [AI predictions](https://www.octobot.cloud/tools/crypto-prediction) and AI focused [crypto baskets](https://www.octobot.cloud/features/crypto-basket). + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="User-friendly with AI-based and various pre-made strategies for beginners and pros" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Multiple free offers with options for advanced premium plans" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Open-source, supports major exchanges, variety of trading strategies, backtesting, and performance tracking" + /> + </div> +</Card> + +## 2. CryptoHero + +CryptoHero offers automated crypto trading to many cryptocurrencies and integration with major exchanges like [Binance](https://www.octobot.cloud/binance-trading-bot) and Kraken. +Users can set parameters, leverage AI-optimized bots for simulations, and use backtesting for strategy refinement. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="The dashboard may seem a bit complicated" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Free offer and competitive pricing compared to similar platforms" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Simulation, Backtesting and integration with major trading platforms" + /> + </div> +</Card> + +## 3. 3Commas + +<div style={{textAlign: "center"}}> + <div> + ![3commas-logo](/images/blog/best-crypto-trading-bots/3commas.png) + </div> +</div> + +3Commas is a paid crypto trading bot, offering GRID, DCA and Signal bots. +Known for its user-friendly interface, 3Commas supports multiple trading strategies and technical indicators. + +It also features a marketplace for third-party crypto signals. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="Easy-to-navigate interface with multiple trading strategies" + /> + <Rating + title="Price" + level={1} + h="14px" + tooltipText="Expensive paid service" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Supports GRID, DCA, and Signal bots with a marketplace" + /> + </div> +</Card> + +## 4. Cryptohopper + +Cryptohopper is a paid crypto trading bot, offering a 3 days free trial for new users. +It stands out for its market-making bot and the ability for users to create custom trading strategies or copy others' from its marketplace. + +The platform also supports automated trading via a telegram bot and offers additional services like crypto signals, strategy templates, and paper trading. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Challenging for beginners, offers market-making bot" + /> + <Rating + title="Price" + level={2} + h="14px" + tooltipText="Free and paid plans, suitable for various budgets" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Custom strategy creation, telegram bot trading, and paper trading" + /> + </div> +</Card> + +## 5. Pionex + +<div style={{textAlign: "center"}}> + <div> + ![pionex-logo](/images/blog/best-crypto-trading-bots/pionex.jpg) + </div> +</div> + +[Pionex](https://www.pionex.com/en/signUp?r=octobot) ([Pionex.us](https://accounts.pionex.us/en/signup?ref=octobot) for US citizens) is a trading platform that became famous for its user-friendly automated trading bots, allowing traders to execute strategies effortlessly. +Le platform trading bots can be tailored using AI to fit different trading styles, making it ideal for traders seeking flexibility. +Pionex empowers users to improve their strategies and manage risk effectively using backtesting and performance monitoring tools. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Easy to use copy bot but complex bot creation" + /> + <Rating + title="Price" + level={3} + h="14px" + tooltipText="Low exchanges fees" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Great variety of pre-made bots" + /> + </div> +</Card> + +## Conclusion + +In conclusion, whether you're new to trading or an experienced investor, there's an Artificial intelligence crypto trading bot that's right for you. +These bots come with different features and pricing, so you can find one that matches how you like to automate your trading. + +You didn't find what you were looking for ? +If you are looking for more AI tools to automate and improve your investments, <a href="https://www.insidr.ai/" rel="nofollow">Insidr.ai</a> is a great website referencing a ton of useful AI tools. diff --git a/docs/blog/2024-06-18-making-octobot-more-accessible.md b/docs/blog/2024-06-18-making-octobot-more-accessible.md new file mode 100644 index 0000000000..5f5b086377 --- /dev/null +++ b/docs/blog/2024-06-18-making-octobot-more-accessible.md @@ -0,0 +1,83 @@ +--- +title: "Making OctoBot more accessible" +description: "OctoBot plans have been improved. They are not more accessible, contain more features and add many new possibilities." +slug: "making-octobot-more-accessible" +date: "2024-06-18" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png" +--- + + + +# Making OctoBot more accessible + +<div style={{textAlign: "center"}}> + ![octobot plans improvements](/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png) +</div> + +At OctoBot, our mission is to make crypto investment simpler. +Therefore, we spend a lot of time trying to understand what is the most complicated when investing in Crypto. + +<!--truncate--> + +One of the things we realized was that automated investment strategies (either investing in the whole market with [crypto baskets](https://www.octobot.cloud/features/crypto-basket) or with simple algorithmic strategies) are very popular as long as they remain **easily accessible**. + +The crypto world is already complicated to get into, if investors also need to pay to use their first automated strategies such as crypto baskets, OctoBot is not really making things more accessible. This is especially true if you need to figure out how much gains you need to make to justify a $14/m subscription. + +**We decided to change that.** + +## Free automated strategies for every crypto investor + +The free Investor plan now unlocks a set of both crypto basket and algorithmic strategies. +This will enable crypto investors using any size of portfolio to directly profit from OctoBot investment strategies. + +<div style={{textAlign: "center"}}> + ![OctoBot strategy explorer with crypto baskets and dca strategies](/images/blog/making-octobot-more-accessible/octobot-strategy-explorer-with-crypto-baskets-and-dca-strategies.png) +</div> + +Additionally to the existing strategies, we also added new moderated-risk strategies, all available with the Investor plan and specifically created for investors who want to profit from a reasonable risk level. + +<div style={{textAlign: "center"}}> + ![OctoBot moderated risk crypto baskets strategies](/images/blog/making-octobot-more-accessible/octobot-low-risk-crypto-baskets-strategies.png) +</div> + +We also made the multi account historical dashboard available from the Investor plan, to make it easier for everyone to follow their accounts. + +<div style={{textAlign: "center"}}> + ![OctoBot multi exchange dashboard](/images/blog/making-octobot-more-accessible/octobot-multi-exchange-dashboard.png) +</div> + +## Affordable advanced investment strategies + +The [Investor Plus](introducing-the-investor-plus-plan) plan now focuses on unlocking specific types of [crypto baskets](https://www.octobot.cloud/features/crypto-basket) and investment strategies. + +It also increases the amount of simultaneous OctoBots an account can use. + +The goal with this new Investor Plus plan is to be relevant to crypto investors who want to invest using more advanced strategies while remaining very affordable. + +As a result, the Investor Plus plan is now much cheaper and can also be bought as a lifetime plan. + +**[Try Investor Plus](https://www.octobot.cloud/pricing)** + +## Customized investment strategies + +The [Pro plan](introducing-the-pro-plan) unlocks all the investor plus features and adds the possibility to configure your strategies + +- Trade with your own strategies using [TradingView OctoBots](/investing/tradingview-automated-trading): automate your trades based on TradingView alerts from the price directly, indicators or even Pine Script strategies. +- Invest with your own custom crypto baskets +- [Fine tune](/investing/fine-tune-your-octobots) all your OctoBots: using the Pro plan, you can easily interact with your running OctoBots to create, replace or cancel orders directly from your OctoBot. +- Enjoy priority support from the OctoBot team + +We also adapted the Pro plan price to be less impactful on your portfolio: the Pro plan is now $25/m. + +**[Switch to the Pro plan](https://www.octobot.cloud/pricing)** + + +## Final words + +We believe that this revamp of the OctoBot plans will greatly increase the accessibility of automated investment strategies for any crypto investor. + +With those new plans, we want to make [OctoBot cloud](/) the best platform to easily invest in crypto using investment strategies. + +We hope that you will enjoy those changes and want can't wait to get your feedback on it. \ No newline at end of file diff --git a/docs/blog/2024-06-18-what-are-octobot-rewards-and-how-to-get-them.md b/docs/blog/2024-06-18-what-are-octobot-rewards-and-how-to-get-them.md new file mode 100644 index 0000000000..5344327f85 --- /dev/null +++ b/docs/blog/2024-06-18-what-are-octobot-rewards-and-how-to-get-them.md @@ -0,0 +1,92 @@ +--- +title: "What are OctoBot rewards and how to get them?" +description: "OctoBot rewards enable you to unlock paid plan features for free by using OctoBot, being an active user and inviting friends." +slug: "what-are-octobot-rewards-and-how-to-get-them" +date: "2024-06-18" +authors: ["guillaume"] +tags: ["Rewards", "OctoBot cloud", "Release"] +image: "/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png" +--- + + + +# What are OctoBot rewards and how to get them? + +<div style={{textAlign: "center"}}> + ![OctoBot rewards get rewarded for using OctoBot and use advanced + strategies](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png) +</div> +We are thrilled to announce the release of the OctoBot reward system. Rewards +offer an alternative for free Investor accounts to profit from paid investment +strategies as well as other benefits without having to spend a dollar. + +<!--truncate--> + +## Unlock OctoBot paid strategies for free + +When creating an OctoBot account, you can choose to use the free Investor plan or one of the following paid plans: + +- The [Investor Plus](introducing-the-investor-plus-plan) plan, among other benefits, instantly unlocks every OctoBot AI, DCA, crypto baskets and grid strategies +- The [Pro plan](introducing-the-pro-plan) unlocks everything from the Investor Plus plan while also adding TradingView OctoBots, priority support and more + +Rewards are the alternative way to unlock the AI, DCA, crypto baskets and grid strategies that are normally only available starting from the Investor Plus plan. + +<div style={{textAlign: "center"}}> + ![OctoBot rewards dashboard showing crypto apprentice + rewards](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-crypto-apprentice-rewards.png) +</div> + +While paid plans grant you instant access to every strategy, it is now also possible to gradually unlock access to those strategies by being a valuable OctoBot user. + +When visiting your <a href="https://www.octobot.cloud/rewards" rel="nofollow">rewards dashboard</a>, you will see your current reward level. Each reward level permanently improves your free OctoBot account. + +Rewards are notably useful to unlock access to: + +- Each [AI strategy](chatgpt-strategy-deep-dive) +- Each [crypto basket strategy](https://www.octobot.cloud/features/crypto-basket) +- Each [DCA strategy](smart-dca-making-of) +- Each grid strategy +- An increased number of simultaneous OctoBots you can use + +<div style={{textAlign: "center"}}> + ![OctoBot rewards dashboard showing grandmaster of crypto + rewards](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-grandmaster-of-crypto-rewards.png) +</div> + +Grandmaster of Crypto is the maximum reward level. It takes time to acquire, but when reached, it permanently unlocks all the Investor Plus features. + +## How to earn rewards + +Rewards are earned by completing missions from the "Missions" section of your <a href="https://www.octobot.cloud/rewards" rel="nofollow">rewards dashboard</a>. + +<div style={{textAlign: "center"}}> + ![OctoBot rewards list with completed and uncompleted + missions](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-list-with-completed-and-uncompleted-missions.png) +</div> + +There are many kinds of missions such as starting multiple OctoBots, completing crypto courses or just having a running OctoBot. + +The main aspects to keep in mind to quickly earn rewards are: + +- Making sure to configure your OctoBot account and installing the app to get all the welcome rewards +- Having at least one OctoBot investing on your exchange account to get your daily rewards +- Talking to your friends about your experience with OctoBot and helping them setup their account: you get rewarded when your referees start their OctoBot and when they purchase a yearly plan + +:::info + **Pro tip**: Referring 2 friends purchasing the annual Investor Plus + subscription will instantly update your account to the highest reward level. +::: + +## The reason behind the OctoBot rewards system + +We realize that many crypto investors are investing with a portfolio containing less than few hundred dollars. In this context, paying for a monthly subscription can be really painful. + +Our goal at OctoBot cloud is to make crypto investment more accessible, and we also want to improve investments for smaller portfolios, and for those who simply don't want to pay. + +This is why we designed this rewards system. If you or a friend of yours wish to automate their investments with OctoBot cloud, there are many options to do so: + +- Using the Investor Plus or Pro plan to instantly unlock all the strategies +- Using strategies that are available free +- Being a valuable OctoBot user and gain access to the Investor Plus strategies and features as a gift from OctoBot cloud + +**[Check out OctoBot plans](https://www.octobot.cloud/pricing)** diff --git a/docs/blog/2024-07-07-octobot-2-0-0-whats-new.md b/docs/blog/2024-07-07-octobot-2-0-0-whats-new.md new file mode 100644 index 0000000000..c37b265123 --- /dev/null +++ b/docs/blog/2024-07-07-octobot-2-0-0-whats-new.md @@ -0,0 +1,86 @@ +--- +title: "OctoBot 2.0.0 - What's new" +description: "OctoBot 2.0.0 improves OctoBot with a full redesign of the user interface and includes the Premium OctoBot extension." +slug: "octobot-2-0-0-whats-new" +date: "2024-07-07" +authors: ["guillaume"] +tags: ["Tradingview", "Chatgpt", "Release", "Backtesting", "Strategy designer", "OctoBot cloud"] +image: "/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png" +--- + + + +# OctoBot 2.0.0 - What's new + +![octobot 2.0.0 annoucement with new design preview](/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png) + +## Introducing OctoBot 2.0.0 + +We're very excited to announce the release of OctoBot 2.0.0! This version is a major leap forward for the whole OctoBot project, as it: + +<!--truncate--> + +1. Revamps the whole user interface +2. Introduces the Premium OctoBot Extension +3. Includes many fixes and exchange connector updates. + +<div style={{textAlign: "center"}}> + ![octobot premium new tentacles + available](/images/blog/octobot-2-0-0-whats-new/octobot-premium-new-tentacles-available.png) +</div> + +## Revamped user interface + +In OctoBot 2.0.0, we greatly improved the OctoBot UI that started to look a bit from another time. + +The whole UI is now much more modern and is using the new OctoBot visual identity. + +<div style={{textAlign: "center"}}> + ![octobot 2.0.0 preview + dark](/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-preview-dark.png) +</div> + +The user interface now has dark and light modes and has better rendering on different screen sizes. + +<div style={{textAlign: "center"}}> + ![octobot 2.0.0 pnl + light](/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-pnl-light.png) +</div> + +We hope you will enjoy the new design of OctoBot! + +## The Premium OctoBot Extension + +The second major feature of this version is the introduction of the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension). + +<div style={{textAlign: "center"}}> + ![octobot premium extension + preview](/images/blog/octobot-2-0-0-whats-new/octobot-premium-extension-preview.png) +</div> + +The Premium OctoBot Extension is an optional paid extension to permanently improve your open source OctoBot. It adds: + +- The [Strategy Designer](/guides/octobot-usage/strategy-designer) to create advanced OctoBot strategies +- [Secure OctoBot cloud webhooks](/guides/octobot-interfaces/tradingview/using-a-webhook) for your TradingView strategies +- [OctoBot cloud crypto baskets](https://www.octobot.cloud/features/crypto-basket) directly into your open source Octobot + +<div style={{textAlign: "center"}}> + ![octobot open source utilisant les paniers de crypto avec extension premium + octobot](/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png) +</div> + +We created the Premium OctoBot extension in order to: + +- Improve the ease of automating your TradingView strategies +- Provide a tool suited for those who want to delve deeper into creating advanced strategies +- Allow open source OctoBots to use OctoBot cloud crypto baskets that are automatically kept up to date + +These features incur significant operating and development costs, which is why we decided to include them in the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension) and we look forward to receiving your feedback on this. + +If you have any idea of features you would like to see on the Premium OctoBot Extension, please reach out to us. + +## Other improvements + +OctoBot 2.0.0 updates the version of <a href="https://github.com/ccxt/ccxt" rel="nofollow">CCXT</a>, its exchange connector to 4.3.56, therefore improving the global crypto exchange connection and compatibility with OctoBot. + +This update also includes many bug fixes as well as improvements in exchange synchronization. diff --git a/docs/blog/2024-09-08-announcing-the-bitmart-and-octobot-partnership.md b/docs/blog/2024-09-08-announcing-the-bitmart-and-octobot-partnership.md new file mode 100644 index 0000000000..949998883b --- /dev/null +++ b/docs/blog/2024-09-08-announcing-the-bitmart-and-octobot-partnership.md @@ -0,0 +1,74 @@ +--- +title: "Announcing the BitMart and OctoBot partnership" +description: "Join the trade to earn campaign to automate your DCA, AI, TradingView, crypto baskets and grid strategies on BitMart using OctoBot" +slug: "announcing-the-bitmart-and-octobot-partnership" +date: "2024-09-08" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "BitMart"] +image: "/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png" +--- + + + +# Announcing the BitMart and OctoBot partnership + +![bitmart and octobot partnership](/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png) + +The OctoBot team is proud to announce the <a href="https://support.bitmart.com/hc/en-us/articles/28977125395355-BitMart-and-Octobot-Broker-Partnership" rel="nofollow">partnership with BitMart</a> aiming to help BitMart investors to simply automate their trading strategies. + +<!--truncate--> + +## Automate BitMart trading + +BitMart is now joining the [officially supported exchanges](/guides/exchanges) of the [open source version of OctoBot](https://www.octobot.cloud/trading-bot). + +It is now easy to use OctoBot to automate your BitMart trading strategies: + +- Using risk-free [paper trading and simulated money](/guides/octobot-usage/simulator) to test your strategy in live condition +- With [backtesting](/guides/octobot-usage/backtesting) and the [Strategy Designer](/guides/octobot-usage/strategy-designer) to quickly optimize your strategies performances +- With your funds directly stored on BitMart to really profit from your strategy + +**[Start your OctoBot](https://www.octobot.cloud)** + +## Trade to earn for OctoBot users on BitMart + +To celebrate the partnership, BitMart is running a special reward campaign for all OctoBot users who trade on BitMart! +Simply connect your BitMart account to OctoBot, trade with your strategy to receive a $5 reward. + +Naturally, this comes in addition to the regular BitMart rewards and makes using OctoBot on BitMart more profitable. + +Learn more about the details of this reward on <a href="https://support.bitmart.com/hc/en-us/articles/28977125395355-BitMart-and-Octobot-Broker-Partnership" rel="nofollow">the BitMart announcement</a>. + +## Supported OctoBot strategies on BitMart + +The BitMart support by OctoBot opens the door to a wide range of trading strategies that are free and easily accessible from the open source version of OctoBot. + +### BitMart Crypto Baskets + +Invest in the whole crypto market or themes you like directly from BitMart using OctoBot cloud-based [crypto baskets](/guides/octobot-trading-modes/index-trading-mode) or your custom baskets. To use OctoBot cloud crypto baskets on BitMart, simply download the crypto basket you want directly from your OctoBot and enable the BitMart exchange. + +### BitMart TradingView automated trading + +Automate your [TradingView strategies](/guides/octobot-trading-modes/tradingview-trading-mode) on BitMart and easily trade from your TradingView alerts. + +### BitMart AI Smart DCA + +Optimize your BitMart trading strategies using [smart dollar cost averaging strategies](/guides/octobot-trading-modes/dca-trading-mode) and automate your trades based on technical analysis or artificial intelligence. + +### BitMart customized grid trading + +Profit from stable markets using [grid trading](/guides/octobot-trading-modes/grid-trading-mode) and automate trades to create a regular passive income + +## BitMart is available starting from OctoBot 2.0.5 + +Starting from OctoBot 2.0.5, BitMart is SPOT trading is [fully supported](/guides/exchanges/bitmart), as well as its websocket connection allowing to speed up market data updates. + +BitMart has been included within OctoBot's regularly tested exchanges. The OctoBot team will regularly make sure that the BitMart connection remains stable and we (the OctoBot team) will do everything that is possible to maintain this state to offer you the best possible trading automation on BitMart. + +## Final words + +At OctoBot, we are trying to make trading strategies automation as accessible as possible to everyone. Following this philosophy, it's important for us to enable automated trading on each exchange that is important to our users. + +Adding [BitMart](https://www.octobot.cloud/bitmart-trading-bot) to the supported exchanges of the open source OctoBot is a first step towards a broader integration with BitMart, if you are interested in using BitMart with [OctoBot cloud strategies](https://www.octobot.cloud/explore), let us know by upvoting <a href="https://feedback.octobot.online/cloud/p/bitmart-support" rel="nofollow">the BitMart support</a> on our roadmap. + +If you are trading on an exchange that is not currently supported, please create or upvote the post associated to your exchange on <a href="https://feedback.octobot.online" rel="nofollow">our feedback website</a>. diff --git a/docs/blog/2024-09-26-how-to-automate-trading-in-tradingview.md b/docs/blog/2024-09-26-how-to-automate-trading-in-tradingview.md new file mode 100644 index 0000000000..b5ecdf5781 --- /dev/null +++ b/docs/blog/2024-09-26-how-to-automate-trading-in-tradingview.md @@ -0,0 +1,143 @@ +--- +title: "How to automate trading in TradingView" +description: "Automate your trades and strategies on any exchange directly from TradingView with webhook or emails. Trade with the best strategies and increase your gains." +slug: "how-to-automate-trading-in-tradingview" +date: "2024-09-26" +authors: ["paul"] +tags: ["Crypto", "Trading", "Educational", "TradingView"] +image: "/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png" +--- + + + +# How to automate trading in TradingView + +<div style={{textAlign: "center"}}> + <div> + ![automate your tradingview trades to trade on any indicator or + strategy](/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png) + </div> +</div> + +<!--truncate--> + +## Can you automate trading on TradingView? + +Yes you can, and you can do it with a TradingView free plan. <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> has a built-in alert system that can be used to automatically trigger trades on brokers such as <a href="https://www.binance.com" rel="nofollow">Binance</a>. + +Such alerts can be sent via email, which is free or using webhooks, which require a TradingView subscription. +Those alerts can be automatically processed by a platform such as [OctoBot](/investing/tradingview-automated-trading) to instantly convert each alert into trades on your exchange account. + +## Automating trades on TradingView + +TradingView makes it easy to create alerts that will fire as soon as any type of condition is met. + +<div style={{textAlign: "center"}}> + <div> + ![creating an alert from + tradingview](/images/blog/how-to-automate-trading-in-tradingview/creating-an-alert-from-tradingview.png) + </div> +</div> + +Alert can are triggered as soon as their `Condition` is met. Conditions be: + +- **Price events** such as the price of [Bitcoin](https://www.octobot.cloud/what-is-bitcoin) reaching a certain value +- **Indicator** thresholds such as the RSI value entering a specific range +- **Pine Script strategies** creating buy or sell orders + <div style={{textAlign: "center"}}> + <div> + ![tradingview alert + form](/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-form.png) + </div> + </div> + +Overall, alerts are very flexible and are a great tool to connect TradingView to other platforms in order to automate trades. This connection can be done using email or webhooks. + +### Automate with Email alerts + +When checking the `Send plain text` notification option in the alert `Notifications` tab, TradingView will automatically send an email to your alert email address every time the alert is triggered. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview alert notification + form](/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-notification-form.png) + </div> +</div> + +Email alerts are included in the free TradingView plan, which makes them accessible to every TradingView user. +However, when using the free plan, only one email alert can be used at a time. + +Automating trades via email is possible but can be complicated. In order to do this, specialized platforms such as [OctoBot](/investing/tradingview-automated-trading) are required to be able to automate TradingView trades while remaining on the free TradingView plan. + +### Automate with webhook alerts + +The most common way to automate trade on TradingView is to use webhooks. A webhook is a URL that TradingView will automatically call as soon as your alert is triggered. + +This is the most efficient way to connect TradingView to any trading platform. + +Using webhooks require a TradingView paid plan and is compatible with most platforms. + +## TradingView automated trading bot + +Adding alerts to TradingView can be used to automate trading strategies using automated trading bots. + +A trading bot is a software that will automatically trade on an exchange account based on its strategy. A trading bot can listen to your TradingView alerts and instantly trade on your exchange account as soon as it receives a notification of your alert. + +A TradingView trading bot can easily be created using a [TradingView OctoBot](/investing/tradingview-automated-trading). Such bot makes it easy to trade based on your TradingView alert emails or webhook calls. + +Using OctoBot, you can create your own TradingView trading bot that will be trading on your exchange account or with [risk-free virtual money](/investing/paper-trading-a-strategy) and automate any type of trading strategy. + +You got a TradingView strategy that you would like to automate? Check out our guide on [how to automate a TradingView Strategy](/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud) to learn more. + +## Finding the best TradingView strategy + +TradingView is a great platform to find trading strategies. Using the strategy explorer, you can view hundreds of strategies. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview strategy + explorer](/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-explorer.png) + </div> +</div> + +A TradingView strategy is written in <a href="https://www.tradingview.com/pine-script-docs/welcome/" rel="nofollow">Pine Script</a>, a language specifically designed to create and visualize trading strategies. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview strategy + example](/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-example.png) + </div> +</div> + +When selecting a strategy, TradingView will display its signals on your chart, and you will be able to display and edit its Pine Script code directly from the website. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview simple rsi + strategy](/images/blog/how-to-automate-trading-in-tradingview/tradingview-simple-rsi-strategy.png) + </div> +</div> + +There are many other ways to find TradingView strategies, such as: + +- The TradingView <a href="https://www.tradingview.com/scripts/" rel="nofollow">scripts explorer</a> +- Specialized Youtube channels such as <a href="https://www.youtube.com/@DaviddTech" rel="nofollow">Trading with DaviddTech</a> or <a href="https://www.youtube.com/@TradeIQ" rel="nofollow">TradeIQ</a> + +## Testing a TradingView strategy + +When using a TradingView strategy, you can always visualize how it behaves directly on your price chart. You can also use TradingView's integrated backtesting engine to execute your trading strategy on historical data. + +Backtesting a strategy is especially useful when optimizing your strategy settings to generate the most profits on your traded market. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview strategy + tester](/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-tester.png) + </div> +</div> + +To test a TradingView strategy, simply go to the `Strategy Tester` tab and view your strategy historical trading performance. + +Changing any input of your strategy will automatically update your strategy backtesting results. + +**[Start your TradingView bot](https://www.octobot.cloud)** diff --git a/docs/blog/2024-11-09-how-to-create-your-tradingview-strategy-with-ai.mdx b/docs/blog/2024-11-09-how-to-create-your-tradingview-strategy-with-ai.mdx new file mode 100644 index 0000000000..b252b3c875 --- /dev/null +++ b/docs/blog/2024-11-09-how-to-create-your-tradingview-strategy-with-ai.mdx @@ -0,0 +1,130 @@ +--- +title: "How to create your TradingView strategy with AI" +description: "Create and trade with your best trading strategies on TradingView using AI. Describe your strategy with your own words, improve it and invest with it." +slug: "how-to-create-your-tradingview-strategy-with-ai" +date: "2024-11-09" +authors: ["paul"] +tags: ["Crypto", "Trading", "Educational", "TradingView", "AI"] +image: "/images/blog/how-to-create-your-tradingview-strategy-with-ai/trading-strategy-with-ai-in-5-minutes-using-octobot-cloud-and-free-tradingview-account.png" +--- + + + +# How to create your TradingView strategy with AI + +> What if you could create a trading strategy just by describing it? + +This is exactly what we built, and we are very proud to announce that it works really well! + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="1g4T2IsIKBk" title="TradingView Automated Trading with AI" /> +In this video, Guillaume creates a <a href="https://www.investopedia.com/terms/m/macd.asp" rel="nofollow">MACD</a> + <a href="https://www.investopedia.com/terms/s/sma.asp" rel="nofollow">SMA</a> trading strategy exclusively using the OctoBot AI to +generate and then improve the strategy. Finally, the strategy is automated on a +live exchange with a risk-free [virtual money](/investing/paper-trading-a-strategy) TradingView OctoBot using a free +TradingView account. + +<!--truncate--> + +Let's go over the highlights of this new way to create TradingView strategies + +## Describe your strategy to the AI to create it + +Just describe your strategy on the new <a href="https://www.octobot.cloud/creator" rel="nofollow">OctoBot AI strategy generator</a>, and it will create the TradingView equivalent of your strategy using <a href="https://www.tradingview.com/pine-script-docs/welcome/" rel="nofollow">Pine Script</a>. + +<div style={{textAlign: "center"}}> + <div> + ![octobot ai strategy generator creating a macd + strategy](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-creating-a-macd-strategy.png) + _prompt: macd strategy_ + </div> +</div> + +This artificial intelligence is similar to ChatGPT and is specifically trained to generate ready-to-use Pine Script strategies with the following criteria: + +- The strategy can be configured using regular TradingView settings +- No change should be required to run and test the strategy from TradingView +- The strategy can be automated as is, without having to edit it + +Just describe it the strategy you wish to create, and the AI strategy generator will write it for you! + +<div style={{textAlign: "center"}}> + <div> + ![octobot ai strategy generator macd strategy pinescript + code](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-pinescript-code.png) + _Pine Script Code of the generated MACD strategy_ + </div> +</div> + +This generated strategy can then just be pasted into the TradingView "Pine Editor" and be used right away. + +<div style={{textAlign: "center"}}> + **[Generate your strategy](https://www.octobot.cloud/creator)** +</div> + +And this is not all. The AI strategy generator can also be used to improve a strategy. + +## Improve your trading strategy using AI + +Once you created your strategy, you can make changes to it the same way you created it: using natural language and your own words. + +Just ask the generator to update the strategy the way you want: you can add an indicator, change the way take profits are made, use multiple time frames or coins. There is no limit. + +<div style={{textAlign: "center"}}> + <div> + ![octobot ai strategy generator macd strategy add moving average to the + strategy](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-add-moving-average-to-the-strategy.png) + _Prompt: add a moving average condition to the strategy to only buy when the price is bellow average and sell when the price is above average_ + </div> +</div> + +The strategy generator will just do exactly as you tell it to and update your strategy code for you. + +<div style={{textAlign: "center"}}> + <div> + ![octobot ai strategy generator macd sma strategy pinescript + code](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-sma-strategy-pinescript-code.png) + _Pine Script Code of the generated MACD + SMA strategy_ + </div> +</div> + +Here, we asked the AI to also consider moving averages (SMA) when identifying its buy and sell signals. We now have a working MACD + SMA TradingView strategy without writing a line of code. + +We can use it directly on TradingView to test and optimize it. It is now just a regular TradingView strategy, just like if you used the code of someone else's strategy, except that it's your own. + +<div style={{textAlign: "center"}}> + <div> + ![octobot ai testing the macd sma strategy on + tradingview](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-testing-the-macd-sma-strategy-on-tradingview.png) + _Testing the MACD + SMA strategy on TradingView_ + </div> +</div> + +You now have your own TradingView strategy. What about using it in live condition on your exchange? + +## Automate your AI TradingView strategy + +Binding your TradingView strategy to your exchange can be done using a TradingView OctoBot. They are special OctoBots that you can start on your target exchange and that can be using: + +- **Risk free virtual money** - This is best to test your strategy live before using your real funds +- **Your funds on exchange** - Once you are confident enough to use your strategy with your funds, you can start making gains with a real-money OctoBot + +Your TradingView strategy can be automated with OctoBot using TradingView alerts. Just create an alert, enter your OctoBot TradingView alert email address or webhook URL and set the alert message to `{{strategy.order.alert_message}}`. +Your OctoBot will then automatically apply your strategy on your exchange account. + +Are you wondering where this `{{strategy.order.alert_message}}` comes from ? It's a special parameter that has been set in your strategy code by the AI generator in order to make your strategy ready to be automated by OctoBot without having to change the code. + +<div style={{textAlign: "center"}}> + **[Automate TradingView](https://www.octobot.cloud/explore?category=tv)** +</div> + +:::info + Save money: use email alerts, they are compatible with a TradingView free + account. +::: + +## Going further + +We create a [set of guides and videos](/investing/tradingview-automated-trading) to help users create and automate their TradingView strategy. If you ever wanted to try something with indicators or TradingView automations of any kind, this is the right place to get started! + +Final note: if you wish to use the strategy described on the video, you will find it under the "MACD SMA strategy" on the <a href="https://www.octobot.cloud/explore?category=tv" rel="nofollow">TradingView strategy explorer</a>. diff --git a/docs/blog/2024-11-12-automated-trading-bot.md b/docs/blog/2024-11-12-automated-trading-bot.md new file mode 100644 index 0000000000..b971e58b55 --- /dev/null +++ b/docs/blog/2024-11-12-automated-trading-bot.md @@ -0,0 +1,97 @@ +--- +title: "Automated Trading Bot" +description: "Learn how automated trading bots can revolutionize your trading strategy. Discover key benefits, best practices, and top strategies to maximize profits while minimizing emotional decisions." +slug: "automated-trading-bot" +date: "2024-11-12" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Bot"] +image: "/images/blog/automated-trading-bot/cover.png" +--- + + + +# Automated Trading Bot + +Tired of spending several hours analyzing the market trends and then making emotional trading decisions? + +<!--truncate--> + +> Do you want to maximize your profits and minimize your losses? + +If so, you are not alone. In recent times, the use of automated trading bots has increased in an effort to revolutionize the trading strategy among many traders. +Here is all the important information on the world of automated trading bots, covering benefits, risks, and best practices to succeed. + +## What is an Automated Trading Bot? + +A trading bot is simply a software program that uses market or any other type of data to analyze and make a trade on behalf of the user. +They may be set to follow a defined trading strategy; besides, they are able to perform trades faster and more frequently than a human ever could. +Many trading bots are designed for use in trading stocks, forex, and crypto, and they come with a host of benefits for traders. + +They can analyze market data and make trades without emotional bias, granting the trader more time for analysis and strategy development. +Just like a self-driving car, automated trading bots need regular checkups and updates to make sure they keep running correctly and efficiently. + +<div style={{textAlign: "center"}}> + <div> + ![A man relaxing in his couch while OctoBot is making money by automating + cryptocurrency + strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +## How Do Automated Trading Bots Work? + +Automated trading bots are using [algorithms and machine learning](how-does-trading-bot-work) in the analysis of market data for the execution of trades. +They can simply be programmed to follow a particular trading strategy, such as [DCA](smart-dca-making-of), GRID and are capable of executing trades with a speed and frequency impossible for humans. +They can be easily integrated with other tools, such as technical indicators and risk management software, to offer a whole trading solution. + +## Advantages of An Automated Trading Bot + +Some of the benefits associated with a trading bot include: + +- Increased Efficiency and Productivity: The automated trading bots can execute trades faster and more frequently than any human being; hence, this frees up more time for them to carry out analysis and strategy development. +- Higher Accuracy and Less Emotions: This automated trading bot can analyze markets and make trading decisions free from emotional bias, avoiding impulsive decisions. +- Scalability and Flexibility: One can program an automated trading bot to follow any specific trading strategy; besides, they are straightforward to scale up or scale down with respect to market conditions that keep changing. + +## Select the Best Automated Trading Software + +With so many automated trading software options out there, it's hard to know which one to choose. Here are a few things to keep in mind: + +- Look for software that offers a variety of trading strategies. Automated trading software should be able to give its user a variety of different trading strategies, including DCA and GRID. +- Consider the level of customization: Good automated trading software should allow you to customize it in accordance with your trading needs. +- Check if it has risk management tools. A good automated trading software should have built-in risk management tools, such as stop-loss orders and position sizing. + +We've also made a top of the [best crypto trading bots](best-crypto-trading-bots). + +<div style={{textAlign: "center"}}> + <div> + ![A podium of the best trading + robots](/images/blog/best-crypto-trading-bots/cover.png) + </div> +</div> + +## Top Automated Trading Strategies + +Some of the more popular automated trading strategies include: + +- DCA Strategy: Dollar-Cost Averaging (DCA) means investing a fixed amount regularly, no matter the price. This helps smooth out buying costs over time and reduces the impact of market ups and downs. +- Grid Strategy: The Grid Strategy sets buy and sell orders at regular intervals around a price. It takes advantage of price swings, allowing traders to profit from small movements without needing to predict market direction. +- AI strategy: AI trading uses algorithms to analyze data and make trades quickly. These systems can spot trends and react faster than humans, adapting to market changes for better outcomes. + +## Risk Management and Portfolio Optimization + +Key parts of automated trading are risk management and portfolio optimization. Some points to be considered are as follows: + +- Stop-loss orders can help you limit your losses if the market moves against you. +- Position sizing: Position sizing is the most critical factor in managing risk and maximizing profits. +- Diversify your portfolio: You can reduce your risk and potentially increase your returns by diversifying your portfolio by using for example [crypto baskets](https://www.octobot.cloud/features/crypto-basket) + +## OctoBot: Your automated trading bot + +[OctoBot](/) is easy to use automated trading bot to automate your strategies and risk management that can be customized to fit any trading plan. +You can start your own automated trading bot and personalize it according to his particular trading needs using OctoBot or choose any pre-configured strategy. + +## Conclusion + +Giving traders a bunch of benefits, the automated trading bots will increase efficiency and productivity, improve accuracy and reduce emotions, and be scalable and flexible. +The right choice of automated trading software and the development of your own automated trading bot will maximize your profits and reduce potential losses. +It is equally important that you should not make common errors such as over-optimization and bad risk management. diff --git a/docs/blog/2024-11-14-what-is-spot-trading.md b/docs/blog/2024-11-14-what-is-spot-trading.md new file mode 100644 index 0000000000..99cbd097ba --- /dev/null +++ b/docs/blog/2024-11-14-what-is-spot-trading.md @@ -0,0 +1,72 @@ +--- +title: "What is Spot Trading" +description: "Learn the ins and outs of spot trading and start building wealth in the cryptocurrency market. Discover the benefits, strategies, and best practices for successful spot trading." +slug: "what-is-spot-trading" +date: "2024-11-14" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Educational"] +image: "/images/blog/what-is-spot-trading/cover.png" +--- + + + +# What is Spot Trading? + +> Curious about cryptocurrency, but really have no idea where to start? + +<!--truncate--> + +Spot trading is an excellent place to start, but a host of available options and strategies may overwhelm you. In this article, some of the most basic aspects of spot trading will be discussed, a step-by-step guide on how one can get started will be given out, and valuable insight will also be shared concerning the best strategy and practices that shall bring in success. + +Spot trading means buying or selling a cryptocurrency at the prevailing current market price and intending to take immediate delivery of the asset. This is opposed to either futures or options trading, whereby you would deal in a contract that will eventually give one the right to buy or sell an asset at a certain price some point in time. + +Many traders love to spot trade since the market provides ample opportunities to make quick money by speculating on its ups and downs. It does, however, have its risks and challenges, too, which will be discussed later in this article. + +<div style={{textAlign: "center"}}> + <div> + ![traders with a laptop bying and selling + crypto](/images/blog/what-is-spot-trading/cover.png) + </div> +</div> + +## How to Get Started with Spot Trading + +Getting started with spot trading is relatively easy but does take some basic knowledge and preparation. Here are the steps to follow: + +First, choose a reputable exchange that enjoys good credibility. Select any exchange that lists your cryptocurrency pair for a spot trade. Create an account on that exchange and get it verified. Afterwards, you may deposit the funds with which you wish to trade into your account. Get familiar with the platform of the exchange and its features, including charts and order books. + +Start trading: [Make your first trade](/investing/invest-with-your-strategy) and begin growing your portfolio. + +## Strategy of Spot Trading + +There are numerous spot trading strategies that you can avail depending on your goals and risk tolerance. +Let's discuss a few widely used one's below: + +- Technical Analysis: You can make use of charts along with [technical indicators](/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy) to identify trends and patterns taking place within the market. +- Fundamental analysis: The trader needs to research the underlying fundamentals of a digital currency, its use case, team, and market demand. +- [Scalping](https://www.octobot.cloud/tools/scalping-signals): Earn money by buying and selling a cryptocurrency in as little time as possible. +- Swing trading: Simply hold onto the cryptocurrency for an extended period of time, while it waits for a huge movement in the market. + +<div style={{textAlign: "center"}}> + <div> + ![cryptocurrency trading desk abstract concept + illustration](/images/blog/what-is-spot-trading/cryptocurrency-desk.png) + </div> +</div> + +## Spot Trading vs Other Types of Trading + +A little further down the line, spot trading is just one type of trading, and it does have its advantages and disadvantages versus other kinds of trading. A few of the main differences existing are: + +- [Futures trading](what-is-future-trading): Futures trading involves buying and selling a contract that leads to the entitlement of purchasing or selling at a particular price in times to come. This involves more intricacies and hence requires deeper market understanding. +- <a href="https://www.investopedia.com/terms/o/option.asp" rel="nofollow">Option trading</a> is generally defined as a trade of contracts wherein + one receives the right to sell or buy any specific form of an asset in the + future at a pre-identified price. Options trading is also a more complex form + of trade and entails much more comprehensive understanding of the market. +- Day trading: As the name would suggest, trading involves the buying and selling of a cryptocurrency in one single trading day. This kind of trading is faster and requires better knowledge of the market. + +## Common Mistakes to Avoid in Spot Trading + +Spot trading offers lucrative opportunities but carries inherent risks. +While emotions can derail trading decisions, disciplined risk management through stop-loss orders is crucial. +Success requires careful market analysis and a deep understanding of traded cryptocurrencies. diff --git a/docs/blog/2024-11-15-grid-trading.md b/docs/blog/2024-11-15-grid-trading.md new file mode 100644 index 0000000000..f8d011b94b --- /dev/null +++ b/docs/blog/2024-11-15-grid-trading.md @@ -0,0 +1,81 @@ +--- +title: "Grid trading" +description: "Discover grid trading strategies to profit from market volatility. Learn how to set up your automated trading system with OctoBot for optimal gains." +slug: "grid-trading" +date: "2024-11-15" +authors: ["guillaume"] +tags: ["Trading", "Strategy", "OctoBot cloud"] +image: "/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png" +--- + + + +# Grid Trading + +Grid trading can be one of the strongest methods of trading that helps in making good profits out of the volatility in the markets. +Building a grid trading system gives you the capability of leveraging price fluctuations upward to increase your trading profits. +In this in-depth guide, we take a close look at what grid trading is, touching on the benefits, strategies, and common mistakes to be avoided. + +<!--truncate--> + +## What is Grid Trading? + +Grid trading is a technique of trading that involves placing a number of buy and sell orders at fixed price intervals. This then creates a "grid" of orders which can be activated as the price heads higher or lower. + +> Grid trading is an attempt to make a profit from the difference between buying and selling, but not through forecasting the trend. + +<div style={{textAlign: "center"}}> + ![grid trading illustrated by a man stepping up on green stairs grabbing + coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) +</div> + +## How does Grid Trading work? + +<a href="https://www.investopedia.com/terms/g/grid-trading.asp" rel="nofollow">Grid trading</a> is a process of setting multiple buy and sell orders at +measured price levels. Usually, they are set according to a grid pattern, +whereby each of them triggers with every movement in the price upwards or +downwards. In other words, you could be trading in crypto, setting a $50 buy +order and a $55 sell order. Once the price reaches $55, the sell order is +triggered, and you sell the crypto for profit. If the price heads lower to $50, +it triggers a buy order, through which you will buy the crypto at a lower price. + +## Types of Grid Trading Strategies + +You may employ several kinds of [grid trading](/guides/octobot-trading-modes/grid-trading-mode) strategies depending upon your goals about trading and risk tolerance. + +The most common grid trading strategy is the range-based grid trading. This strategy involves the establishment of a grid of orders that are to be triggered once the price starts to move within a pre-defined range. +For example, assume the price is trading between $50 and $60, you're going to set buy and sell orders at higher or lower levels from this range. + +Advantages of Grid trading are many and from different perspectives; the essential ones are as follows: + +- More profit: Through grid trading, you can have gains when there is fluctuation, even in a sideways market. +- Risk reduction: Setting up a grid of orders will actually allow you to reduce the risks and hence avoid making huge losses. +- Flexibility: Through grid trading, you can efficiently engage yourself with a variety of markets such as stocks, forex, and cryptocurrencies. + +However, it also has some disadvantages. + +- Complexity: Grid trading can be cumbersome to set up. As such, its setup can be very confusing to any trader who sets it up. +- Over-trading: Grid trading can cause over-trading. This also has the potential of raising commissions, along with slippage. +- Emotional stress: It can be very emotionally stressful to trade using grid trading, especially in cases where you are trading with a big chunk of your capital. + +## Grid trading bots + +<div style={{textAlign: "center"}}> + ![grid trading strategies available on + octobot.cloud](/images/blog/grid-trading/grid-strategies.png) +</div> + +Grid trading bots, like [OctoBot](/), are an automated program that automatically closes buy and sell orders based on pre-set parameters. It does this by creating a series of price levels or "grids" within which it will automatically execute trades in response to market fluctuations. +It automatically performs partial sales when the price reaches a certain level within the set sell order and buys more in the event the price drops to a buy order level. + +In this way, a trader can continuously make a profit from small price fluctuations without his or her constant monitoring of the market. +Automation given by these bots has also helped them save traders from emotional stress while implementing effective strategies in both volatile and sideways markets. + +<div style={{textAlign: "center"}}> + **[Start a grid trading bot](https://www.octobot.cloud/explore?category=strategies)** +</div> + +## Conclusion + +The grid trading strategy, is among the best stategies to actually realize gains from volatility in markets. You will have all the opportunities to fully capitalize on the fluctuation in prices by creating a grid trading system that increases your trading profits. +On the other hand, grid trading requires serious risk management and discipline in overtrading emotional stress. diff --git a/docs/blog/2024-11-26-best-free-crypto-trading-bots.md b/docs/blog/2024-11-26-best-free-crypto-trading-bots.md new file mode 100644 index 0000000000..238c9b0fff --- /dev/null +++ b/docs/blog/2024-11-26-best-free-crypto-trading-bots.md @@ -0,0 +1,350 @@ +--- +title: "10 Best Free Crypto Trading Bots" +description: "Discover the top 10 free crypto trading bots that can help you automate your cryptocurrency trading strategy, minimize risks, and maximize potential profits without breaking the bank." +slug: "best-free-crypto-trading-bots" +date: "2024-11-26" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-crypto-trading-bots/cover.png" +--- + + + +# 10 Best Free Crypto Trading Bots + +In the complex world of cryptocurrency trading, staying ahead of market trends can feel like a full-time job. + +<!--truncate--> + +Crypto trading bots are game-changing tools that allow traders to execute strategies 24/7, without being glued to their screens. +While many trading bots come with hefty price tags, the free ones are democratizing algorithmic trading for investors of all levels. +This comprehensive guide will walk you through the top 10 free crypto trading bots that can help you automate your trading strategy, reduce emotional decision-making, and potentially increase your trading efficiency. +Whether you're a seasoned crypto trader or a curious beginner. + +## What is a crypto trading bot? + +A crypto trading bot is an advanced software program designed to interact with cryptocurrency exchanges, automatically executing trades based on pre-defined strategies and market analysis. +These sophisticated algorithms can monitor market conditions, analyze price movements, and make trading decisions on your behalf. +By using complex mathematical models and [machine learning](best-ai-trading-bots) techniques, trading bots can implement various strategies like [arbitrage](https://www.octobot.cloud/tools/triangular-arbitrage-crypto), trend following, [market making](https://market-making.octobot.cloud), and sentiment analysis, all without experiencing the emotional biases that often impact human trading decisions. + +<div> + Now that we have a clear understanding of what a crypto trading bot is, let's + explore the various types of trading bots available in the market. +</div> + +## 1. OctoBot + +<div style={{textAlign: "center"}}> + <div> + ![A man relaxing in his couch while OctoBot is making money by automating + cryptocurrency + strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +[OctoBot](/) is a flexible and easy-to-use trading bot that offers a variety of strategies for free, including [AI-based](https://www.octobot.cloud/features/ai-trading-bot), [crypto baskets](https://www.octobot.cloud/features/crypto-basket), smart [DCA](smart-dca-making-of), grid and [TradingView](https://www.octobot.cloud/features/tradingview-bot) strategies. +It's [open-source](open-source-trading-software). With its focus on transparency, users can backtest strategies or use paper trading and track performance. +OctoBot supports most major crypto exchanges and also offers premium plans for free by completing missions, making it suitable for both beginners and experienced crypto investors regardless of their budget. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={3} + h="14px" + tooltipText="User-friendly with AI-based and various pre-made strategies for beginners and pros" + /> + <Rating + title="Free version limits" + level={3} + h="14px" + tooltipText="You can unlock paid features by completing missions for free" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Open-source, supports major exchanges, variety of trading strategies, backtesting, and performance tracking" + /> + </div> +</Card> + +## 2. 3Commas + +<div style={{textAlign: "center"}}> + <div> + ![3commas-logo](/images/blog/best-crypto-trading-bots/3commas.png) + </div> +</div> + +3Commas is a crypto trading bot, offering GRID, DCA and Signal bots. +Known for its user-friendly interface, 3Commas supports multiple trading strategies and technical indicators. +It also features a community for support and learning, and a marketplace for third-party crypto signals. It offers a limited free plan +and requires KYC in Europe. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Easy-to-navigate interface with multiple trading strategies but requires KYC in Europe" + /> + <Rating + title="Free version limits" + level={1} + h="14px" + tooltipText="Few trades and bots" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Supports GRID, DCA, and Signal bots with a marketplace" + /> + </div> +</Card> + +## 3. CoinRule + +Coinrule is a no-code crypto trading bot. +It features an easy "if-this-then-that" rule setup, over 150 pre-set trading rules, and a risk-free demo exchange. +The bot is available via a web platform, supports all major tokens, and offers various subscription plans, including a lite free option. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Complex 'if-this-then-that' rule setup for beginners" + /> + <Rating + title="Free version limits" + level={2} + h="14px" + tooltipText="Only 1 exchange and 2 rules" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Over 150 pre-set rules and a risk-free demo exchange" + /> + </div> +</Card> + +## 4. Cryptohopper + +Cryptohopper is a cloud based [crypto trading bot](automated-trading-bot), offering a 3 days free trial for new users. +It stands out for its market-making bot and the ability for users to create custom trading strategies or copy others' from its marketplace. +The platform also supports automated trading via a telegram bot and offers additional services like crypto signals, strategy templates, and paper trading. +The free plan only offers copy bots and portfolio management. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Challenging for beginners, offers market-making bot" + /> + <Rating + title="Free version limits" + level={1} + h="14px" + tooltipText="Only copy bots" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Custom strategy creation, telegram bot trading, and paper trading" + /> + </div> +</Card> + +## 5. Pionex + +<div style={{textAlign: "center"}}> + <div> + ![pionex-logo](/images/blog/best-crypto-trading-bots/pionex.jpg) + </div> +</div> + +[Pionex](https://www.pionex.com/en/signUp?r=octobot) ([Pionex.us](https://accounts.pionex.us/en/signup?ref=octobot) for US citizens) is a cutting-edge trading platform known for its user-friendly automated trading bots, allowing traders to execute strategies effortlessly. +It offers a variety of customizable bots tailored to different trading styles, making it ideal for both novice and experienced traders seeking flexibility. +With advanced backtesting and performance monitoring tools, Pionex empowers users to optimize their strategies and manage risk effectively. +As it is also an exchange, there is no additional fees to start a trading bot. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Easy to use copy bot but complex bot creation" + /> + <Rating + title="Free version limits" + level={3} + h="14px" + tooltipText="Low exchanges fees" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Great variety of pre-made bots" + /> + </div> +</Card> + +## 6. Binance trading bot + +[Binance](/guides/exchanges/binance) trading bots are automated tools designed to execute cryptocurrency trades based on predefined parameters, allowing users to trade 24/7 without constant monitoring. +These bots enhance trading efficiency by analyzing market data and making split-second decisions, which can help capitalize on market volatility. +Binance offers a variety of bots, including the popular Spot [Grid](/guides/octobot-trading-modes/grid-trading-mode) bot, which is particularly effective in sideways markets by buying low and selling high within a set price range. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Complex bot exploration and creation" + /> + <Rating + title="Free version limits" + level={3} + h="14px" + tooltipText="Exchanges fees only" + /> + <Rating + title="Features" + level={3} + h="14px" + tooltipText="Great variety of pre-made bots" + /> + </div> +</Card> + +## 7. Cornix + +Cornix is an automated crypto trading platform, renowned for beeing the largest crypto signals provider marketplace. +It offers DCA bots, a dedicated mobile app and integration with Telegram for easy trade automation. It offers a limited free plan. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Moderate ease with mobile app and Telegram integration" + /> + <Rating + title="Free version limits" + level={2} + h="14px" + tooltipText="1 bot of all types" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="DCA bots and integration with major trading platforms" + /> + </div> +</Card> + +## 8. Bybit trading bot + +[Bybit](/guides/exchanges/bybit) trading bots are automated tools that execute cryptocurrency trades based on pre-set strategies, enabling continuous trading without constant human oversight. +These sophisticated algorithms analyze market trends quickly, offering options like the Grid Bot for sideways markets and the DCA Bot for systematic, risk-managed investments. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={2} + h="14px" + tooltipText="Complex bot exploration" + /> + <Rating + title="Free version limits" + level={3} + h="14px" + tooltipText="Exchanges fees only" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="Auto fill feature" + /> + </div> +</Card> + +## 9. Kucoin trading bot + +[KuCoin](/guides/exchanges/kucoin) trading bots are automated tools that perform cryptocurrency trades according to user-defined settings, allowing for continuous trading without the need for constant oversight. +They analyze real-time market data to make quick decisions and capitalize on price fluctuations. +Key options include the Spot Grid bot for sideways markets and the [Dollar-Cost Averaging](smart-dca-making-of) (DCA) bot for systematic investing. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Complex bot exploration and setup" + /> + <Rating + title="Free version limits" + level={3} + h="14px" + tooltipText="Exchanges fees only" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="No pre-made bots" + /> + </div> +</Card> + +## 10. OKX trading bot + +[OKX](/guides/exchanges/okx) trading bots are algorithmic cryptocurrency trading systems that autonomously execute predefined strategies by analyzing real-time market data. +These automated tools leverage sophisticated techniques like grid trading, dollar-cost averaging, arbitrage to optimize trading performance. +By minimizing human intervention, the bots enable precise, rapid market interactions across multiple cryptocurrency platforms. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Ease of use" + level={1} + h="14px" + tooltipText="Complex bot exploration and setup" + /> + <Rating + title="Free version limits" + level={2} + h="14px" + tooltipText="High exchanges fees" + /> + <Rating + title="Features" + level={2} + h="14px" + tooltipText="No pre-made bots" + /> + </div> +</Card> + +## Conclusion + +In summary, whether you're a beginner or a seasoned trader, there's a crypto trading bot out there that can meet your specific needs. +While these tools can be powerful allies in your trading journey, it's crucial to remember that no bot guarantees profits. +Always conduct thorough research, understand the bot's strategy, and start with small investments or [paper trading](/investing/paper-trading-a-strategy). diff --git a/docs/blog/2025-02-25-kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai.md b/docs/blog/2025-02-25-kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai.md new file mode 100644 index 0000000000..3336786fcc --- /dev/null +++ b/docs/blog/2025-02-25-kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai.md @@ -0,0 +1,88 @@ +--- +title: "Kucoin x OctoBot Fireside chat - Simplifying crypto investment with AI" +description: "Listen to our Fireside with Kucoin discussing how artificial intelligence can help you with your crypto investment." +slug: "kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai" +date: "2025-02-25" +authors: ["guillaume"] +tags: ["Partnership", "Trading", "Exchange", "AI"] +image: "/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement-banner.png" +--- + + + +# Kucoin x OctoBot Fireside chat - Simplifying crypto investment with AI + +> **How can AI really help you with your crypto investment?** + +<!--truncate--> + +In <a href="https://x.com/i/spaces/1ynJOlgWzaExR" rel="nofollow">this Fireside chat</a> with <a href="https://x.com/swingy369" rel="nofollow">Serena</a> from <a href="https://www.kucoin.com/ucenter/signup?rcode=rJ2Q2T3" rel="nofollow">Kucoin</a>, Paul and Guillaume, co-founders of [OctoBot](/) we discuss how AI is changing the game in crypto investment. + +<div style={{textAlign: "center"}}> + <div> + <a href="https://x.com/i/spaces/1ynJOlgWzaExR" rel="nofollow">![kucoin x octobot fireside chat simplifying crypto investment with ai + announcement banner](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement.jpeg)</a> + </div> +</div> + +<div style={{textAlign: "center"}}> + **[Listen to the Fireside chat](https://x.com/i/spaces/1ynJOlgWzaExR)** +</div> + +Here is a summary of the main topics covered over the fireside chat. + +## OctoBot is built around simplicity +At OctoBot, we believe that crypto investment strategies should be available to everyone who looks for it. However, selecting and setting up an investment strategy can be really complicated and risky. + +That's why [octobot.cloud](/) is built with simplicity in mind. Any trading strategy can be easily started from pre-configured strategies. + +## AI to create strategies +Creating a trading strategy doesn't have to be a complex task. Using <a href="creator" rel="nofollow">OctoBot AI</a>, anyone can create their own trading strategy on any supported exchange. + +Naturally, each trading strategy can be used with [risk-free virtual money](/investing/paper-trading-a-strategy) first and then on the user's exchange account. + +### Creating a crypto basket with AI + +![creating a meme coins crypto basket using ai](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-meme-coins-crypto-basket-using-ai.png) + +Create any crypto basket just by describing it to the AI. The created crypto basket will be ready to use on the exchange you specified. + +### Creating a DCA strategy with AI +![creating a sol eth usdc dca strategy on conbase using ai](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-eth-usdc-dca-strategy-on-conbase-using-ai.png) + +DCA strategies can also be created from the AI, simply describe what to trade and how to trade it and your DCA strategy will be ready. + +### Creating a grid strategy with AI + +Grid strategies are a great way to make profits in sideway markets but can be intimidating to design, AI makes it much easier. Using OctoBot AI, you can get your custom grid in a few seconds. + +![creating a sol usdt grid strategy on kucoin using ai](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-usdt-grid-strategy-on-kucoin-using-ai.png) + +The AI describes the key information of your grid in a simple way. Here, this 10 orders Solana grid covers a pretty tight price interval and will automatically follow the price by trailing up when the SOL/USDT price increases beyond 146.291 USDT. + +### Creating a TradingView strategy with AI + +When using TradingView to invest, Pine Script is the way to automate a strategy. However, it can be hard to get started with a new TradingView strategy. OctoBot AI makes it much easier. Simply describe your TradingView strategy idea and the AI will create your strategy Pine Script in a second. + +<div style={{textAlign: "center"}}> + **[Create your own strategy with AI](https://www.octobot.cloud/creator)** +</div> + +## Investing using crypto basket in the bull market + +During the bull market, Crypto Baskets are performing extremely well. Ourselves, at OctoBot, we use crypto baskets a lot for our personal investments. + +![top 5 basket with 73 percent profit on kucoin](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/top-5-basket-with-73-percent-profit-on-kucoin.png) + +Of course, at OctoBot, we use OctoBot for our investments and here is a screenshot of one of our personal crypto baskets bot. We believe crypto baskets to be a simple great investment strategy, especially during the bull market. + +<div style={{textAlign: "center"}}> + **[View crypto baskets](https://www.octobot.cloud/explore?category=indexes)** +</div> + +## Transparency is at the core of OctoBot + +Since 2018, OctoBot is open source. We don't want users to blindly have to trust OctoBot, that's why the whole code is available on <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">GitHub</a> and everyone can download and install the software. + +## Final words +At OctoBot, we want to make investment strategies easy to use for everyone. AI has recently become powerful enough to really help investors with their strategies, and we are proud to announce that OctoBot is now much easier to use and opens more possibilities than ever. diff --git a/docs/blog/2025-11-05-how-to-use-a-self-custody-crypto-trading-bot.md b/docs/blog/2025-11-05-how-to-use-a-self-custody-crypto-trading-bot.md new file mode 100644 index 0000000000..ddbd848af6 --- /dev/null +++ b/docs/blog/2025-11-05-how-to-use-a-self-custody-crypto-trading-bot.md @@ -0,0 +1,192 @@ +--- +title: "How to use a self custody crypto trading bot" +description: "Learn how to use a self custody crypto trading bot to automate your investment strategies on centralized and decentralized exchanges." +slug: "how-to-use-a-self-custody-crypto-trading-bot" +date: "2025-11-05" +authors: ["guillaume"] +tags: ["Trading", "Exchange", "Self custody", "Crypto"] +image: "/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png" +--- + + + + BarChart, + CheckCircle, + CircleDollarSign, + Globe, + Shield, + TrendingUp, + Zap, +} from 'lucide-react' + +# How to use a self custody crypto trading bot + + +<div style={{textAlign: "center"}}> + <div> + ![chatgpt-logo](/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png) + _"Not your keys, not your coins"_ + </div> +</div> + +<!--truncate--> + +This is one of the core principles of crypto: self custody of your crypto assets guarantees the security and independence provided by blockchain technology. + +- First, it started with wallets such as <a href="https://metamask.io/" rel="nofollow">MetaMask</a> or <a href="https://electrum.org/" rel="nofollow">Electrum</a> to give you full control over your crypto assets. +- Then decentralized exchanges such as <a href="https://app.uniswap.org/" rel="nofollow">Uniswap</a> allowed to easily trade your crypto assets without having to trust a central authority. +- Decentralized exchanges continued to improve and now also propose sophisticated trading instruments such as perpetual futures on <a href="https://app.hyperliquid.xyz/" rel="nofollow">Hyperliquid</a>. + +Finally, self custody trading bots, allowing to leverage crypto exchanges to automate investment strategies independently from any centralized authority, are starting to appear. + +## What is a non custodial crypto trading bot +A non custodial crypto trading bot is a trading bot that is fully controlled by you, its user. +1. It is not controlled by a central authority such as a crypto exchange or a trading bot platform that holds your API (or wallet) keys. +2. It's a trading robot that lets you have custody of your exchange API keys or crypto wallet. + + +While self custody is a core principle of crypto, it is not always easy to implement. +To achieve it, you need to configure your own wallet, which can be quickly done on a browser wallet such as <a href="https://metamask.io/" rel="nofollow">MetaMask</a>. This wallet can then be used to store your crypto and exchange them on decentralized exchanges. + + +<div style={{textAlign: "center"}}> + <div> + ![metamask-logo](/images/blog/how-to-use-a-self-custody-crypto-trading-bot/metamask-logo.png) + </div> +</div> + + +From this point, a trading bot connected to this wallet will be able to apply your investment strategy by trading your crypto on decentralized exchanges using your own wallet. + +This implies that the bot will access your crypto wallet directly, which means that you need to have a very high trust in the bot platform you are using. The risk being that any hacker compromising the bot platform will be able to steal your crypto. + +Here is where a self custody crypto trading bot comes in. It is a trading bot that is only controlled by you, it never shares your crypto wallet's keys with any platform or anyone. + + +A self custody crypto trading bot can connect to both centralized and decentralized exchanges, and in both cases, it greatly increases the security of your crypto assets. + + +### Self custody crypto trading bot for centralized exchanges + +To automate a strategy on a centralized exchange, there are three main options: + +**Simple but rigid:** centralized exchange built-in trading bot services +In this case, the bot is running <a href="https://www.binance.com/trading-bots" rel="nofollow">directly on the centralized exchange servers</a>. This has the advantage of being secure and very easy to setup and use, but it also has the drawbacks of lacking the flexibility of a specialized trading bot tools. + +**Flexible but less secure:** specialized trading bot platforms +The bot is running on the servers of a trading bot platform such as <a href="https://3commas.io/" rel="nofollow">3Commas</a>. This has the advantage of being flexible and allowing to use a lot of different trading strategies, but it also has the drawbacks of being less secure as the bot platform can <a href="https://blockworks.co/news/3commas-security-breach" rel="nofollow">leak your API keys</a> in case of a security breach. + +**Secure and flexible:** self custody crypto trading bots +A bot, such as [the open source version of OctoBot](https://www.octobot.cloud/trading-bot), is running on your own computer or server. Your API keys never leave your device. This has the advantage of being secure and flexible, but it also has the drawbacks of being more complex to setup and use. + +As often, there is no "one size fits all" solution. You need to choose the best solution for your needs. The interesting part being the fact that there are more and more [secure and flexible options](https://www.octobot.cloud/features/self-custody-trading-bot) available as self custody trading bots are becoming more accessible to the general public. + +### Self custody crypto trading bot for decentralized exchanges + +To automate a strategy on a decentralized exchange, there are two main ways of running a DEX bot: + +**Trusting a DEX trading bot platform** +In this case, the bot is running on the servers of the DEX bot platform . While this is convenient, as it implies sharing your wallet with the platform, any issue with this platform can have devastating consequences for your funds. + +**Self custody crypto trading bots** +This bot runs on your device and never shares your wallet with any platform, making it by far the most secure option. Drawback being that very few trading bots are available for decentralized exchanges. + +Overall, automating an investment strategy on a DEX is still very challenging, that's why we are working on an easy to use [self custody crypto trading bot for decentralized exchanges](https://www.octobot.cloud/features/self-custody-trading-bot). + +### Pros and cons of self custody crypto trading bots + + +The only way use a self custody crypto trading bot is to run it on your own computer or server. Its usually means a desktop application that you install on your system and a lot of headaches to setup and run properly. + +**Advantages of self custody crypto trading bots** + +<div> + {[ + { + icon: <CheckCircle className="text-primary" />, + name: 'Your keys, your coins', + description: 'You are the only one who has access to your crypto wallet', + }, + { + icon: <Shield className="text-primary" />, + name: 'Best security', + description: 'There is no third party to compromise your crypto or API keys', + }, + { + icon: <CircleDollarSign className="text-primary" />, + name: 'Highest flexibility', + description: 'Use decentralized and centralized exchanges from the same platform', + }, + ].map((element, i) => ( + <HighlightElement key={i} element={element} /> + ))} +</div> + +**Drawbacks of self custody crypto trading bots** + +<div> + {[ + { + icon: <CircleDollarSign className="text-rating-color-2" />, + name: 'Responsibility', + description: 'You are responsible of your crypto wallet and API keys security, there is no recovery service if you lose your keys.', + }, + { + icon: <Globe className="text-rating-color-2" />, + name: 'Running the software', + description: + 'The software needs to continuously be executed on your own computer or server.', + }, + { + icon: <Globe className="text-rating-color-2" />, + name: 'Complex setup', + description: + 'A self custody crypto trading bot usually requires technical knowledge and a secure setup.', + }, + ].map((element, i) => ( + <HighlightElement key={i} element={element} /> + ))} +</div> + +At OctoBot, we are working on a self custody crypto trading bot that solves both the **Running the software** and **Complex setup** drawbacks using a secure self-custody trading bot from your mobile phone. + +<div style={{textAlign: "center"}}> + **[Register to the early access](https://www.octobot.cloud/features/self-custody-trading-bot)** +</div> + + +## How to use a self custody crypto trading bot + +A self custody crypto trading bot is always a software you need to install, configure and run on your own mobile, computer or server. + +First step is then to download and install a self custody crypto trading bot, such as the [OctoBot open source trading bot desktop application](https://www.octobot.cloud/trading-bot). + +Then, you will be able to select the strategy you want to use, connect to your exchange account and start trading. +While OctoBot works with most centralized exchanges and a few decentralized exchanges, if your primary goal is to trade on decentralized exchanges, you might want to use a specialized bot such as <a href="https://hummingbot.org/" rel="nofollow">Hummingbot</a>. +That's it, your bot is installed and configured, this was the easy part. + +> "Installing a self custody crypto trading bot is simple. Properly running and securing it is not." + +Your self custody crypto trading bot is now automating your strategy. Next steps are to: +- Make sure it runs 24/7 (which means monitoring, restarting it if it crashes, keeping it updated) +- Keep it secure against other people accessing your computer, potential malwares and security breaches. +- If it runs on a server, make sure your connection to it is secure and encrypted at all times. + +This can be a real challenge, especially if you are not a person with a strong technical background. This is why until today, the vast majority of self custody crypto trading bots were only available as desktop applications (or even command line tools) designed for highly technical users. + + +## A self custody crypto trading bot mobile app + +At OctoBot, we have been working on trading bots since 2018, the year we coded the first version of the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot on GitHub</a>. Time has passed since then and the crypto world has changed a lot. +With the rise of popular decentralized exchanges such as Hyperliquid or Uniswap and stricter and stricter regulations, traditional trading bot platforms are, in many cases, not the obvious choice anymore. + +This is why we have been working on a **self custody crypto trading bot mobile app** that allows you to: +- **Automate your investment strategies** on your centralized and decentralized exchanges **in a simple way**. +- **Secure your crypto wallet and centralized exchange API keys** on your own device. +- Profit from your **mobile phone accessibility** to always have control over your investment strategies. + +We will be launching the app very soon. Register to the early access to be the first to use it. + +<div style={{textAlign: "center"}}> + **[Register to the early access](https://www.octobot.cloud/features/self-custody-trading-bot)** +</div> diff --git a/docs/blog/authors.yml b/docs/blog/authors.yml new file mode 100644 index 0000000000..857fb31934 --- /dev/null +++ b/docs/blog/authors.yml @@ -0,0 +1,11 @@ +guillaume: + name: Guillaume + title: Co-founder of OctoBot + url: https://github.com/Drakkar-Software + image_url: https://www.octobot.cloud/images/team/guillaume.jpg + +paul: + name: Paul + title: Co-founder of OctoBot + url: https://github.com/Drakkar-Software + image_url: https://www.octobot.cloud/images/team/paul.jpg diff --git a/docs/content/developers/architecture/_category_.json b/docs/content/developers/architecture/_category_.json new file mode 100644 index 0000000000..c01432064b --- /dev/null +++ b/docs/content/developers/architecture/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Architecture", + "position": 2, + "link": { + "type": "generated-index", + "description": "OctoBot system architecture and design decisions." + } +} diff --git a/docs/content/developers/architecture/design-philosophy.md b/docs/content/developers/architecture/design-philosophy.md new file mode 100644 index 0000000000..2518d5d714 --- /dev/null +++ b/docs/content/developers/architecture/design-philosophy.md @@ -0,0 +1,107 @@ +--- +title: "Design Philosophy" +description: "Learn about the OctoBot design philosophy and technical architecture based with speed and scalability in mind using Python and asynchronous programming with asyncio." +sidebar_position: 2 +--- + + + +# Design Philosophy + +## Philosophy + +The goal behind OctoBot is to have a **very fast and scalable** trading robot. + +To achieve this, OctoBot is entirely built around the + +<a href="https://docs.python.org/3/library/asyncio.html" rel="nofollow">asyncio</a> producer-consumer +<a href="https://github.com/Drakkar-Software/Async-Channel" rel="nofollow">Async-Channel</a> framework which allows to very quickly and efficiently +transmit data to different elements within the bot. The idea is to all the time +maintain **fully up-to-date data** without having to use update loops. Update +loops require sleeping time, which is inefficient. This architecture enables to +**notify the evaluation chain as quickly as possible** when an update is +available without having to wait for any update cycle of any update loop. + +Additionally, in order to save CPU time, as little threads as possible are used +by OctoBot (usually less than 10 with a standard setup). + +## Overview + +The OctoBot code is split into [several repositories](github-repositories). +Each module is handled as an independent python module and is available on the + +<a href="https://pypi.org/" rel="nofollow">official python package repository</a> (used in `pip` commands). + +## OctoBot + +![OctoBot architecture](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/octobot_arch.svg) + +Simplified view of the OctoBot core components. + +Inside the OctoBot part, each arrow is an async channel. + +## OctoBot tentacles + +Tentacles are OctoBot's extensions, they are meant to be easily customizable, can +be activated or not and do any specific action within OctoBot. + +### Evaluation chain tentacles + +They are tools to analyze market data as well as any other type of data (Teddit, Telegram, etc). +They implement abstract evaluators, strategies and trading modes. + +### Utility tentacles + +These are OctoBot's interfaces (web, telegram), notification systems, social news feeds +and [backtesting](/guides/octobot-usage/backtesting) data collectors. They implement abstract interfaces, services, service +feeds, notifiers and data collectors + +## Evaluators, strategies and trading modes: + +### Evaluators + +Simple python classes that will automatically be wake up when new data is available. +Their goal is to set `self.eval_note` and call `await self.evaluation_completed` +that will then be made available to the Strategy(ies). They should be dedicated to +a single simple task such as (for example) evaluate the RSI on the current data or +looks for a divergence in a trend. + +### Strategies + +Strategies are more complex elements, they can read all the evaluators evaluations +on every time frame and are considering these evaluations to set their `self.eval_note` +and call `await self.strategy_completed`. As a comparison if evaluators are human +senses, strategies are the brain that will take these senses' signals and decide to +do something or not. Strategies can be generic like SimpleStrategyEvaluator that +will take any standard evaluator and time frame into account or using specific +evaluators only like MoveSignalsStrategyEvaluator. + +### Trading modes + +[Trading modes](../octobot-trading-modes/trading-modes) use the strategy(ies) evaluations to create, update or cancel orders. +Using the strategies signals, they are responsible for the way to translate a signal +into an order by looking at the available funds, open orders, considering stop loss +or not and other trading related responsibilities. + +### Triggers + +Evaluators, strategies and trading modes are automatically triggered when their channel +has a new data. Trigger sources are: + +For evaluators + +- Technical evaluators: any new candle or refresh request (with updated candles data) from a strategy +- Real time evaluators: any new candle and any market price change +- Social evaluators: associated signal (ex: a post for a Reddit social evaluator) + +For strategies + +- After a technical evaluator cycle: when all TA have updated their evaluation and called `await self.evaluation_completed` +- After any real time evaluator evaluation and call of `await self.evaluation_completed` +- After any social evaluator evaluation and call of `await self.evaluation_completed` + +For trading mode + +- After any strategy evaluation and call of `await self.strategy_completed` + +_Thanks for reading this guide and if you have any idea on how to improve it, please reach out to us !_ diff --git a/docs/content/developers/architecture/overview.md b/docs/content/developers/architecture/overview.md new file mode 100644 index 0000000000..25c2102bfd --- /dev/null +++ b/docs/content/developers/architecture/overview.md @@ -0,0 +1,117 @@ +--- +title: Architecture Overview +description: OctoBot system architecture — package layers, tentacle plugin system, async channel backbone, evaluation pipeline, configuration, and deployment modes. +keywords: [octobot, architecture, monorepo, packages, tentacles, async-channel, trading, evaluators] +sidebar_position: 1 +--- + +# Architecture Overview + +OctoBot is a modular, async crypto trading bot. The codebase is a Python monorepo under `packages/`, built with [Pants](https://www.pantsbuild.org/). Every strategy, evaluator, exchange connector, and trading mode lives as a **tentacle** — a plugin that sits on top of the framework without modifying it. This separation is the central design decision: the core packages define contracts; tentacles fulfill them. + +## Package layers + +The stack has six layers. Each package depends only on packages in the layers below it. + +**commons** and **async_channel** are the foundation — neither has any internal dependency. Commons provides configuration, databases, DSL interpreter, and shared utilities. Async_channel is the typed message bus: channels connect producers to consumers through async queues. Synchronized mode removes async tasks and drives execution deterministically, which is what makes backtesting possible. + +**tentacles_manager**, **backtesting**, and **trading_backend** form the next layer. Tentacles_manager handles plugin discovery, installation, and configuration. Backtesting provides the time-driven simulation engine. Trading_backend handles exchange-specific broker ID injection and API key permission validation. + +**trading** and **evaluators** are the core framework. Trading owns exchange managers, order lifecycle, portfolio accounting, and the trading mode abstraction. Evaluators own the Matrix (the in-memory signal tree) and the factory that instantiates evaluator classes across symbol/time-frame combinations. + +**services** sits above trading and backtesting. It integrates external interfaces — web dashboard, Telegram, notification dispatch, AI model backends. + +**agents**, **flow**, and **sync** are higher-level packages. Agents orchestrates LLM-powered teams through services. Flow is the stateless automation runner that uses trading for exchange operations. Sync provides multi-instance coordination over HTTPS. + +**node** depends on flow and wraps it in a durable scheduler with crash recovery. + +**tentacles** sits at the top alongside the CLI. It contains no framework code — only concrete implementations that subclass what the framework defines. + +## The tentacle plugin system + +A tentacle is a directory in the `tentacles/` tree with a Python module, a `metadata.json` descriptor, and an optional `config/` subdirectory. The descriptor names the Python classes it exports and declares a minimum compatible version. `tentacles_manager` discovers these directories at startup by scanning for `metadata.json` files up to three levels deep — no registry, no explicit registration call. + +The `__init__.py` files throughout the `tentacles/` tree are generated by `tentacles_manager`, not written by hand. Each calls `check_tentacle_version()` on import: if the tentacle's declared version is below the minimum, the import is silently skipped. A broken tentacle cannot crash OctoBot at startup. + +This boundary matters because it keeps strategy code out of the core. A new trading mode, evaluator, or exchange connector is a new directory in `tentacles/`, not a patch to `octobot_trading` or `octobot_evaluators`. The framework packages stay stable across very different trading strategies. + +Configuration follows the same separation. Every tentacle class has a **reference config** inside its own `config/` directory — the factory default, never modified at runtime — and an optional **profile-specific copy** written to the active profile's `specific_config/` folder. At runtime, the profile copy wins if present; otherwise the reference is used. + +## The async channel backbone + +All runtime data flow is channel-based. A `Channel` subclass names a data type and declares its producer and consumer classes. Producers enqueue; consumers dequeue and call a registered callback. Consumers can be filtered — a TA evaluator subscribes to `EvaluatorsChannel` filtered to its specific symbol and time frame, so it only receives relevant triggers. + +Channels start paused and resume automatically when a consumer with a non-optional priority level registers. This prevents upstream processing when nothing meaningful is listening. + +`MatrixChannel` is the most consequential channel: every time an evaluator finishes and calls `evaluation_completed()`, it publishes to `MatrixChannel`. Strategy evaluators and trading mode producers both subscribe here to be notified when new signal data is available. + +## The evaluation to trading pipeline + +``` + ╔══════════════════════════════════════════════════════════════╗ + ║ Exchange (WebSocket / REST) ║ + ╚════════════════════════════╤═════════════════════════════════╝ + │ + ┌──────────▼──────────┐ + │ ExchangeManager │ + │ candles · tickers │ + │ order book · fees │ + └──────────┬──────────┘ + │ EvaluatorsChannel + ┌───────────────────┼───────────────────┐ + ▼ ▼ ▼ + ┌───────────┐ ┌─────────────┐ ┌───────────┐ + │ TA │ │ RealTime │ │ Social │ + │ Evaluator │ │ Evaluator │ │ Evaluator │ + └─────┬─────┘ └──────┬──────┘ └─────┬─────┘ + │ eval_note │ eval_note │ eval_note + └───────────────┐ │ ┌───────────────┘ + ▼ ▼ ▼ + ┌──────────────────┐ + │ Matrix │ + │ (signal tree) │ + └────────┬─────────┘ + │ MatrixChannel + ┌────────▼─────────┐ + │ Strategy │ + │ Evaluator │ + └────────┬─────────┘ + │ TradingModeChannel + ┌───────────┴───────────┐ + ▼ ▼ + ┌──────────────┐ ┌──────────────┐ + │ Producer │ │ Consumer │ + │ what/when │─────►│ how/execute │ + │ to trade │ │ on exchange │ + └──────────────┘ └──────┬───────┘ + │ + ┌──────▼───────┐ + │ Orders │ + │ Portfolio │ + └──────────────┘ +``` + +Exchange data arrives from REST polling or WebSocket and populates per-time-frame circular buffers (three thousand candles each). On each closed candle, `EvaluatorsChannel` triggers the relevant TA evaluators. Each evaluator writes its `eval_note` (a float in `[-1, 1]`) to the Matrix and broadcasts on `MatrixChannel`. A `StrategyEvaluator` subscribes to `MatrixChannel` and aggregates signals from all evaluators for its configured time frames — but only after verifying that every contributing TA evaluator's Matrix timestamp is fresh enough relative to exchange time. The strategy posts its own note, which trading mode producers pick up to decide what orders to create. + +The producer/consumer split inside a trading mode is deliberate: the producer decides *what* to trade based on signals; the consumer decides *how* to execute it against the exchange. Multiple trading modes can share one exchange and operate against isolated sub-portfolios. + +## Configuration and profiles + +Configuration has two layers merged at runtime: + +- `config/config.json` — exchange credentials and per-installation settings. Never travels with a profile. +- `user/profiles/<name>/profile.json` — everything that defines a strategy: active tentacles, evaluator parameters, trading mode config. Safe to share or commit. + +The active profile also contains `tentacles_config.json` (which tentacle classes are active) and `specific_config/` (per-class parameter overrides). Profiles can be marked `auto_update` to poll an origin URL on a configurable interval, which is how managed strategy updates are distributed. A profile update triggers a graceful bot restart. + +`update_config_fields` applies dot-path updates in-place without reloading from disk, so the web UI can save small changes without unnecessary churn. + +## Deployment modes + +**Standalone bot** — the default. `octobot/octobot.py` starts four producers (`ExchangeProducer`, `EvaluatorProducer`, `ServiceFeedProducer`, `InterfaceProducer`) on a single asyncio loop. Everything described in the pipeline section runs here. + +**Node (master/consumer)** — `octobot_node` is a standalone FastAPI service backed by [DBOS](https://docs.dbos.dev/), a workflow engine that persists every step to SQLite or PostgreSQL. A node accepts automation tasks over its REST/WebSocket API and hands them to `octobot_flow` for execution. An instance can act as master (schedules tasks), consumer (executes them), or both. Multi-node deployments share a PostgreSQL database; SQLite is single-node only. Task payloads support end-to-end encryption via a hybrid RSA/AES-GCM/ECDSA scheme with directional key separation. + +**Serverless flow** — `octobot_flow` is a stateless execution engine for individual automations. An `AutomationState` object is passed in, a DAG of DSL actions runs, and the updated state is returned. No memory is retained between invocations, which makes it safe to run as a serverless function. The flow engine is what the node invokes per task iteration. + +**Sync** — `octobot_sync` lets multiple OctoBot instances share configurations, signals, and account data over HTTPS. Every request is authenticated via an EIP-191 EVM wallet signature. Access control is data-driven through on-chain ownership resolution. Primary servers use S3-compatible object storage; replica servers mirror a subset locally. diff --git a/docs/content/developers/environment/_category_.json b/docs/content/developers/environment/_category_.json new file mode 100644 index 0000000000..0587533d00 --- /dev/null +++ b/docs/content/developers/environment/_category_.json @@ -0,0 +1 @@ +{"label": "Developer Environment", "position": 2} diff --git a/docs/content/developers/environment/environment-variables.md b/docs/content/developers/environment/environment-variables.md new file mode 100644 index 0000000000..c9cf813afc --- /dev/null +++ b/docs/content/developers/environment/environment-variables.md @@ -0,0 +1,28 @@ +--- +title: "Environment variables" +description: "Use environment variables to change OctoBot's behavior. Install the latest tentacles, change the web interface IP and port, disable exchange rate limit." +sidebar_position: 10 +--- + + + +# OctoBot's environment variables + +## Tentacles installation + +`TENTACLES_URL_TAG` overrides the default OctoBot version tag for +tentacles package installation. Some additional tags are available : + +- **latest** : to install the latest published tentacles (usually requires an up-to-date `dev` branch on OctoBot to work) +- **tests/XXX** : for OctoBot-Tentacles-Manager tests + +## Web Interface + +- `WEB_ADDRESS` overrides the host IP address, can be set to `0.0.0.0` to accept all incoming connections. +- `WEB_PORT` overrides the default web port (5001). + +## Exchanges + +- `DEFAULT_REQUEST_TIMEOUT`: Exchanges requests timeout in milliseconds. Can be increased if your internet connection is very slow. Default value is `20000`. +- `ENABLE_CCXT_VERBOSE`: Set to `True` to log each <a href="https://github.com/ccxt/ccxt" rel="nofollow">ccxt</a> exchange request. Default is `False`. +- `ENABLE_CCXT_RATE_LIMIT`: Set to `False` to disable <a href="https://docs.ccxt.com/#/?id=rate-limit" rel="nofollow">ccxt rate limit</a>. This will make each exchange request to be instantly emitted. **Be careful as this can lead to an IP ban** if the exchange spamming rules are not respected. Default is `True`. diff --git a/docs/content/developers/environment/github-repositories.md b/docs/content/developers/environment/github-repositories.md new file mode 100644 index 0000000000..93c96a0371 --- /dev/null +++ b/docs/content/developers/environment/github-repositories.md @@ -0,0 +1,38 @@ +--- +title: "GitHub repositories" +description: "Learn about the different OctoBot repositories on GitHub. How the split is done and what is their purpose." +sidebar_position: 8 +--- + + + +# OctoBot GitHub repositories + +OctoBot code is split into multiple repositories, all hosted under +the <a href="https://github.com/Drakkar-Software" rel="nofollow">Drakkar-Software</a> organisation on +GitHub. + +- <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">github.com/Drakkar-Software/OctoBot</a> (dev branch) for the main program initialization, + backtesting and strategy optimizer setup as well as community data management. +- <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">github.com/Drakkar-Software/OctoBot-Tentacles</a> (dev branch) tentacles: evaluators, strategies, trading + modes, interfaces, notifiers, external data feeds (reddit, telegram etc), + backtesting data formats management and exchange specific behaviors. +- <a href="https://github.com/Drakkar-Software/OctoBot-Trading" rel="nofollow">github.com/Drakkar-Software/OctoBot-Trading</a> for everything trading and exchange related: exchange + connections, exchange data fetch and update, orders, trades and portfolios + management. +- <a href="https://github.com/Drakkar-Software/OctoBot-evaluators" rel="nofollow">github.com/Drakkar-Software/OctoBot-evaluators</a> for everything related to evaluators and strategies. +- <a href="https://github.com/Drakkar-Software/OctoBot-Services" rel="nofollow">github.com/Drakkar-Software/OctoBot-Services</a> for everything related to interfaces: graphic (web) and + text(telegram), notifications push and social analysis data management: update + engine to handle new data from an external feed (ex: reddit) when it gets + available. +- <a href="https://github.com/Drakkar-Software/OctoBot-Backtesting" rel="nofollow">github.com/Drakkar-Software/OctoBot-Backtesting</a> for the [backtesting + engine](/en/guides/octobot-usage/backtesting) and scheduling as well as + historical data collection unified storage management. +- <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles-Manager" rel="nofollow">github.com/Drakkar-Software/OctoBot-Tentacles-Manager</a> for tentacles installation, updates and interactions: + get a tentacle documentation, configuration or it's dependencies. +- <a href="https://github.com/Drakkar-Software/OctoBot-Commons" rel="nofollow">github.com/Drakkar-Software/OctoBot-Commons</a> for common tools and constants used across each above + repository. +- <a href="https://github.com/Drakkar-Software/Async-Channel" rel="nofollow">github.com/Drakkar-Software/Async-Channel</a> which is used by OctoBot as a base framework for every + data transfer within the bot. This allows a highly optimized and scalable + architecture that adapts to any system while using a very low amount of CPU + and RAM. diff --git a/docs/content/developers/environment/running-tests.md b/docs/content/developers/environment/running-tests.md new file mode 100644 index 0000000000..a4dc133c8c --- /dev/null +++ b/docs/content/developers/environment/running-tests.md @@ -0,0 +1,39 @@ +--- +title: "Running Tests" +description: "Learn how automated tests are working on the OctoBot open source Python repositories using pytest git github actions." +sidebar_position: 4 +--- + + + +# Tests + +Each OctoBot repository test suite is run using <a href="https://docs.pytest.org/" rel="nofollow">pytest</a> on <a href="https://docs.github.com/actions" rel="nofollow">GitHub Action</a> and can be run locally on a development environment. + +## Requirements + +To run OctoBot's tests, an OctoBot development environment is necessary, +development environment setup is described on the [Setup your environment section](setup-your-environment) + +## OctoBot engine + +To run OctoBot's engine tests, use the _pytest tests_ in OctoBot's root folder : + +```bash +pytest tests +``` + +This will run all tests in the test folder. + +## Tentacles + +To run OctoBot's tentacles tests, use the `pytest tentacles` command in OctoBot's root folder : + +```bash +pytest tentacles +``` + +This will run all tests in the **tentacles** folder. Testing tentacles works only if tentacles +are installed on the tested OctoBot. See the +[developer environment](setup-your-environment) +to install tentacles. diff --git a/docs/content/developers/environment/setup-your-environment.md b/docs/content/developers/environment/setup-your-environment.md new file mode 100644 index 0000000000..76bb89d385 --- /dev/null +++ b/docs/content/developers/environment/setup-your-environment.md @@ -0,0 +1,452 @@ +--- +title: "Setup your environment" +description: "Learn how to create your OctoBot developer environment from the open source OctoBot GitHub Python repositories using VSCode or PyCharm." +sidebar_position: 3 +--- + + + +# OctoBot developer installation + +This environment allows you to execute a local OctoBot from the python code, make local changes, debug and test them. + +## Installing OctoBot requirements + +- Programming language: <a href="https://www.python.org/downloads/release/python-31011/" rel="nofollow">Python 3.10</a> +- SCM: <a href="https://git-scm.com/downloads" rel="nofollow">Git</a> +- IDE: <a href="https://code.visualstudio.com/Download" rel="nofollow">Visual Studio Code</a> (recommended) or <a href="https://www.jetbrains.com/pycharm/" rel="nofollow">PyCharm</a> + + +## Cloning OctoBot repositories + +The `OctoBot` and `OctoBot-Tentacles` repositories are required for the OctoBot developer environment. + +Open a terminal in your project folder and execute the following commands to download the repos to use the official version of the repositories. + + +```bash +git clone https://github.com/Drakkar-Software/OctoBot.git --branch dev +git clone https://github.com/Drakkar-Software/OctoBot-Tentacles.git --branch dev +``` +A development environment will prefer using the `dev` branches as all pull requests to those OctoBot repositories should be created against the official `dev` branch of each repository. + +If you wish to contribute to those repositories, please create your own fork of these repositories and use them instead. + +*Going further* +Are you an advanced developer who already understand how OctoBot works as a whole and you would like to add changes to the core modules of OctoBot? + +As the OctoBot code is split into different repositories, each dedicated to a different aspect of the software, cloning repositories might be necessary. More details on the [GitHub repositories page](github-repositories). + +## VSCode OctoBot environment + +### Creating the project and installing dependencies + +1. Open Visual Studio Code and open the folder where the OctoBot repositories are. +2. Open the terminal and create a new Python 3.10 virtual environment to contain OctoBot's dependencies. Command: `python -m venv venv` +3. Activate your virtual environment (`.\venv\Scripts\Activate.ps1` on Windows or `source venv/bin/activate` on Linux/macOS) +<div style="text-align: center"> + +![vscode create octobot venv](/images/guides/dev_env/vscode-create-octobot-venv.png) + +</div> +4. Install python dependencies using `python -m pip install -r OctoBot/requirements.txt -r OctoBot/dev_requirements.txt` from the integrated VSCode terminal, which is using your new virtual env. +<div style="text-align: center"> + +![vscode install python requirements](/images/guides/dev_env/vscode-install-python-requirements.png) + +</div> + + +### Configuring VSCode +1. Create a `.vscode` folder at the root of your project. +2. In the `.vscode` folder, create a `settings.json` file with the following content to make VSCode use your Virtual environment. Note: replace the path to the python executable on Linux/MacOS. +```json +{ + "python.defaultInterpreterPath": "${workspaceFolder}/venv/Scripts/python.exe" +} +``` +3. In the `.vscode` folder, create a `launch.json` file with the following content to create your run configurations. This file will configure the run configurations you need to develop on OctoBot by making it simple to: +- Start OctoBot +- Run tests +- Manage tentacles + +```json +{ + "configurations": [ + { + "type": "debugpy", + "name": "Start OctoBot", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "1.Run", + "order": 1 + }, + "justMyCode": false, + "args": [], + "env": {} + }, + { + "type": "debugpy", + "name": "OctoBot tests", + "request": "launch", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "2.Test", + "order": 20 + }, + "justMyCode": false, + "args": [ + "tests", + "--no-header", + "--disable-warnings", + "--show-capture=no", + "-v", + "-vv", + "-k", + " " + ], + "module": "pytest" + }, + { + "type": "debugpy", + "name": "OctoBot-Tentacles tests trading modes", + "request": "launch", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "2.Test", + "order": 21 + }, + "justMyCode": false, + "args": [ + "tentacles/Trading/Mode", + "--no-header", + "--disable-warnings", + "--show-capture=no", + "-v", + "-vv", + "-s", + "-k", + " " + ], + "module": "pytest" + }, + { + "type": "debugpy", + "name": "Export tentacles to repo", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 31 + }, + "justMyCode": false, + "args": [ + "tentacles", + "-e", + "../../OctoBot-Tentacles", + "OctoBot-Default-Tentacles", + "-d", + "../OctoBot/tentacles" + ] + }, + { + "type": "debugpy", + "name": "OctoBot repair tentacles", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 32 + }, + "justMyCode": false, + "args": [ + "tentacles", + "--repair", + "-d", + "." + ] + }, + { + "type": "debugpy", + "name": "Export OctoBot-Tentacles to zip", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 33 + }, + "justMyCode": false, + "args": [ + "tentacles", + "-p", + "../tentacles_default_export.zip", + "-d", + "../OctoBot-Tentacles" + ] + }, + { + "type": "debugpy", + "name": "Install tentacles zip", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 34 + }, + "justMyCode": false, + "args": [ + "tentacles", + "-i", + "--all", + "--location", + "any_platform.zip" + ] + } + ] +} +``` + +VSCode should now display the launch.json configurations in its user interface. + +<div style="text-align: center"> + +![vscode run configurations selector](/images/guides/dev_env/vscode-run-configurations-selector.png) + +</div> + +Note: these files were created using VSCode `1.102.1` (from July 2025). If any value becomes deprecated in newer VSCode versions, please contact us to update this guide. + +### Executing OctoBot + +#### 1. Installing tentacles from a tentacles repository +Now that your VSCode is configured, it is necessary to install your initial OctoBot tentacles. + +1. Execute the `Export OctoBot-Tentacles to zip` run configuration + +This run configuration will automatically install all tentacles contained in a local folder into your OctoBot, so that it can use them. OctoBot can only use tentacles that are properly installed in its `tentacles` folder. + +This step is necessary to use the previously clonned `OctoBot-Tentacles` tentacles code. Skipping it will make your OctoBot download the tentacles associated to its latest release which might be incompatible with the `dev` branch your OctoBot code is currently set to. + +<div style="text-align: center"> + +![vscode executed export tentacles to zip](/images/guides/dev_env/vscode-executed-export-tentacles-to-zip.png) + +</div> + +This will export the OctoBot-Tentacles tentacles into a zip archive that can be installed on your OctoBot, or shared + +2. Execute the `Install tentacles zip` run configuration + +<div style="text-align: center"> + +![vscode executed install tentacles from zip](/images/guides/dev_env/vscode-executed-install-tentacles-from-zip.png) + +</div> + +This added to your OctoBot tentacles the tentacles contained into this zip. This run configuration can be used to install any tentacles zip + + +Your OctoBot local folder now contains the tentacles code you clonned from the `OctoBot-Tentacles` repository. Re-execute `Export OctoBot-Tentacles to zip` and `Install tentacles zip` when you want to update your local tentacles from the `OctoBot-Tentacles` git repository. +Warning: this will override any local change to the re-installed tentacles so be sure to save your local changes beforehand. + +#### 2. Starting OctoBot + +This run configuration will start your local OctoBot. Make sure your `OctoBot-Tentacles` tentacles have been installed first (from the `Export OctoBot-Tentacles to zip` and `Install tentacles zip` run config executions) or OctoBot will install its default tentacles and their import will might fail. + +Execute the `Start OctoBot` run configuration + +<div style="text-align: center"> + +![vscode executed start octobot](/images/guides/dev_env/vscode-executed-start-octobot.png) + +</div> + +#### 3. Exporting your tentacle changes into their git repository + +This run configuration will export changes of your local OctoBot tentacles into the configured tentacles repository. It will take the files linked to your selected tentacle package. + +Execute the `Export tentacles to repo` run configuration + + +This will apply your the changes from your OctoBot/tentacles folder into the git repository of this tentacles package. +<div style="text-align: center"> + +![vscode executed export tentacles to repo](/images/guides/dev_env/vscode-executed-export-tentacles-to-repo.png) + +</div> + +From the `launch.json` parameters, you can change: +- `OctoBot-Default-Tentacles` to select tentacles to export from a different package. Packages are defined in the `metadata.json` of each tentacle, under the `origin_package` key. +- `OctoBot-Tentacles` to export tentacles to a different git reposition. + + +#### 4. Running tests + +The `OctoBot tests` and `OctoBot-Tentacles tests trading modes` are example configurations to execute all OctoBot tests or OctoBot tentacles Trading Modes tests. Feel fee to add any other test run configurations. + +<div style="text-align: center"> + +![vscode executed tests](/images/guides/dev_env/vscode-executed-tests.png) + +</div> + + +## PyCharm OctoBot environment + +### Creating the project and installing dependencies +1. Open Pycharm and open the folder where the OctoBot repositories are. +2. Create a new Python 3.10 virtual environment to contain OctoBot's dependencies. +<div style="text-align: center"> + +![create pycharm interpreter](/images/guides/dev_env/create-pycharm-interpreter.png) + +</div> +3. Install python dependencies from the OctoBot repo folder using `python -m pip install -r OctoBot/requirements.txt -r OctoBot/dev_requirements.txt` from the integrated PyCharm terminal, which is using your new virtual env by default. +<div style="text-align: center"> + +![install octobot requirements from pycharm](/images/guides/dev_env/install-octobot-requirements-from-pycharm.png) + +</div> + +### Create PyCharm run configurations + +The following steps will create PyCharm run configurations using the previously created virtual env (then one which contains the OctoBot dependencies) for each way you want to start python commands: +- Starting OctoBot +- Running tests +- Managing tentacles + +#### 1. Installing tentacles from a git repository +This run configuration will automatically install all tentacles contained in a local folder into your OctoBot, so that it can use them. OctoBot can only use tentacles that are properly installed in its `tentacles` folder. + +This step is necessary to use the previously clonned `OctoBot-Tentacles` tentacles code. Skipping it will make your OctoBot download the tentacles associated to its latest release which might be incompatible with the `dev` branch your OctoBot code is currently set to. + +1. Click on `Edit Configurations` +<div style="text-align: center"> + +![edit pycharm configurations](/images/guides/dev_env/edit-pycharm-configurations.png) + +</div> +2. Create the `Export OctoBot-Tentacles to zip` run configuration: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +- Script parameters: `tentacles -p ../tentacles_default_export.zip -d ../OctoBot-Tentacles` +<div style="text-align: center"> + +![create pycharm export tentacles config](/images/guides/dev_env/create-pycharm-export-tentacles-config.png) + +</div> +3. Execute this run configuration. This will export the OctoBot-Tentacles tentacles into a zip archive that can be installed on your OctoBot, or shared. +<div style="text-align: center"> + +![execute pycharm export tentacles](/images/guides/dev_env/execute-pycharm-export-tentacles.png) + +</div> +4. Create the `Install tentacles zip` run configuration to install these zipped tentacles on your OctoBot: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +- Script parameters: `tentacles -i --all --location any_platform.zip` +<div style="text-align: center"> + +![create pycharm install tentacles config](/images/guides/dev_env/create-pycharm-install-tentacles-config.png) + +</div> +5. Execute this run configuration. This added to your OctoBot tentacles the tentacles contained into this zip. This run configuration can be used to install any tentacles zip. +<div style="text-align: center"> + +![execute pycharm install tentacles](/images/guides/dev_env/execute-pycharm-install-tentacles.png) + +</div> + +Your OctoBot local folder now contains the tentacles code you clonned from the `OctoBot-Tentacles` repository. Re-execute `Export OctoBot-Tentacles to zip` and `Install tentacles zip` when you want to update your local tentacles from the `OctoBot-Tentacles` git repository. +Warning: this will override any local change to the re-installed tentacles so be sure to save your local changes beforehand. + +#### 2. Starting OctoBot +This run configuration will start your local OctoBot. Make sure your `OctoBot-Tentacles` tentacles have been installed first (from the `Export OctoBot-Tentacles to zip` and `Install tentacles zip` run config executions) or OctoBot will install its default tentacles and their import will might fail. + +1. Click on `Edit Configurations` +<div style="text-align: center"> + +![edit pycharm configurations](/images/guides/dev_env/edit-pycharm-configurations.png) + +</div> +2. Create the `Start OctoBot` run configuration: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +<div style="text-align: center"> + +![create pycharm start octobot run config](/images/guides/dev_env/create-pycharm-start-octobot-run-config.png) + +</div> +3. Execute this the run configuration to start your OctoBot +<div style="text-align: center"> + +![execute pycharm start octobot](/images/guides/dev_env/execute-pycharm-start-octobot.png) + +</div> + +You can now start your OctoBot from your development environment, make local changes and run python in debug mode. + +#### 3. Exporting your tentacle changes into their git repository +This run configuration will export changes of your local OctoBot tentacles into the configured tentacles repository. It will take the files linked to your selected tentacle package. + +1. Click on `Edit Configurations` +<div style="text-align: center"> + +![edit pycharm configurations](/images/guides/dev_env/edit-pycharm-configurations.png) + +</div> +2. Create the `Export tentacles to repo` run configuration: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +- Script parameters: `tentacles -e ../../OctoBot-Tentacles OctoBot-Default-Tentacles -d ../OctoBot/tentacles` +<div style="text-align: center"> + +![create pycharm export tentacles to repo config](/images/guides/dev_env/create-pycharm-export-tentacles-to-repo-config.png) + +</div> +3. Execute this the run configuration to apply your the changes from your OctoBot/tentacles folder into the git repository of this tentacles package. +<div style="text-align: center"> + +![execute pycharm export tentacles to repo](/images/guides/dev_env/execute-pycharm-export-tentacles-to-repo.png) + +</div> + +From the script parameters, you can change: +- `OctoBot-Default-Tentacles` to select tentacles to export from a different package. Packages are defined in the `metadata.json` of each tentacle, under the `origin_package` key. +- `OctoBot-Tentacles` to export tentacles to a different git reposition. + + +#### 4. Running tests + +Create `pytest` run configurations to run OctoBot tests. Feel fee to add any other test run configurations. + +<div style="text-align: center"> + +![create pycharm tests config](/images/guides/dev_env/create-pycharm-tests-config.png) + +</div> +<div style="text-align: center"> + +![execute pycharm tests](/images/guides/dev_env/execute-pycharm-tests.png) + +</div> diff --git a/docs/content/developers/environment/tips.md b/docs/content/developers/environment/tips.md new file mode 100644 index 0000000000..544eefa3fa --- /dev/null +++ b/docs/content/developers/environment/tips.md @@ -0,0 +1,45 @@ +--- +title: "Developer tips" +description: "Profit from our tips to get you quickly started as an OctoBot developer. Explore the SQLite backtesting files using SQLite browser and test your strategies." +sidebar_position: 9 +--- + + + +# Developer tips + +## Backtesting data + +[Backtesting](/guides/octobot-usage/backtesting) data files are sqlite database files. When using the regular data collector, these files contain every historical candles the requested exchange is willing to give. You can use a <a href="https://sqlitebrowser.org/" rel="nofollow">SQLite browser</a> to explore these files. + +## Strategy tests + +To quickly check tentacles strategy tests states or develop a new tentacles strategy test, change the following lines in **octobot/tests/functional_tests/strategy_evaluators_tests/abstract_strategy_test.py**: + +```python +def _handle_results(self, independent_backtesting, profitability): + exchange_manager_ids = get_independent_backtesting_exchange_manager_ids(independent_backtesting) + for exchange_manager in get_exchange_managers_from_exchange_ids(exchange_manager_ids): + _, run_profitability, _, market_average_profitability, _ = get_profitability_stats(exchange_manager) + actual = round(run_profitability, 3) + # uncomment this print for building tests + # print(f"results: rounded run profitability {actual} market profitability: {market_average_profitability}" + # f" expected: {profitability} [result: {actual == profitability}]") + assert actual == profitability +``` + +into + +```python +def _handle_results(self, independent_backtesting, profitability): + exchange_manager_ids = get_independent_backtesting_exchange_manager_ids(independent_backtesting) + for exchange_manager in get_exchange_managers_from_exchange_ids(exchange_manager_ids): + _, run_profitability, _, market_average_profitability, _ = get_profitability_stats(exchange_manager) + actual = round(run_profitability, 3) + # uncomment this print for building tests + print(f"results: rounded run profitability {actual} market profitability: {market_average_profitability}" + f" expected: {profitability} [result: {actual == profitability}]") + # assert actual == profitability +``` + +This will not stop tests on failure and display the current tests results as well as expected values. diff --git a/docs/content/developers/getting-started.md b/docs/content/developers/getting-started.md new file mode 100644 index 0000000000..42a71edbc6 --- /dev/null +++ b/docs/content/developers/getting-started.md @@ -0,0 +1,41 @@ +--- +title: "Starting as a developer" +description: "Wondering how to deeply customize OctoBot ? Learn how to start as an OctoBot developer, create your own tentacles and contribute to the open source trading robot" +sidebar_position: 21 +--- + + + +# OctoBot developers + +:::info +For developers and contributors to the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a>. +::: + +## 1 - Learn the basics of Python + +Watching an hour of a beginner Python video course should be enough to get started. + +## 2 - Setup your OctoBot developer environment + +Once setup you'll love the [OctoBot Developer environment](/guides/octobot-developers-environment/setup-your-environment). + +## 3 - Learn how to create Tentacles + +OctoBot Tentacles are apps/extensions for OctoBot. + +A tentacle can be what ever you want it to be. For example: + +- A technical Indicator +- A custom python trading Strategy +- Something to push notifications to Discord +- An improvement to the web interface +- Or something entirely different + +Finally, tentacles can be packed into a tentacle bundle to be shared with other OctoBot users. + +Check out out the [OctoBot customization guide](/guides/octobot-tentacles-development/customize-your-octobot) to learn more. + +## 4 - Contribute to OctoBot + +Please have a look at our <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/CONTRIBUTING.md" rel="nofollow">contribution guidelines</a>. diff --git a/docs/content/developers/packages/_category_.json b/docs/content/developers/packages/_category_.json new file mode 100644 index 0000000000..09c7f68a22 --- /dev/null +++ b/docs/content/developers/packages/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Packages", + "position": 3, + "link": { + "type": "doc", + "id": "developers/packages/overview" + } +} diff --git a/docs/content/developers/packages/agents.md b/docs/content/developers/packages/agents.md new file mode 100644 index 0000000000..030cdfc614 --- /dev/null +++ b/docs/content/developers/packages/agents.md @@ -0,0 +1,43 @@ +--- +title: Agents +description: Multi-agent AI orchestration framework built on async channels, supporting teams, memory, self-improvement, debate phases, and LangChain Deep Agents integration. +sidebar_position: 1 +--- + +# Agents Package + +`octobot_agents` provides the infrastructure for composing and running multi-agent AI workflows inside OctoBot. It defines an abstract layer on top of `async_channel` that lets you build individual LLM-backed agents, wire them into teams, orchestrate execution order, score results with a critic, and improve performance over time through a persistent memory subsystem. + +## Core abstractions + +An **Agent** is a single LLM-backed unit that runs against input data and produces a result. Each agent type has its own channel class that routes results from a producer to one or more consumers, following the `async_channel` pattern used throughout OctoBot. + +A **Team** is a DAG of agents managed by a single manager agent. The team controls execution order, passes outputs between agents, and optionally runs debate and self-improvement cycles on top of the main execution pass. + +The **Manager** decides what runs and in what order. It can work in two modes: plan-driven, where it produces an ordered `ExecutionPlan` before any agent fires, or tools-driven, where it calls agents directly as tools and returns a `ManagerResult`. The right mode depends on whether the task is structured enough to plan ahead. + +The **Critic** runs after team execution and produces a structured analysis of issues, inconsistencies, and per-agent improvement notes. Its output feeds directly into the memory subsystem. + +The **Judge** arbitrates debate phases: given the accumulated debate history it returns either continue or exit, with an optional synthesis summary. The default maximum is three rounds. + +## Execution modes + +Three team execution strategies are available. **Sync** is one-shot sequential — the manager produces a plan or result, agents execute in DAG order, and the call returns when all results are collected. **Live** is long-running async — channels are wired, agents fire as upstream results arrive, and completion is signaled when all terminal agents finish. **Deep Agents** delegates to a LangChain supervisor with `SubAgentMiddleware`, which orchestrates workers as subagents and supports both `ainvoke` and streaming. The Deep Agents path is optional; the package remains fully importable without the LangChain dependencies installed. + +## Self-improvement loop + +When a team is configured with `self_improving=True`, execution triggers an additional pass in the background. The `CriticAgent` receives all agent outputs and produces an analysis, then the `MemoryAgent` writes new memories to each agent's `JSONMemoryStorage`. On the next run, agents retrieve those memories via LLM tool calls and adjust their behavior accordingly. Both steps run as a background `asyncio.Task` so they do not block the caller waiting for results. + +Memory files are stored per agent class and pruned when they exceed the configured maximum, prioritizing entries with high importance and high usage. The effect is that frequently useful memories survive compression while stale ones are dropped. + +## Skills + +Skills are markdown files with YAML frontmatter that describe capabilities or domain knowledge an agent should be aware of. They live in a `skills/` directory alongside the agent's code and are auto-discovered at build time, then passed into the agent's context during inference. Individual agents can also receive skills injected at instantiation time, separate from the directory-level defaults. + +## Deep Agents and human-in-the-loop + +The Deep Agents integration supports tool-level interrupts. An interrupt configuration identifies which tools require human approval before proceeding — for example, high-risk tools like order placement. When execution hits one of those tools, the workflow pauses and surfaces an `__interrupt__` in its result. The caller can then resume by approving all interrupts, rejecting them, or providing explicit decisions per tool. + +## Utilities + +The package includes resilient JSON extraction helpers for parsing LLM output, which rarely arrives as clean JSON. The extractor tries multiple strategies in sequence: brace-matching from mixed text, extraction from fenced code blocks, extraction from XML-style tags, and preprocessing to strip fences and escape sequences. An async retry decorator wraps the tools-driven manager's LLM calls internally. diff --git a/docs/content/developers/packages/async-channel.md b/docs/content/developers/packages/async-channel.md new file mode 100644 index 0000000000..ed0b3a667c --- /dev/null +++ b/docs/content/developers/packages/async-channel.md @@ -0,0 +1,41 @@ +--- +title: Async Channel +description: Asyncio-based producer/consumer message bus with filtering, priority levels, and synchronized execution mode. +sidebar_position: 1 +--- + +# Async Channel + +`async_channel` is OctoBot's internal multi-task communication library. It implements a typed, async producer/consumer message bus built on top of `asyncio.Queue`. Components across the application use it to pass data between loosely coupled parts without holding direct references to each other. + +## Channels, producers, and consumers + +A `Channel` is the hub that connects one or more producers to one or more consumers. You always subclass it to define a specific data flow, declaring `PRODUCER_CLASS` and `CONSUMER_CLASS` as class attributes. The channel name defaults to the class name with the `"Channel"` suffix stripped; override `get_name()` to change this. `ChannelInstances` is a process-global registry mapping channel names (or `chan_id` and name pairs) to live instances. For deployments where the same channel type exists under multiple IDs, `*_at_id` variant helpers group channels by `chan_id`. + +A `Producer` pushes data into consumer queues by calling `send()` to enqueue across all registered consumers. The higher-level `push()` method is the normal entry point and can transform or gate data before calling `send()`. Each producer starts its own `asyncio.Task` via `run()`, unless the channel is in synchronized mode. + +A `Consumer` owns an `asyncio.Queue` and runs a background task that continuously dequeues and calls `perform()`, which invokes the registered callback with the queued kwargs. Consumers are registered with a callback and optional filters; each gets its own queue and background task. Stopping the channel stops all producers and consumers in order. + +## Filtering + +When registering a consumer you provide a `consumer_filters` dict. When a producer calls `get_consumer_from_filters()`, only consumers whose stored filters match all keys in the provided dict are returned. A query value of `CHANNEL_WILDCARD` matches any stored value for that key, and a consumer whose stored value is `CHANNEL_WILDCARD` matches any queried value. If the consumer's stored value is a list, the match succeeds if the queried value appears in it or any list element is the wildcard. An empty filter dict in the query returns all consumers. + +## Pause and resume + +Channels start in the paused state. A channel resumes its producers automatically when at least one consumer with a non-`OPTIONAL` priority level is registered, and pauses again when no such consumers remain. Producers that only serve `OPTIONAL` consumers are considered logically idle from the channel's perspective — this prevents wasteful processing when nothing meaningful is listening. + +## Synchronized mode + +In normal operation each consumer and producer runs its own asyncio task. Synchronized mode disables task creation entirely — no tasks are spawned for producers or consumers. Instead, the producer drives execution explicitly by calling `synchronized_perform_consumers_queue()`, which drains each consumer's queue in the current coroutine for consumers at or above the requested priority level. This gives full deterministic control over execution order and is used in backtesting, where you need to replay events in a defined sequence without the non-determinism of concurrent tasks. + +## Priority levels + +Priority levels serve two purposes. `HIGH` and `MEDIUM` consumers keep producers running; `OPTIONAL` consumers do not. In synchronized mode, consumers are drained in priority order, so high-priority subscribers always process data before lower-priority ones. This ordering matters for backtesting correctness, where strategies must process evaluator output before the next market event is injected. + +## Supporting types + +`Channel.get_internal_producer()` provides a lazily-created producer that lives on the channel itself, enabling non-producer code to publish without managing an explicit producer reference. It is stopped automatically when the channel stops. + +`SupervisedConsumer` extends `Consumer` with an `idle` event that tracks whether `perform()` is currently executing. This lets a producer wait for a specific consumer to finish before continuing — important when correctness depends on consumption order rather than just delivery. + +`InternalConsumer` is a consumer subclass where the callback is declared as `internal_callback` on the class itself rather than passed at construction, which is useful when the callback logic is tightly coupled to the consumer's own state. diff --git a/docs/content/developers/packages/backtesting.md b/docs/content/developers/packages/backtesting.md new file mode 100644 index 0000000000..835faead78 --- /dev/null +++ b/docs/content/developers/packages/backtesting.md @@ -0,0 +1,45 @@ +--- +title: Backtesting +description: Architecture and key concepts of the octobot_backtesting package — the engine that runs trading strategies against historical or social data. +sidebar_position: 1 +--- + +# Backtesting Package + +`octobot_backtesting` provides a time-driven simulation loop that replays historical market or social data through the same channel infrastructure that live trading uses. Because the backtesting engine feeds data through `async_channel` producers the same way a live exchange connector does, trading modes and evaluators require no modifications to run in either context. + +## Simulation loop + +The loop is driven by `TimeUpdater`, which advances a clock managed by `TimeManager` and pushes each timestamp through a `TimeChannel`. At each tick, the channel manager flushes producers in ascending priority order, fully draining one priority level before moving to the next. This ordering replicates live causal sequencing: raw price data completes before evaluators compute signals, which completes before strategies issue orders. After all producers are flushed, the loop yields to the asyncio event loop before advancing the clock, ensuring any triggered coroutines run at the correct simulated moment. + +After the first successful iteration, the loop prunes producers whose channels have no consumers and rebuilds the priority-level map. This removes irrelevant work from every subsequent tick rather than re-checking it each time. + +When multiple backtests run in the same process — as happens during optimisation runs — each `Backtesting` instance registers its `TimeChannel` under a namespaced key so their clocks do not interfere. + +## Clock and timestamp control + +`TimeManager` holds the starting and finishing timestamps, the current position, and a configurable time interval that defaults to 50 seconds. Advancing the clock is normally just adding the interval, but a whitelist mode is available for cases where only specific timestamps matter — sparse social data feeds, for example. When a whitelist is active, `next_timestamp()` skips any timestamp not present in the sorted whitelist deque. The deque pops stale entries as it advances, keeping the scan cheap. A callback can bypass the whitelist for a specific tick when needed. + +## Data files + +Historical data lives in `.data` files, which are SQLite databases. A `description` table records the file's version, type (exchange or social), exchange name, symbols, time frames, and the time range covered. During collection the database is written to a `.part` path and atomically renamed to `.data` on completion, so importers never encounter a partial file. + +The file name encodes the collector class that produced it. When an importer is created from a file name, the package resolves the collector class by name and reads its `IMPORTER` attribute to instantiate the right importer. If no match is found it falls back to the default exchange history importer, which handles renamed files gracefully. + +The description schema has evolved across format versions. Version 2.0 separates exchange and social data types and adds `start_timestamp`; older versions are exchange-only and lack that field. Importers detect the version on initialisation and parse accordingly. + +## Exchange and social importers + +`ExchangeDataImporter` probes each table at startup and records only the non-empty ones, so time-range queries only touch tables that actually contain data. `SocialDataImporter` stores events with a service name, channel, symbol, and a JSON payload; its description encodes a services list rather than an exchange name. + +Both importers maintain a chronological read cache keyed by symbol, time frame, and data type. The first query fetches all rows from the requested timestamp onwards and populates the cache; subsequent queries slice the already-loaded list. This forward-only contract means backward seeks return stale results unless the cache is explicitly reset — which is the correct behaviour for sequential replay and efficient for the optimisation case where the same file is replayed many times. + +## Collectors + +`DataCollector` is the base class for anything that writes a `.data` file. It manages path creation, constructs the database at the `.part` path, holds an HTTP session, and provides a retry-capable request helper and a recursive pagination helper for APIs that return continuation URLs. Exchange and social branches extend it with typed save helpers. The concrete collector implementation for a given context is resolved via tentacle discovery, so tentacle packages can override collection behaviour without touching core code. + +## Multi-run sharing and progress + +`BacktestData` pre-initialises importers once and shares them across multiple `Backtesting` instances. It also manages pre-warmed candle arrays keyed by exchange, symbol, time frame, and time range. Between sequential runs, resetting the importer cache indexes rewinds the read position without reopening SQLite connections. + +Progress is exposed as a 0.0–1.0 float based on remaining versus total iterations. Completion is signalled via an `asyncio.Event` that callers can await; there is no need to poll. A per-priority-level drain timeout of 15 seconds guards against stuck consumers — a timeout is logged but does not abort the run. diff --git a/docs/content/developers/packages/binary.md b/docs/content/developers/packages/binary.md new file mode 100644 index 0000000000..fc8a31ffb8 --- /dev/null +++ b/docs/content/developers/packages/binary.md @@ -0,0 +1,31 @@ +--- +title: Binary +description: PyInstaller-based pipeline that compiles OctoBot into self-contained executables for Windows, Linux, and macOS. +sidebar_position: 1 +--- + +# Binary + +The `binary` package contains the tooling that packages OctoBot into standalone, single-file executables using [PyInstaller](https://pyinstaller.org/). The resulting binaries run on Windows, Linux, and macOS without requiring Python or any dependencies to be installed on the target machine. + +## The core problem + +PyInstaller works by statically tracing imports and bundling everything it finds. OctoBot's plugin architecture defeats this: tentacles are discovered at runtime by scanning the filesystem, not by static imports, so PyInstaller cannot see them. The solution is a pre-processing pipeline that runs before PyInstaller and makes the invisible visible. + +## Build pipeline + +The pipeline has four steps. First, a module discovery script walks all installed site-packages and the local repository to find every `octobot*` module and the `async_channel` library, producing a list of dotted import paths that feeds directly into PyInstaller's `hiddenimports`. This covers the runtime-discovered plugin code. + +Second, hidden import patching handles a specific case that the discovery step cannot: the `gevent` async driver for `python-engineio` is loaded via string lookup at runtime and has no static import anywhere in the codebase. The pipeline appends an explicit import statement to the CLI entry point before compilation to force PyInstaller to include it. + +Third, NLTK corpus bundling ensures that sentiment analysis works inside the binary. The NLTK `words` corpus cannot be downloaded at runtime inside a packaged executable, so it is downloaded before packaging and bundled as a static asset. + +Fourth, PyInstaller is invoked against a custom spec file rather than a plain entry point. The spec gives precise control over data assets, hidden imports, and exclusions. Notably, the `tentacles/`, `logs/`, and `user/` directories are excluded from the bundle — they are runtime-only and must live outside the executable on the user's machine. + +After the build completes, CI validates the output by running `OctoBot --version` and renames the artifact to the platform-specific filename. + +## Design decisions + +The `hiddenimports` list in the spec file is maintained manually rather than generated, because fully automated discovery would include test dependencies and dev tools that should not ship in a production binary. The list reflects libraries that use dynamic import patterns: exchange connectivity libraries, web interface async transports, notification service integrations, blockchain connectivity, and sentiment analysis. The `websockets.legacy.*` sub-modules are listed explicitly because PyInstaller does not recurse into namespace packages automatically. + +PyInstaller is pinned to a specific version across all CI runs to ensure reproducible builds. Floating the version would risk silent behavioral changes in how PyInstaller traces imports, which can produce a binary that passes `--version` but silently omits a module that only matters at runtime. diff --git a/docs/content/developers/packages/commons.md b/docs/content/developers/packages/commons.md new file mode 100644 index 0000000000..d02a22a9f9 --- /dev/null +++ b/docs/content/developers/packages/commons.md @@ -0,0 +1,31 @@ +--- +title: Commons +description: Shared foundations for all OctoBot packages — enums, constants, configuration, databases, logging, signals, DSL, and more. +sidebar_position: 1 +--- + +# OctoBot Commons + +The `octobot_commons` package is the foundational library shared by every other OctoBot package. It owns cross-cutting concerns — configuration, databases, async utilities, a DSL interpreter, and more — so no other package needs to re-implement them. + +## Configuration and profiles + +Configuration is organized into two layers that are merged at runtime. The global config (`config/config.json`) holds exchange credentials and per-installation settings. The profile (`user/profiles/<profile>/profile.json`) holds everything that defines a trading strategy and can be freely shared or committed to version control, because API credentials are always kept in the global config and never written into a profile. When a profile specifies exchange settings, only the non-secret fields travel with it. + +`update_config_fields` applies dot-path updates in-place without reloading from disk, which is how the web UI saves small changes without unnecessary churn. Profile metadata flags carry runtime meaning beyond simple display: `read_only` prevents deletion of non-imported profiles, `hidden` excludes a profile from the main list for internal and template purposes, and `auto_update` makes the bot poll an origin URL on a configurable interval. + +## Databases + +The database layer is organized in three levels. Adaptors define the async CRUD contract. `DBWriter` and `DBReader` sit above them — the writer stages writes through an in-memory cache or row-buffering for backtesting throughput, while the reader wraps access in a chronological read cache. `MetaDatabase` is the single entry point for the trading engine, grouping all databases for a run under one async context manager. + +`RunDatabasesIdentifier` generates all file paths for a run by encoding the trading mode class, campaign, and run type into the path. Run IDs are assigned by scanning for the next available integer, so runs are never overwritten. `RunDatabasesProvider` is a process-global singleton that shares one `MetaDatabase` connection per run, which prevents competing file handles from different packages. + +The TinyDB backend auto-wipes corrupted files on known errors rather than failing hard. The SQLite backend uses an async cursor pool to avoid blocking the event loop. `CacheWrapper` stores computed indicator values keyed by timestamp and honors `DO_NOT_CACHE` and `DO_NOT_OVERRIDE_CACHE` sentinels so evaluators can skip storage without special-casing at the call site. A metadata table tracks configuration and version for stale-cache detection. `GlobalSharedMemoryStorage` is a simpler in-process singleton dict for transient cross-component state that does not need to survive a restart. + +## DSL interpreter + +The DSL interpreter accepts Python-syntax strings, parses them with `ast.parse`, and converts the resulting AST into a tree of `Operator` instances. Literals stay as plain Python values; everything else becomes an operator subclass. The design is built around a two-phase contract: `pre_compute()` walks the tree bottom-up to handle any async work — I/O, cache lookups — before `compute()` runs top-down synchronously. Operators that need async data inherit from `PreComputingCallOperator`, which stores the fetched value during `pre_compute()` and returns it from `compute()`. Calling `compute()` before `pre_compute()` on such an operator raises immediately. + +Operator registration is name-keyed: each subclass exposes a static `get_name()` returning the token as it appears in DSL source. The interpreter resolves function calls, binary and unary operators, comparisons, boolean operators, subscripts, and even `raise` statements against this dict. New operators can be injected into an existing interpreter instance via `extend()`, which is how higher-level packages augment a base set without subclassing the interpreter. `Operator.get_parameters()` returns a typed parameter list that drives both runtime validation and user-facing documentation generation via `get_docs()`. + +A few behaviors are worth knowing before working with the DSL. Chained comparisons like `a < b < c` are decomposed into pairwise `Compare` operators joined by the registered `And` operator — if `And` is absent, chained comparisons fail at parse time even when individual comparisons would succeed. Expressions that are not valid in `eval` mode are retried in `single` statement mode. `interpreter.prepare(expr)` builds the operator tree once so that subsequent evaluations re-execute pre-compute and compute against the same tree, making repeated evaluation against changing data cheap. `ReCallableOperatorMixin` enables stateful operators by carrying a serialized `last_execution_result` back into the next call, letting operators implement waiting periods or incremental state across evaluations without external storage. diff --git a/docs/content/developers/packages/evaluators.md b/docs/content/developers/packages/evaluators.md new file mode 100644 index 0000000000..1bc6db664c --- /dev/null +++ b/docs/content/developers/packages/evaluators.md @@ -0,0 +1,41 @@ +--- +title: Evaluators +description: Overview of the octobot_evaluators package — the framework for signal generation, strategy composition, and the evaluation matrix. +sidebar_position: 1 +--- + +# Evaluators Package + +The `octobot_evaluators` package is the signal-generation and strategy-composition layer of OctoBot. It defines the abstract base classes that all evaluators and strategies extend, the Matrix data structure that holds live evaluation results, and the async channels that route those results to trading modes. + +## Evaluator types + +All concrete evaluators extend `AbstractEvaluator`, which itself extends `AbstractTentacle` from `octobot_commons`. Three wildcard class methods — one for cryptocurrency, one for symbol, one for time frame — return `True` by default. The factory uses these to decide how many instances to create: one per concrete combination of dimensions, or one shared instance for each wildcard dimension. + +`TAEvaluator` fires on closed OHLCV candles. When a re-evaluation trigger arrives on `EvaluatorsChannel`, it re-fetches the last full candle and replays the callback. In live mode it waits up to five minutes for price initialisation before processing the first candle. `RealTimeEvaluator` fires on forming candles and selects the shortest available time frame that satisfies the requested frame at registration. + +`SocialEvaluator` consumes external feeds such as news and social media through `octobot_services`. A single instance is shared across all symbols. `ScriptedEvaluator` runs a user-supplied async coroutine, caches results via the trading `Context` cache, and supports hot-reload of the script module via a `RELOAD_SCRIPT` command — it is always bound to specific symbols and time frames, never wildcard. + +`StrategyEvaluator` aggregates signals from all other evaluators. Before calling its callback it applies a cycle guard: it checks that the triggering evaluator's Matrix timestamp has actually changed, and that every TA evaluator for the strategy's relevant time frames has a value within the allowed time delta of exchange time. This prevents acting on stale or mixed-freshness signals. + +## The eval note + +Every evaluator stores its result in `self.eval_note`, a float in `[-1.0, 1.0]` where `-1` is the strongest sell signal and `+1` is the strongest buy. `START_PENDING_EVAL_NOTE` is the sentinel value meaning no result yet. After computing, the evaluator calls `evaluation_completed()`, which writes to the Matrix and broadcasts on `MatrixChannel`. Passing `notify=False` updates the Matrix silently without broadcasting. + +## The Matrix + +The Matrix is a lazy path-based tree where nodes are created on first write. The canonical path is six segments: exchange name, evaluator type, evaluator name, cryptocurrency, symbol, and time frame. The evaluator type is always one of the `EvaluatorMatrixTypes` string values. Segments are omitted — not set to `None` — when they don't apply, so a social evaluator with no time frame produces a four-segment path while a TA evaluator produces all six. Traversal helpers treat a missing segment as a wildcard, so you can fetch all evaluator nodes under a given exchange and type with a two-segment query. + +Each node carries the `eval_note` float, the Unix timestamp at which it was evaluated, the eval note type string, and optional description and metadata blobs. Writes always go through `MatrixChannelProducer.send_eval_note`, and the timestamp stored is the one passed by the evaluator — not wall clock at write time — so backtesting can inject historical timestamps without the staleness check falsely failing. + +Reading uses `get_evaluations_by_evaluator`, which walks evaluator-name nodes under a given exchange and type prefix and returns a name-to-node dict. Nodes whose value fails the valid eval note check are silently dropped unless `allow_missing=False`, in which case an `UnsetTentacleEvaluation` is raised. + +A node is considered fresh if the current time is within the time frame's duration plus a 10-second allowed delta of the evaluation timestamp. The staleness check requires a path ending in a valid time frame value — paths for non-TA evaluators with no time frame will always be considered stale by that check, which is intentional. + +Each `Matrix` instance is assigned a UUID at construction and registered in the process-global `Matrices` singleton. Separate exchange connections therefore have separate matrices and separate channel instances keyed by the same matrix ID. + +## Channels and factory + +Two async channels run per matrix ID. `EvaluatorsChannel` carries inter-evaluator commands such as re-evaluation triggers and resets; each evaluator subscribes here filtered by symbol and time frame. `MatrixChannel` broadcasts on every `evaluation_completed()` call and is where strategy evaluators and trading modes subscribe. + +The factory `create_and_start_all_type_evaluators` is triggered by an evaluator creation event on `OctoBotChannel`. It computes the Cartesian product of cryptocurrencies, symbols, and time frames per evaluator class, skips instances that don't pass the relevant-evaluators filter, and starts survivors in descending priority order drawn from each evaluator's tentacle config. Before the factory runs, a startup helper reads required time frames from all active strategy classes and required candle counts from all active evaluators, writing both into the bot config so the exchange feed buffers the right amount of history before evaluators begin. diff --git a/docs/content/developers/packages/flow.md b/docs/content/developers/packages/flow.md new file mode 100644 index 0000000000..69790bdb2c --- /dev/null +++ b/docs/content/developers/packages/flow.md @@ -0,0 +1,31 @@ +--- +title: Flow +description: Architecture and concepts of the octobot_flow package — OctoBot's serverless automation runner. +sidebar_position: 1 +--- + +# Flow Package + +`octobot_flow` is a stateless automation execution engine. An `AutomationState` object is passed in at the start of each invocation, the job runs, and the updated state is returned via `AutomationJob.dump()`. Nothing is held in memory between calls, which means the engine can run as a serverless function and multiple automations are naturally isolated from one another. + +## Execution model + +Each job runs a DAG of actions. The DAG identifies which actions are ready — not yet completed, with all dependencies satisfied — resolves any DSL placeholders by injecting upstream results, and executes them via `DSLExecutor`. After execution, exchange state is synced back into the automation state. + +Priority actions stored in `AutomationState.priority_actions` run before the normal DAG cycle but use the main DAG as their resolution context. This is the mechanism for bootstrapping: on the very first invocation, when there is no previous execution and no exchange account, only `apply_configuration` actions are processed to set up the exchange account from config before the regular cycle runs. + +A DAG reset can be triggered mid-run by a `ReCallingOperatorResult`, which the `wait()` operator returns when its condition is not yet met. A reset computes the transitive closure of dependents from the target action, saves their current results into `previous_execution_result`, and clears their execution timestamps so they re-run on the next invocation. The saved previous result lets re-running operators resume from where they left off rather than starting cold. + +## DSL execution + +`DSLExecutor` wraps the `octobot_commons` DSL interpreter with operator sets registered by tentacles. A fresh interpreter is created per action to prevent state leakage between actions in the same run. DSL scripts are parsed before the exchange is initialised so that required symbols and time frames can be extracted upfront — only the OHLCV data that scripts actually reference is fetched. + +## Simulated and live modes + +When no credentials are present, `ExchangeRepositoryFactory` returns simulated implementations that read from `FetchedExchangeData` snapshots instead of making live API calls. OHLCV data is still fetched live even in simulated mode because it is public. A portfolio can be forced onto the simulated exchange manager to test strategies against a specific account state. + +The ticker cache, with a five-minute TTL and a fifty-entry cap, serves as a fallback when OHLCV data is unavailable during initialisation. Community repositories intentionally use non-singleton auth instances per job — no session is shared between automations, which prevents credential leakage. + +## Exchange lifecycle + +`ExchangeContextMixin` manages the full exchange lifecycle for each job: build config, initialise `ExchangeManager` with storage disabled, apply any forced portfolio for simulated runs, then tear down after the job completes. Portfolio sync is disabled during order creation because the flow package manages portfolio state explicitly through post-action sync calls rather than relying on automatic sync triggered by order events. diff --git a/docs/content/developers/packages/node.md b/docs/content/developers/packages/node.md new file mode 100644 index 0000000000..024ded72ee --- /dev/null +++ b/docs/content/developers/packages/node.md @@ -0,0 +1,107 @@ +--- +title: Node +description: OctoBot Node — a durable task execution server that runs OctoBot automations via a FastAPI backend and DBOS-powered scheduler. +sidebar_position: 1 +--- + +# OctoBot Node + +The `node` package is a standalone service that executes OctoBot automations as durable, distributed tasks. It runs a FastAPI application backed by [DBOS](https://docs.dbos.dev/) — a workflow engine that persists every step of every execution to a database, making the node resilient to crashes and safe to restart mid-task. + +## What a node does + +A node receives automation tasks over its REST and WebSocket API, stores them in either SQLite or PostgreSQL, and hands them off to the `octobot_flow` runtime for execution. Each task describes a DAG of actions. The node handles scheduling, retries, crash recovery, and per-workflow log isolation. Task payloads can optionally be encrypted end-to-end. + +An instance can play one or more roles: it can accept and schedule tasks (master), pull and execute them (consumer), or both. Running them separately is what allows the consumer tier to scale independently. Multi-node deployments require a shared PostgreSQL database; SQLite is single-node only. + +## Workflow lifecycle + +DBOS persists every workflow invocation and step result to the database. If the process crashes mid-execution, the workflow resumes from the last completed step rather than starting over. Each automation iteration runs as a separate child workflow — a deliberate design choice that prevents a long-running automation from accumulating an unbounded step history in a single workflow record. Child workflow IDs embed the parent UUID4 as a prefix so the API can group them back into a coherent execution history. + +The `execute_iteration` function is itself a DBOS step rather than a plain async call. This is what prevents double-execution: DBOS records step entry and exit atomically, so a crash between those two points replays the step rather than running it a second time. Steps are retried up to three times before the workflow exits with an error. + +User-triggered actions — things like a manual override sent through the API — are delivered as DBOS messages on the `"user_actions"` topic while the workflow is running. This lets them bypass the DAG's scheduled next step and be drained as extra iterations immediately after the current one completes. + +Log messages emitted inside any workflow or step are routed to a per-workflow file under `logs/automations/`. Child workflows share their parent's log file, keyed by the first 36 characters of the workflow ID. + +## Encryption + +Task payloads are optionally encrypted using a hybrid RSA/AES-GCM/ECDSA scheme. Each encryption call generates a fresh AES-256-GCM key and IV; the AES key is wrapped with RSA-OAEP so the bulk payload never travels under the asymmetric key directly. An ECDSA signature over the ciphertext — computed as `ciphertext + encrypted_aes_key + iv` concatenated — is verified before any decryption attempt, preventing chosen-ciphertext attacks. + +**Split-ownership key model.** The server holds two private keys set via environment variables: + +| Environment variable | Purpose | +|---|---| +| `TASKS_SERVER_RSA_PRIVATE_KEY` | Decrypts incoming task content (wrapped AES key) | +| `TASKS_SERVER_ECDSA_PRIVATE_KEY` | Signs outgoing task results | + +The browser holds two private keys, entered once in the Settings page and stored locally: + +| Browser key | Purpose | +|---|---| +| `USER_RSA_PRIVATE_KEY` | Decrypts result content from the server | +| `USER_ECDSA_PRIVATE_KEY` | Signs task content before submission | + +User public keys are not configured on the server. When the browser submits an encrypted task, it derives both public keys from the stored private keys using the Web Crypto API and embeds them in the task payload (`user_rsa_public_key`, `user_ecdsa_public_key`). The server uses those per-task keys to verify the input signature and encrypt the result — which means a single node instance can serve any number of browser users with different keypairs without reconfiguration. + +The server public keys (`SERVER_RSA_PUBLIC_KEY` and `SERVER_ECDSA_PUBLIC_KEY`) are never entered manually — the browser fetches them on demand from `GET /tasks/server-public-keys`, which derives and returns them from the server's private keys at runtime. The server never loads the user's private keys; the browser never loads the server's private keys. + +**Encryption in the browser.** When submitting encrypted tasks the browser performs all cryptographic operations locally using the Web Crypto API (`crypto.subtle`), without sending any key material to the server. The `encryptAndSign` function first fetches the server's RSA public key from `GET /tasks/server-public-keys`, generates a fresh AES-256-GCM key, encrypts the task payload, wraps the AES key with that server RSA public key (RSA-OAEP), then signs the concatenation of ciphertext, wrapped key, and IV with `USER_ECDSA_PRIVATE_KEY`. The ECDSA signature is converted from the IEEE P1363 format that Web Crypto produces to DER format before transmission, because Python's `cryptography` library expects DER. + +**Metadata format.** The accompanying metadata envelope carries `ENCRYPTED_AES_KEY_B64`, `IV_B64`, and `SIGNATURE_B64`. For task inputs, `content_metadata` is `base64(JSON)` — the JSON object is serialised then base64-encoded — because it travels as a CSV or API field where a single opaque string is easiest to embed. For task results, `result_metadata` is a plain JSON string; it is stored in the database and consumed by code that already handles JSON, so the extra base64 layer would be noise. Being aware of this distinction matters when building tooling that reads raw database records. + +**`encrypted_task` context manager.** This wraps each task execution on the consumer node transparently. On entry it decrypts `task.content` using `TASKS_SERVER_RSA_PRIVATE_KEY` and verifies the signature when `task.content_metadata` is non-null. Signature verification uses the task's own `user_ecdsa_public_key` field first (browser-submitted tasks carry it inline); if absent, falls back to the `TASKS_USER_ECDSA_PUBLIC_KEY` env var (legacy single-user deployments); then falls back to the server's own ECDSA public key (server-generated internal state, signed with `TASKS_SERVER_ECDSA_PRIVATE_KEY`). If decryption fails the context manager logs the error and continues with the original encrypted content — it does not crash the workflow. On exit it restores the original `task.content` and does not touch results. + +**Internal state and result encryption.** Between iterations the automation state is stored in DBOS encrypted with `encrypt_task_content` (AES-GCM wrapped with SERVER_RSA_PUBLIC, signed with SERVER_ECDSA_PRIVATE), making it readable only by the server. When completed executions are fetched via the API, the scheduler decrypts the stored state using the `encrypted_task` context manager (SERVER_RSA_PRIVATE + SERVER/USER ECDSA public), then immediately re-encrypts it with `encrypt_task_result` (AES-GCM wrapped with the task's `user_rsa_public_key` field, signed with SERVER_ECDSA_PRIVATE) before returning it. The API surface therefore only ever exposes ciphertext targeted at the specific browser user who submitted the task. Decryption happens in the browser using `USER_RSA_PRIVATE_KEY`, with the signature verified against `SERVER_ECDSA_PUBLIC_KEY`. + +**Security boundary with `octobot_flow`.** The `encrypted_task` context manager wraps the call to `octobot_flow`'s `AutomationJob.run()` inside the node's workflow step. Task content is decrypted just before execution on the consumer node that holds the server private keys. From flow's perspective nothing changes — it receives a plaintext `AutomationState` dict and returns an updated one. The flow package has no awareness of encryption, which means the same engine works identically in encrypted node deployments, unencrypted nodes, and standalone bots. + +**Key loading and validation.** The two server keys are accepted as PEM-encoded strings via environment variables, decoded to `bytes` at process startup by a `BeforeValidator` in the pydantic `Settings` model. There is no lazy loading — `settings` is a module-level singleton instantiated at import time, so a misconfigured key value fails fast before any requests are served. The `is_node_side_encryption_enabled` property checks whether both server keys are present, and `tasks_encryption_enabled` is an alias used in API responses. + +**Browser key storage.** The browser keys entered in the Settings page, and the login passphrase, are stored in `IndexedDB` encrypted with a device-bound, non-extractable AES-256-GCM key. That device key is generated on first login using `crypto.subtle.generateKey` with `extractable: false` and can never be exported or read as raw bytes — not even from a filesystem dump of the database file. It is origin-bound, so it cannot be used from another domain or browser profile. Neither the passphrase nor the user keys are ever stored in `localStorage` or sent to the server. + +**Key generation.** Generate the server key pairs with openssl: + +```bash +# Server RSA-4096 keypair (private key → TASKS_SERVER_RSA_PRIVATE_KEY env var) +openssl genrsa -out server_rsa_private.pem 4096 + +# Server ECDSA-P256 keypair (private key → TASKS_SERVER_ECDSA_PRIVATE_KEY env var) +openssl ecparam -genkey -name prime256v1 -noout -out server_ecdsa_ec.pem +openssl pkcs8 -topk8 -nocrypt -in server_ecdsa_ec.pem -out server_ecdsa_private.pem +``` + +The server public keys are never distributed manually — the browser fetches them via `GET /tasks/server-public-keys` at runtime. + +User key pairs are generated by the browser on first use and stored locally in the Settings page. The browser derives the corresponding public keys from the stored private keys using the Web Crypto API and embeds them in each task at submission time. No user public key configuration is required on the server. + +Encryption is opt-in. If the server keys are absent from the environment, the corresponding path is skipped and fields stay plaintext, which is the backward-compatible default. + +## Template importing + +Both the CSV import flow and the export results page support user-defined templates loaded from JSON files. This lets teams share reusable configurations without touching application code. + +**Import templates** (used during CSV task import) compose multiple base action templates into a single combined template by listing them as ordered steps. Each step can pre-fill parameter values as defaults and mark parameters as hidden so they don't appear in the form. The import UI validates the JSON with a Zod schema, checks that every referenced base template exists, and rejects any hidden required parameter that lacks a default — preventing the form from silently blocking submission. Templates that pass validation are saved to `localStorage` and appear alongside the built-in templates in the action dropdown immediately. + +**Export templates** (used on the export results page) define flat column mappings: each column specifies a label, a JSON path into the task result object, and an optional formatter (`text`, `number`, `date`, or `json`). JSON paths starting and ending with double underscores (e.g. `__task_name__`) resolve against task-level metadata rather than the result payload. Like import templates, export templates are Zod-validated on ingest, stored in `localStorage`, and appear in the template dropdown without a page reload. + +Both systems use the same localStorage-backed CRUD pattern — load, upsert by ID, delete — and silently skip malformed entries on load so a corrupted template doesn't break the entire list. Built-in template IDs are reserved; attempting to import a user template with the same ID as a built-in raises an error. + +Example files for both systems ship with the application: +- Import meta-template examples: `public/meta-template-examples/` +- Export template examples: `public/export-template-examples/` + +The export template JSON format: + +```json +{ + "id": "my_export", + "label": "My Export", + "description": "Custom export columns", + "columns": [ + { "key": "name", "label": "Name", "jsonPath": "__task_name__", "formatter": "text" }, + { "key": "amount", "label": "Amount", "jsonPath": "amount", "formatter": "number" }, + { "key": "fee", "label": "Fee", "jsonPath": "fee.cost", "formatter": "number" } + ] +} +``` diff --git a/docs/content/developers/packages/overview.md b/docs/content/developers/packages/overview.md new file mode 100644 index 0000000000..e09035047b --- /dev/null +++ b/docs/content/developers/packages/overview.md @@ -0,0 +1,23 @@ +--- +title: Packages Overview +description: Overview of OctoBot's monorepo package architecture. Each package encapsulates a specific domain of the trading bot. +keywords: [octobot, packages, architecture, monorepo, trading, evaluators, commons] +slug: /developers/packages/overview +sidebar_position: 0 +--- + +# Packages + +OctoBot is organized into self-contained packages under `packages/`. Each package owns a specific domain and has a clear boundary: it exposes a public API to the rest of the system and manages its own dependencies. Packages with Rust components include a `crates/` directory with PyO3 bridge code alongside the Python source, allowing performance-critical paths to run in Rust while remaining callable from Python. + +## Core packages + +**Trading** is the center of the system. It owns orders, portfolio management, exchange interactions, and position tracking — everything that touches real money flows through here. **Commons** provides the shared utilities and data structures used across all other packages; it has no dependencies on the rest of the stack. **Evaluators** handles technical analysis, social signal evaluation, and strategy composition, turning market data into normalized signals that trading modes can act on. **Async Channel** is the messaging backbone: a multi-task asynchronous communication layer that enables real-time data flow between components without tight coupling. + +## Infrastructure packages + +**Tentacles Manager** handles the plugin lifecycle — discovering, installing, updating, and removing tentacle bundles, as well as generating the Python import infrastructure that makes them loadable. **Backtesting** runs strategies against historical data, using the same evaluator and trading mode code as live trading. **Services** integrates external services for notifications, the web interface, and APIs. **Trading Backend** provides low-level trading primitives with optional Rust acceleration via PyO3. + +## Supporting packages + +**Flow** orchestrates the data flow between evaluators, trading modes, and services, wiring them together at runtime. **Node** manages distributed OctoBot deployments, providing durable task execution for automations across multiple instances. **Agents** is the multi-agent AI orchestration layer, coordinating LLM-powered agents for automated analysis and decision-making. **Sync** handles multi-instance coordination, letting separate OctoBot instances share configurations, signals, and account data through a cryptographically authenticated sync server. diff --git a/docs/content/developers/packages/services.md b/docs/content/developers/packages/services.md new file mode 100644 index 0000000000..2362e4be11 --- /dev/null +++ b/docs/content/developers/packages/services.md @@ -0,0 +1,39 @@ +--- +title: Services +description: Architecture and concepts of the octobot_services package — services, service feeds, interfaces, and the notification system. +sidebar_position: 1 +--- + +# Services Package + +`octobot_services` is OctoBot's integration layer between external systems and the rest of the bot. It defines the abstract contracts and runtime machinery for connecting to third-party APIs, streaming external data into the internal channel bus, presenting user-facing interfaces, and delivering notifications. All four concepts share a common lifecycle managed by factory and manager utilities and wired together through a single `octobot_channel_consumer` callback. + +## Services + +`AbstractService` is the base class for every external connection. Each concrete service class is a singleton — at most one live instance per service type exists at runtime. Configuration is stored under the top-level `services` key, with each service reading and writing its own sub-key. `save_service_config` persists changes back to disk. The `say_hello()` method emits a startup message and sets an internal health flag that the factory checks before handing an instance to callers. + +`ServiceFactory` provides an idempotent `create_or_get_service` that either returns an existing healthy instance or creates a new one by calling `prepare()` then `say_hello()`. It discovers all concrete service subclasses via the tentacle system. + +`AbstractAIService` extends the base for LLM backends. It adds a complete invocation layer: single-shot completions, an agentic loop that drives tool calls up to a configurable iteration limit, provider-aware message construction, and a retry decorator for common parsing failures. Model selection is policy-driven — an `AIModelPolicy` value like `"fast"` or `"reasoning"` is resolved to a concrete model name at runtime via the service's models config. Hooks for LangGraph integration are also provided. `AbstractWebSearchService` follows the same pattern for search backends, adding normalized `search` and `search_news` methods. + +## Service feeds + +`AbstractServiceFeed` bridges an external data stream to an internal async channel. Each feed declares the `FEED_CHANNEL` that becomes its internal distribution bus and the `REQUIRED_SERVICES` that must be healthy before it can start. Simulator subclasses set a flag for backtesting use. `ServiceFeeds` is a singleton registry mapping `(bot_id, feed_name)` pairs to instances; `ServiceFeedFactory` instantiates feeds and registers them there. + +## Interfaces + +`AbstractInterface` is the base for all user-facing surfaces. Two specialisations exist: `AbstractBotInterface` for chat-style interfaces such as a Telegram bot, which provides helpers that query the trading API and format responses for portfolio status, trade history, open orders, and control commands; and `AbstractWebInterface` as a marker subclass for browser-based interfaces. All interfaces share class-level metadata — bot ID, project name, project version — set once at startup via `AbstractInterface.initialize_global_project_data`. + +## Notifications + +A `Notification` is a plain value object carrying a plain-text body, a markdown body, a short title, a severity level, a category, an optional sound hint, and an optional link to a prior notification. `NotificationChannel` is an async channel with a singleton producer. `api.notification.send_notification` pushes onto it. If the channel is not yet running when a notification is sent, it is buffered up to a cap of ten and replayed once the channel comes up. + +`AbstractNotifier` is the delivery end. Each notifier declares the config key that activates it, the services it depends on, and a `_handle_notification` implementation that delivers to its transport. Notifiers also subscribe to the trading `OrderChannel` for automatic order lifecycle notifications alongside the `NotificationChannel`. + +## Lifecycle utilities + +`AbstractServiceUser` combines `InitializableWithPostAction` with automatic service dependency resolution. Subclasses declare `REQUIRED_SERVICES` as a list of service classes, or `False` when no service is needed. During initialization, each required service is created or fetched via the factory. `InitializableWithPostAction` guards against double initialization and chains into a post-init hook. `ReturningStartable` provides both async and threaded start modes. `ExchangeWatcher` tracks exchange registrations and notifies subclasses when a new exchange comes online — used by interfaces and notifiers that need to react to new exchange connections. + +## OctoBot channel integration + +`octobot_channel_consumer.py` connects this package to the top-level OctoBot channel bus. It handles creation events for interfaces, notifiers, and service feeds; exchange registration updates for interfaces and notifiers; and start requests for named service feeds. After creation, a confirmation is sent back on the OctoBot channel so the caller knows the instance is ready. diff --git a/docs/content/developers/packages/sync.md b/docs/content/developers/packages/sync.md new file mode 100644 index 0000000000..a264c48824 --- /dev/null +++ b/docs/content/developers/packages/sync.md @@ -0,0 +1,39 @@ +--- +title: Sync +description: Cryptographically authenticated sync server and client for multi-instance OctoBot data sharing, built on FastAPI and the Starfish framework. +sidebar_position: 1 +--- + +# Sync + +`octobot_sync` is the synchronization server and client for OctoBot. It lets multiple OctoBot instances share collections of data — bot configs, accounts, signals, product metadata — over HTTPS, with every request authenticated by an EVM wallet signature and every stored payload encrypted at rest. + +## Architecture + +The package is built on top of [Starfish](https://github.com/Drakkar-Software/starfish-server), which provides the generic collection routing, role-based access control, encryption, and replica sync machinery. `octobot_sync` contributes the OctoBot-specific layer: the EVM-based auth scheme, the chain registry for on-chain ownership resolution, the collection definitions, and the application entry points. This split means the sync server's core machinery is not OctoBot-specific and can be reused elsewhere, while the collections and auth rules live where they can evolve with the product. + +A sync deployment can run in two modes. A **primary server** is backed by S3-compatible object storage and is the canonical source of truth. A **replica server** is backed by local filesystem storage and mirrors a subset of the primary's collections using Starfish's `ReplicaManager`. The client entry point `create_sync_client` handles both: it returns a connected client and can optionally spin up a local replica server in a daemon thread before connecting to it, so an OctoBot instance can work against a nearby local copy. + +## Authentication + +Every request carries five HTTP headers: the caller's EVM address, an EIP-191 personal-sign signature, a millisecond Unix timestamp, a UUID nonce, and the chain ID in `evm:<chainId>` format. The server builds a canonical string from the method, path, timestamp, nonce, and SHA-256 of the request body, then verifies the signature against that string. Timestamps are checked within ±10 seconds of server time, and nonces are tracked for 30 seconds to prevent replay attacks. A caller whose address matches `PLATFORM_PUBKEY_EVM` receives admin role; all other valid callers receive user role. + +When a request carries a `productId` path parameter, a role enricher runs and queries registered chains for on-chain ownership and access rights. Owning a product grants owner and member roles; having access grants member only. These roles are what determine which collections the caller can read or write, making access control data-driven rather than hard-coded. + +## Collections + +A collection is the unit of storage. Each one defines a storage path template, read/write role requirements, encryption mode, and optional constraints on size, schema, MIME type, and rate limiting. The path template (for example `users/{identity}` or `items/{itemId}/feed/{version}`) serves double duty: it determines where data is stored and whether the collection is replicable. Template variables make a collection non-replicable, because there is no single canonical path the replica can pull from. + +When no `collections.json` is present the package falls back to a default config covering bots, accounts, and errors collections with appropriate role and encryption settings. + +## On-chain layer + +The chain layer provides an abstract interface for multi-chain support and an EVM implementation targeting Base. On-chain calls are TTL-cached to avoid hammering the RPC endpoint: ownership is cached for a year since it is treated as immutable, access rights for 60 seconds, and item lookups for 30 seconds. The cache tiers reflect how frequently each piece of data changes in practice. + +## Replica sync + +Only collections whose storage path contains no template variables can be replicated, since replication requires a single canonical pull path. Each replicable collection gets pull and push paths injected along with sync triggers — on-pull and scheduled — so the replica stays current both reactively and on a timer. Outgoing requests from the replica to the primary are authenticated using the same EVM signature scheme via `StarfishAuthProvider`. + +## nginx integration + +The package can generate an nginx reverse-proxy configuration from a `collections.json`. Public pull-only collections get a one-hour proxy cache; public writable collections get a 30-second cache; all other collections are proxied directly without caching. Collections with rate limiting enabled get a strict rate limit zone on push paths. This keeps the nginx configuration in sync with collection semantics automatically, rather than requiring manual alignment between the two. diff --git a/docs/content/developers/packages/tentacles-manager.md b/docs/content/developers/packages/tentacles-manager.md new file mode 100644 index 0000000000..346a79068a --- /dev/null +++ b/docs/content/developers/packages/tentacles-manager.md @@ -0,0 +1,33 @@ +--- +title: Tentacles Manager +description: Lifecycle management for OctoBot tentacle plugins — install, update, uninstall, configure, export, and publish. +sidebar_position: 1 +--- + +# Tentacles Manager + +`octobot_tentacles_manager` is responsible for the full lifecycle of OctoBot tentacles — the plugin system that extends OctoBot with trading modes, evaluators, exchange connectors, services, and more. It handles everything from downloading and installing a tentacle bundle to generating the Python import infrastructure that makes tentacles loadable at runtime. + +## What the manager does + +Installation works by downloading a ZIP archive (from a URL or a local path), extracting it, and copying each tentacle into the bot's `tentacles/` tree while resolving any cross-tentacle requirements declared in `metadata.json`. An update pass skips tentacles whose installed version is already current, making it safe to re-run against the same source. Uninstall removes the relevant directories and regenerates the `__init__.py` import files so the rest of the tree stays consistent. + +The generated `__init__.py` files are a first-class output of this package, not a side effect. Each one is built around a call to `check_tentacle_version()` that gates the import on the minimum compatible version for the tentacle's origin package. If a tentacle's declared version is too old, its import is silently skipped rather than raising an exception, which means a single outdated tentacle cannot break startup for the others. + +A **repair** operation regenerates missing `__init__.py` files and folder structure without touching tentacle configs. This is the recovery path for a broken installation where the code is intact but the import machinery has gone out of sync. + +## Configuration management + +The manager reads and writes two distinct kinds of configuration. The first is `tentacles_setup_config.json` at the profile root, which records which tentacles are activated in a given profile. Evaluators and trading modes are deactivated by default and must be explicitly enabled; services and utility tentacles activate automatically on install. The second is per-tentacle JSON config files, each stored inside the tentacle's own `config/` directory as the reference default. When a user customizes a tentacle, a profile-specific copy is written to the profile's `specific_config/` folder and takes precedence over the reference at runtime. + +## Discovery and loading + +At startup, OctoBot calls the manager to scan the `tentacles/` tree up to three folder levels deep, looking for directories that contain a `metadata.json`. Each discovered module is parsed into a `Tentacle` model that tracks the type path, class names, version, origin package, and optional tentacle group. The result is cached in a module-level dict keyed by class name, and everything downstream — activation checks, configuration resolution, documentation loading, resource path lookups — reads from that cache. + +## Export and distribution + +The manager can also produce redistributable artifacts. A pack operation copies or zips a set of tentacles from an installed tree into a bundle, with optional Cython compilation for distributing compiled-only packages. The upload path pushes those artifacts to S3 or Nexus artifact repositories. + +## CLI + +The manager ships with a standalone command-line interface and also exposes a `register_tentacles_manager_arguments()` function that OctoBot uses to attach tentacle sub-commands to its own argument parser. This lets the same install, update, repair, and pack operations be driven from either the manager's own CLI or from within `octobot --install`, depending on context. diff --git a/docs/content/developers/packages/tentacles.md b/docs/content/developers/packages/tentacles.md new file mode 100644 index 0000000000..7847879861 --- /dev/null +++ b/docs/content/developers/packages/tentacles.md @@ -0,0 +1,73 @@ +--- +title: Tentacles +description: Overview of the tentacles package — OctoBot's plugin system providing all default evaluators, strategies, trading modes, services, and automation components. +sidebar_position: 1 +--- + +# Tentacles Package + +The `tentacles` package is OctoBot's default plugin bundle — the concrete implementation layer that sits on top of the abstract framework packages (`octobot_evaluators`, `octobot_trading`, etc.). Every evaluator, trading mode, service, AI agent, automation rule, and exchange connector ships as a tentacle. + +## What a tentacle is + +A tentacle is a self-contained directory that lives under the `tentacles/` tree and follows a fixed layout: a Python module with your implementation, a `metadata.json` descriptor, and optionally a `config/` subdirectory containing default configuration files (one JSON per class) and a JSON schema for the form renderer. + +`metadata.json` is the authoritative descriptor for the unit. It declares the tentacle's `version`, the `origin_package` it belongs to (used by the version gate), the list of `tentacles` (Python class names) it exports, and an optional `tentacles-requirements` list naming sibling tentacle modules that must be present for this one to work correctly. + +The top-level `__init__.py` files throughout the tree are **generated** by `tentacles_manager`, not hand-written. Each one is built around a call to `check_tentacle_version()`: if the version declared in `metadata.json` falls below the minimum compatible version for its package, the import is skipped and an error is logged — the rest of the system continues unaffected. This makes the plugin boundary hard-isolated: a bad tentacle cannot crash OctoBot at startup. + +## Discovery and loading + +At startup, `octobot_tentacles_manager` scans the `tentacles/` directory tree up to three folder levels deep, looking for any directory that contains subdirectories with a `metadata.json` file. That heuristic determines the tentacle type path (e.g. `Evaluator/TA`, `Trading/Mode`) without requiring a registry or any explicit registration call. Each discovered module is parsed into a `Tentacle` model object that tracks the type path, class names, version, origin package, and optional `tentacle_group`. + +The result is cached in a module-level dict keyed by class name. Everything downstream — activation checks, configuration resolution, documentation loading, and resource path lookups — goes through that cache via the `loaders` API. + +Tentacles can also be registered programmatically via `register_extra_tentacle_data` for cases where a tentacle class cannot be discovered from disk (e.g. dynamically generated or compiled tentacles). + +## Configuration: reference vs. profile-specific + +Every tentacle class has a **reference config** stored inside its own `config/` directory. That file is the factory default and is never modified at runtime. + +When a user (or a profile) customises a tentacle, a **profile-specific copy** is written to the active profile's `specific_config/` folder. At runtime, `get_config()` checks for a profile-specific file first; if none exists it falls back to the reference config. A factory reset simply copies the reference file back over the profile-specific one. + +Activation state — which evaluators and trading modes are actually turned on — is stored separately in `tentacles_config.json` at the profile root. This file is also managed by `TentaclesSetupConfiguration`, which knows that evaluator and trading mode sub-types are deactivated by default (users must explicitly enable them in a profile), whereas services and utility tentacles activate automatically on install. When a new tentacle is installed into a `tentacle_group`, the manager can automatically swap the default group member's activation state to avoid running duplicate implementations. + +Profiles bundled inside the `tentacles/profiles/` directory ship with their own `tentacles_config.json` and `specific_config/` files, so a profile can override both activation state and parameter values out of the box. + +## Relationship to tentacles_manager + +The `tentacles` package (this repo) only contains implementations. All lifecycle operations — install, update, uninstall, packaging, init-file generation, version gating, configuration management — live in the separate `octobot_tentacles_manager` package. The two are coupled only through the file layout convention and the generated `__init__.py` contract. This separation means the framework can work with any conforming plugin bundle, not just the default one bundled here. + +## Evaluators + +Evaluators analyze market data and produce a normalized signal (`eval_note` in `[-1, 1]`). Four types exist: **TA** evaluators trigger on each closed candle and use technical indicators, **RealTime** evaluators react to live market events, **Social** evaluators consume external data feeds (fear & greed, news, etc.), and **Strategy** evaluators aggregate signals from other evaluators into a final trading decision. + +LLM-backed strategy evaluators can use either a fast parallel agent pattern or a LangChain supervisor for deeper reasoning. DSL-based evaluators let users define custom evaluation logic as scripts. + +## Trading Modes + +Trading modes define how OctoBot translates signals into orders. Each mode splits into a producer (decides what and when to trade) and a consumer (executes order operations on the exchange). + +The package ships grid/staggered modes, index rebalancing modes, DCA modes, daily signal-based modes, copy-trading modes that replicate remote signals or profiles, and DSL modes where the entire trading logic is a user-written script. AI-powered index modes use agent teams to determine target portfolio allocations instead of fixed weights. + +Exchange connectors are mostly CCXT-based, with notable exceptions for prediction markets (Polymarket with on-chain EVM settlement) and perpetual DEXs (Hyperliquid). A generic CCXT connector handles any exchange without a dedicated tentacle. + +## AI Agents + +Agent teams orchestrate multiple LLM-powered sub-agents for market analysis. A simple team runs sub-agents in parallel and summarizes results for low latency. A deep team uses a LangChain supervisor for higher reasoning depth. For index trading, a structured bull/bear debate pattern with risk assessment and memory-enabled allocation decisions is used. + +LLM backends support OpenAI, Anthropic, Ollama, Gemini, Azure, Bedrock, and other providers. Agents declare a speed/quality preference so the system selects the appropriate model tier. + +## Services and Automation + +Service feeds bridge external data sources to the evaluator system. Interfaces provide a web dashboard, Telegram bot, and a Node API for multi-instance management. Notifiers dispatch messages via Telegram, Twitter/X, and WebSocket. + +The automation system is declarative: a trigger event (price, portfolio, P&L, volatility, or time threshold) combined with an optional condition guard and an action (notify, sell all, cancel orders, stop trading, pause strategies). Conditions can be DSL scripts for complex logic. Each trigger supports one-shot and minimum re-trigger frequency settings. + +## DSL and Scripting + +The DSL layer registers operators with the commons interpreter for technical indicators, exchange data access, order management, blockchain wallets, and automation rules. A higher-level scripting library provides an async API for trading modes — covering order creation, position sizing, order chaining and grouping, chart annotations, and index distribution. + +## User Inputs + +Every configurable tentacle implements `init_user_inputs`, which registers parameters with the UI framework. This serializes into a JSON schema rendered as a configuration form in the web interface. diff --git a/docs/content/developers/packages/trading-backend.md b/docs/content/developers/packages/trading-backend.md new file mode 100644 index 0000000000..5bb81a8f0b --- /dev/null +++ b/docs/content/developers/packages/trading-backend.md @@ -0,0 +1,19 @@ +--- +title: Trading Backend +description: Exchange backend layer handling broker identification, API key permission checks, and account validation across 20+ supported exchanges. +sidebar_position: 1 +--- + +# Trading Backend + +`trading_backend` is the exchange-facing validation layer that runs before any trading begins. Its job is to inspect API key permissions and account state so the rest of the system can trust that the credentials it has are actually capable of the operations it intends to perform. + +## Structure + +`Exchange` is the base class. Each supported exchange has a subclass that overrides only what differs from the default behaviour — most overrides are small: a different permissions endpoint URL, a different error code to interpret, or broker-tagging logic specific to that venue. A factory selects the right subclass at runtime using the ccxt exchange `id`. Unrecognised exchanges fall back to the base class, which covers the common case well enough. + +## Permission detection + +Two strategies exist for detecting what an API key is allowed to do. Exchanges that expose a dedicated permissions endpoint are queried directly. For exchanges that don't, the package uses a cancellation probe: it attempts to cancel a non-existent order and interprets the error response. A permission error means the key is read-only; an order-not-found error means trading rights are present; a nonce error signals clock drift between the client and exchange server. + +If withdrawal rights are detected and `ALLOW_WITHDRAWAL_KEYS` is not explicitly enabled, the key is rejected before trading starts. This is a safety default — keys that can withdraw funds carry more risk than keys that can only trade, and most automated strategies have no reason to withdraw. diff --git a/docs/content/developers/packages/trading.md b/docs/content/developers/packages/trading.md new file mode 100644 index 0000000000..2647268fa3 --- /dev/null +++ b/docs/content/developers/packages/trading.md @@ -0,0 +1,45 @@ +--- +title: Trading +description: Overview of the octobot_trading package — the core trading engine for exchange connectivity, order management, portfolio tracking, and trading mode abstraction. +sidebar_position: 1 +--- + +# Trading Package + +`octobot_trading` is the core trading engine. It owns everything between a raw exchange API call and a filled order: exchange connectivity, market data ingestion, order lifecycle, portfolio tracking, and the strategy abstraction layer that trading modes build on. + +## Exchange management + +Each connected exchange gets one `ExchangeManager`, the root object that wires together connectors, traders, and data managers. Mode flags on that object control which code paths activate, so the same trading mode logic runs identically across live, simulated, and backtesting contexts without branching. + +`ExchangeBuilder` is the only supported construction path. It resolves the active trading mode, registers the manager in a global registry singleton, and starts all subsystems in the right order. A shared market cache prevents redundant REST fetches when multiple exchange instances reference the same market. + +The production connector wraps ccxt and normalises all responses through an adapter layer, converting ccxt exceptions into internal types. Exchange-specific tentacles subclass the REST connector to override parsing or expose additional endpoints. For backtesting, a simulator connector replays data from importers instead of hitting the exchange at all. + +## Market data + +Each data type — candles, tickers, order book, trades, funding — follows the same manager/channel/updater pattern. Managers hold in-memory state, channels broadcast updates via async_channel, and updaters pull from REST or push via WebSocket. The switch between REST polling and WebSocket is transparent to anything consuming the channel. + +Candles are stored as a circular buffer, three thousand entries per time frame. Mark price resolves from four possible sources with automatic fallback. Components can register price-threshold callbacks against the mark price stream; this is how simulated stop-loss and take-profit orders detect when their fill condition is met without polling. + +The exchange is considered ready only after a defined set of channel topics has produced their first update. Futures exchanges require additional signals — positions, contracts, funding rates — before the ready flag is set, preventing strategies from acting on incomplete state. + +## Orders and portfolio + +Orders carry a stable internal UUID alongside the exchange-assigned ID. They support chained orders that auto-submit on fill, order groups for coordinated take-profit and stop-loss, trailing price profiles, and price-based triggers that hold an order inactive until a threshold is crossed. Open orders are capped in count and serialised to storage for restart recovery. + +Portfolio accounting uses async locks and maintains fund reservations — funds are locked on order creation and released on fill or cancel. When multiple trading modes share one exchange, sub-portfolios partition the accounting so each mode operates against its own slice. Value conversion to a reference market enables consistent profitability tracking across assets. + +Futures positions come in two structural variants — linear/quote-margined and inverse/base-margined — with different PnL and margin calculations. Trades are immutable fill records; transactions cover fees, PnL events, deposits, withdrawals, and transfers, with duplicate-insertion protection to handle exchange re-delivery. + +## Trading modes + +Trading modes define the strategy abstraction layer. A mode channel carries market signals from producers — which subscribe to evaluator matrix updates or candle-close events — to consumers that translate those signals into exchange operations. The split between producer and consumer is what allows the same evaluation logic to drive different order behaviours. + +Scripted trading modes allow user-defined Python scripts with hot reload. A context object aggregates the exchange manager, symbol, time frame, and trigger candle into a single handle. A built-in DSL covers amount translation, price offset calculation, and position inspection. A script declares its required candle feeds upfront so the framework activates them before the first script call. + +## Signals and storage + +The signal system lets one OctoBot broadcast order operations as structured bundles for followers to replicate. Signals capture the full order dependency graph — chained orders, groups, triggers — and use portfolio-relative sizing so followers scale to their own portfolio rather than copying absolute quantities. + +Order storage serialises the complete in-memory graph including groups, chains, trailing profiles, and triggers. On startup, all values are reconstructed with Decimal precision restored from strings to avoid floating-point drift accumulated during serialisation. Optional historical storage records every order status change rather than just the terminal state. diff --git a/docs/content/developers/tentacles-dev/_category_.json b/docs/content/developers/tentacles-dev/_category_.json new file mode 100644 index 0000000000..e97f3e78cb --- /dev/null +++ b/docs/content/developers/tentacles-dev/_category_.json @@ -0,0 +1 @@ +{"label": "Tentacles Development", "position": 3} diff --git a/docs/content/developers/tentacles-dev/create-a-tentacle-package.md b/docs/content/developers/tentacles-dev/create-a-tentacle-package.md new file mode 100644 index 0000000000..2eb27f3126 --- /dev/null +++ b/docs/content/developers/tentacles-dev/create-a-tentacle-package.md @@ -0,0 +1,202 @@ +--- +title: "Create a tentacle package" +description: "Learn how to bundle your custom OctoBot tentacles in a tentacle package, enable configuration via user inputs, add associated docs and share it." +sidebar_position: 7 +--- + + + +# Tentacle package development + +## Tentacle packages + +This page covers tentacle package creation. A working [Octobot developer environment](/developers/environment/setup-your-environment) is required to create a tentacle. + +A tentacle package is a python module that contains one or multiple [tentacles](create-a-tentacle) +of the same type. + +### The tentacle package folder + +A tentacle package is defined by a folder located at : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/ +``` + +> TP is for tentacle package + +- **YOUR_TP_CATEGORY** can be Backtesting, Evaluator, Services or Trading + +- **YOUR_TP_SUB_CATEGORY** should be a sub category of **YOUR_TP_CATEGORY** in the existing + tentacle architecture + +- **YOUR_TENTACLE_PACKAGE_NAME** is the name of your tentacle package, shouldn't use an + existing tentacle package name + +### Description file + +A tentacle package contains metadata described in the metadata.json file. This file is +used to properly install the tentacle and should be carefully written. It's located at +the root path of the tentacle package : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/metadata.json +``` + +A tentacle package metadata.json contains : + +```javascript +{ + "version": "YOUR_TP_VERSION", + "origin_package": "YOUR_TP_ORIGIN_PACKAGE", + "tentacles": ["YOUR_TP_TENTACLE_1", "YOUR_TP_TENTACLE_2"], + "tentacles-requirements": ["YOUR_TP_TP_REQUIREMENT_1", "YOUR_TP_TP_REQUIREMENT_2"] +} +``` + +- **YOUR_TP_VERSION** is your tentacle package version +- **YOUR_TP_ORIGIN_PACKAGE** is the author or the origin repository of the tentacle package + +- **YOUR_TP_TENTACLE_1** and **YOUR_TP_TENTACLE_2** are name of the tentacle classes contained + in your tentacle package tentacles (1 or more). + +- **YOUR_TP_TP_REQUIREMENT_1** and **YOUR_TP_TP_REQUIREMENT_2** are the names of required + tentacle packages to have installed to run your tentacle package (0 or more) + +> **YOUR_TP_TENTACLE_X** should match python classes to be exposed in the tentacle + +Example _DailyTradingMode/metadata.json_ : + +```javascript +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DailyTradingMode"], + "tentacles-requirements": ["mixed_strategies_evaluator"] +} +``` + +### Tentacle modules + +[Tentacle](create-a-tentacle) python modules should be placed at the root path of the +tentacle package. There can 1 or more modules per package. + +Example with _momentum_evaluator_ : The main python module that contains multiple tentacles +is located at + +```bash +tentacles/Evaluator/TA/momentum_evaluator/momentum.py +``` + +Every tentacle classes should be imported in the package root `__init__.py` file. + +Example with _momentum_evaluator_'s `__init__.py`.py : + +```python +from .momentum import RSIMomentumEvaluator, ADXMomentumEvaluator, RSIWeightMomentumEvaluator, +BBMomentumEvaluator, MACDMomentumEvaluator, KlingerOscillatorMomentumEvaluator, +KlingerOscillatorReversalConfirmationMomentumEvaluator +``` + +### Configuration + +A tentacle package can contain tentacle configurations. +Tentacles configuration consists in 2 parts: + +1. User inputs: the description of each configuration parameter, in the `init_user_inputs` tentacle method +2. Default configuration: the default configuration values, as json, in the `config` folder + +#### User inputs + +On OctoBot's web interface, tentacle configuration settings are generated using their definition +as User inputs as well as their current values. + +For a configuration parameter to show on the configuration interface, it has to be defined as +user input. Any value contained in the json configuration file can be used by the tentacle but +only the ones associated to user inputs will be visible to the user. + +```python +def init_user_inputs(self, inputs: dict) -> None: + self.period_length = self.UI.user_input( + "period_length", enums.UserInputTypes.INT, 14, inputs, + min_val=1, title="EMA period length." + ) + self.min_trigger_value = self.UI.user_input( + "sleep_delay", enums.UserInputTypes.FLOAT, 0.5, inputs, + min_val=0.34, max_val=0.75, title="Threshold above which to trigger a signal." + ) + self.send_notification = self.UI.user_input( + "send_notification", enums.UserInputTypes.BOOLEAN, True, inputs, + title="When enabled, send telegram notification on signal." + ) +``` + +The full definition of user inputs can be found + +<a href="https://github.com/Drakkar-Software/OctoBot-Commons/blob/master/octobot_commons/configuration/user_inputs.py" rel="nofollow">here</a> +. + +If you are unsure how to use user inputs, have a look at + +<a href="https://github.com/Drakkar-Software/OctoBot-Tentacles/blob/master/Evaluator/TA/momentum_evaluator/momentum.py" rel="nofollow">the existing tentacles user inputs</a> +. + +> Tentacles configuration are displayed using the <a href="https://github.com/json-editor/json-editor" rel="nofollow">json-editor</a> +> library. User inputs are converted into json schemas that are then passed to the editor alongside +> their current configuraiton values. + +- The `editor_options` argument allows to set json-editor specific options such as the `disable_array_add` option (`editor_options={"disable_array_add": True}`). +- The `other_schema_values` argument allows to set json schema specific parameters such as the `minItems` or `uniqueItems` for arrays (`other_schema_values={"minItems": 1, "uniqueItems": True}`). + +#### Default configuration + +Values for an tentacle default configuration are located in the _config/_ folder at : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/config/ +``` + +Each tentacles config file should be named with the exact case and name as the associated +tentacle class. Below an example for _MyAwesomeTentacle_ : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/config/MyAwesomeTentacle.json +``` + +Once a tentacle configuration has been edited, a local copy of this json configuration file +is added to your profile where local changes are saved. + +### Resources + +Tentacle resources are located in the **resources** folder of your tentacle package. + +Each tentacles documentation should be created for in `resources/YOUR_TP_TENTACLE_1.md`, +`resources/YOUR_TP_TENTACLE_2.md` (the file name should match the tentacle class name). + +A tentacle package can also contain many resources that can be binary files, images... + +Example _DailyTradingMode/resources/DailyTradingMode.md_ : + +```text +DailyTradingMode is a **low risk versatile trading mode** that reacts only the its state changes to +a state that is different from the previous one and that is not NEUTRAL. + +When triggered for a given symbol, it will cancel previously created (and unfilled) orders +and create new ones according to its new state. + +DailyTradingMode will consider every compatible strategy and average their evaluation to create +each state. +``` + +> You can use <a href="https://www.markdownguide.org/cheat-sheet" rel="nofollow">markdown</a> to format a +> tentacle documentation. + +### Tests + +Tentacle should be tested. Tests file are usually located in the **tests** folder +of the tentacle package. + +## Installing and sharing tentacles + +Follow the [tentacles installation guide](customize-your-octobot) to install or +share your custom tentacle package. diff --git a/docs/content/developers/tentacles-dev/create-a-tentacle.md b/docs/content/developers/tentacles-dev/create-a-tentacle.md new file mode 100644 index 0000000000..a36a221cae --- /dev/null +++ b/docs/content/developers/tentacles-dev/create-a-tentacle.md @@ -0,0 +1,100 @@ +--- +title: "Create a tentacle" +description: "Create your own OctoBot tentacles (aka extensions). Add technical and social analysis tools, improve the web, telegram interfaces, fix exchange connections." +sidebar_position: 6 +--- + +# Tentacle development + +## Tentacles + +This page covers tentacle development. A working [Octobot developer environment](/developers/environment/setup-your-environment) is required to create a tentacle. + +A tentacle is part of a [tentacle package](create-a-tentacle-package) +and defines a tool for OctoBot such as a way to analyse moving averages, listen +to reddit or create grid-like orders. + +OctoBot uses tentacles to handle: + +* Price technical analysis \(moving averages, RSI, MACD, ...\) +* Social analysis \(Telegram, Reddit and Google\) +* Evaluator signals interpretations \(strategies\) +* Orders creation and followup \(trading modes\) +* User interfaces and notifications \(web, telegram\) +* [Backtesting](/guides/octobot-usage/backtesting) data files reading and writing \(.data\) +* Exchanges fixes \(to handle exchange specific behaviors\) + +There is no limit to the things OctoBot can handle: everything that can be +coded can be used by OctoBot through a tentacle. It is possible to create a +new tentacle to add a new tool to OctoBot or to build on an existing one and improve it. + +## Developing a new Tentacle + +The most efficient way to create a new tentacle is to build on top of an +existing one to add features to it. It is of course also possible to create +a new completely new tentacle. + + +To create a tentacle improving an existing one, all you need to do is to use +the existing tentacle folder as a template \(to create a +[tentacle package](create-a-tentacle-package)\) and extend the existing +tentacle you want to improve and re-implement the methods you want to change +in the package's python file. + +Examples: + +**RedditForumEvaluator** is a simple Reddit evaluator available by default +in `tentacles/Evaluator/Social/forum_evaluator/forum.py`. Let's say you want +to implement **SuperRedditForumEvaluator** which is a better Reddit evaluator. + +Create the `tentacles/Evaluator/Social/super_forum_evaluator/` +[tentacle package](create-a-tentacle-package) based on +`tentacles/Evaluator/Social/forum_evaluator` and start coding the the python file. + +```python + + +class SuperRedditForumEvaluator(Socials.RedditForumEvaluator): + # _get_sentiment is the RedditForumEvaluator method taking an entry and + # returning a number representing the "bullishness" of the entry. + # to change this part only, just redefine this method here + def _get_sentiment(self, entry): + # your new content + sentiment = 1 + # some advanced entry analysis to set sentiment value + return sentiment +``` + +**SimpleStrategyEvaluator** is a strategy available by default in +`tentacles/Evaluator/Strategies/mixed_strategies_evaluator/mixed_strategies.py`. +Create the `tentacles/Evaluator/Social/super_simple_strategy_evaluator/` +[tentacle package](create-a-tentacle-package) based on +`tentacles/Evaluator/Strategies/mixed_strategies_evaluator` and start coding the the python file. + +```python + + +class SuperSimpleStrategyEvaluator(SimpleStrategyEvaluator): + + # _trigger_evaluation is the methods called when OctoBot is + # asking for a strategy evaluation + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + exchange_name, + cryptocurrency, + symbol, + time_frame): + final_evaluation = 0 + # some advanced computations to set final_evaluation value + + # update self.eval_note to store the strategy result + self.eval_note = final_evaluation + # finally, call self.strategy_completed to notify that + # trading modes should wake up after this update + await self.strategy_completed(cryptocurrency, symbol) +``` + diff --git a/docs/content/developers/tentacles-dev/customize-your-octobot.md b/docs/content/developers/tentacles-dev/customize-your-octobot.md new file mode 100644 index 0000000000..9e6f49e6a9 --- /dev/null +++ b/docs/content/developers/tentacles-dev/customize-your-octobot.md @@ -0,0 +1,112 @@ +--- +title: "Customize your OctoBot" +description: "Lean how to deeply customize your OctoBot by installing and creating and sharing custom tentacles and tentacle packages." +sidebar_position: 3 +--- + + + +# OctoBot is customizable! + +You can easily create or add existing tentacles to your OctoBot. + +Tentacles are evaluators \(using social media, trend analysis, news, ...\), strategies +\(interpretations of evaluator's evaluations\), analysis tools \(implementation of a +bollinger bands in depth analysis, reddit entries reader, ...\) and trading modes. + +![tentacles](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tentacles.jpg) + +OctoBot is available for free with <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">basic implementations of a lot of different evaluators</a> +. The very high modularity in OctoBot's architecture allows it to automatically +look for the most advanced version\(s\) of all the available tentacles and +automatically use them in its trading strategies. + +Therefore, anyone can implement its own version of any evaluator, strategy, analysis +tool and trading modes. It's even possible to use another version provided by someone else ! + +## Default tentacles + +OctoBot default tentacles are automatically installed when first starting your OctoBot. + +You can re-install them anytime using the following command arguments with your OctoBot: +`tentacles --install --all` + +It is also possible to install a tentacles package using the web interface Tentacles tab. + +## Installing tentacles + +To install tentacles, OctoBot can either install a tentacle package bundle or a single +tentacle from a local folder. + +Note: +VSCode & PyCharm run configuration examples of the following commands are described in the [Octobot developer environment](/developers/environment/setup-your-environment). + +### Creating a tentacle package bundle + +Tentacle package bundles are the prefered way to share tentacles. + +Steps to create a tentacles package bundle from a local folder: + +1. Make sure it follows the <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">OctoBot-Tentacles folders architecture</a> + to properly locate tentacles to be installed. There is no need to create empty folders + but packages with content have to be at the [appropriate path](create-a-tentacle-package#the-tentacle-package-folder).\ + Example: a trading mode should be located at **Trading/Mode/name_of_your_trading_mode** + in your bundle. +2. Call OctoBot with the following arguments: + +```bash +tentacles --pack "../tentacles_export.zip" --directory "path/to/your/local/tentacle_bundle" +``` + +> You now have a **tentacles_export.zip** file. It is a tentacle bundle containing your +> tentacles packages that you can install and share. + +### Installing the tentacle package bundle + +To install a package bundle, call OctoBot with the following arguments: + +```bash +tentacles --install --all --location "path/to/your/tentacles_export.zip" +``` + +You can also make it available from an URL and later install it via (for example) : + +```bash +tentacles --install --all --location "https://my.tentacles.com/pack_name" +``` + +> Installing a tentacle package will replace any existing source file that share the +> same name at the same path. + +### Installing a single tentacle package + +It is also possible to install a single tentacle package from a local folder using +the following arguments: + +```bash +tentacles --single-tentacle-install "path/to/your/tentacle/to/install" Evaluator/TA +``` + +Please note that in this command, you also need to provide the type of the +tentacle (`Evaluator/TA` in this example). + +### Installation troubleshoot + +- **TentacleLoader Error when loading _your_tentacle_module_** : This means + the import of your tentacle module failed. Tentacles that can't be imported by + Python can't be used. +- **Python doesn't even see my tentacle**: Your tentacle module has to be imported + in your tentacle package `__init__.py` file. Your tentacle package has also to be imported + in the parent folder `__init__.py`. Please note that this parent `__init__.py` file is + managed by OctoBot and should already be properly filled when installing a tentacle bundle. +- **Python sees my tentacle but I can't see it on the web interface**: Your tentacle + classes have to be listed in your **user/profiles/NameOfYourProfile/tentacles_config.json**. + The web interface uses this file to list tentacles and check if they are enabled or not. + This file is also kept up to date when installing a tentacle bundle. + +In most cases, issues related to the **parent `__init__.py`** and `tentacles_config.json` +files can be fixed by calling OctoBot with the following arguments: + +```bash +tentacles --repair +``` diff --git a/docs/content/guides/_category_.json b/docs/content/guides/_category_.json new file mode 100644 index 0000000000..f03235204d --- /dev/null +++ b/docs/content/guides/_category_.json @@ -0,0 +1 @@ +{"label": "Guides"} diff --git a/docs/content/guides/octobot-advanced-usage/_category_.json b/docs/content/guides/octobot-advanced-usage/_category_.json new file mode 100644 index 0000000000..f5f4a3b475 --- /dev/null +++ b/docs/content/guides/octobot-advanced-usage/_category_.json @@ -0,0 +1 @@ +{"label": "Advanced Usage", "position": 7} diff --git a/docs/content/guides/octobot-advanced-usage/beta-program.md b/docs/content/guides/octobot-advanced-usage/beta-program.md new file mode 100644 index 0000000000..945cf14685 --- /dev/null +++ b/docs/content/guides/octobot-advanced-usage/beta-program.md @@ -0,0 +1,38 @@ +--- +title: "Beta program" +description: "Join the OctoBot beta tester program and get early access to new features." +sidebar_position: 1 +--- + + + +# OctoBot Beta Tester program + +The OctoBot Beta Tester program gives you early access to most of the new features that are added to OctoBot. + +It's important to understand that beta testing is not the same as using a finished product. You may encounter +bugs or other issues that have not yet been resolved, and you may be asked to provide feedback on your experience +with the application. +This feedback can be used to help improve the final version of the feature being tested, so it's important to be thorough and honest in your testing. + +## Why joining the program + +The goal of this program is to test those new features as well as experimenting with improvements on existing ones. + +By joining the OctoBot Beta Tester, you will have early access to new features and improvements on OctoBot cloud by using [app-beta.octobot.cloud](https://app-beta.octobot.cloud/) + +## Who can join + +Registrations are currently open. We are looking for different types of user with different background. No need to be a developer or an advanced trader. + +If you are interested in helping OctoBot project to get better and are willing to try out our new features, we will be glad to have you in the program ! + +## Joining the Beta Tester program + +The Beta Tester program requires an account on [app-beta.octobot.cloud](https://app-beta.octobot.cloud/). This is a separate account from the one you might have on [octobot.cloud](https://www.octobot.cloud/). + +1. Create an OctoBot cloud [beta account](https://app-beta.octobot.cloud/). +2. Activate `Connect to the beta environment` in your OctoBot `About` tab and restart your OctoBot. +3. You can now login using your [app-beta.octobot.cloud](https://app-beta.octobot.cloud/) account in your OctoBot's `Community` tab. + +> Please note that for as long as `Connect to the beta environment` is enabled, your OctoBot will only be able to interact with the beta ecosystem. Therefore if you want to use features or your account from the public [octobot.cloud](https://www.octobot.cloud/), you will have to disable `Connect to the beta environment` from the `About` tab. diff --git a/docs/content/guides/octobot-advanced-usage/tentacle-manager.md b/docs/content/guides/octobot-advanced-usage/tentacle-manager.md new file mode 100644 index 0000000000..87f38800fc --- /dev/null +++ b/docs/content/guides/octobot-advanced-usage/tentacle-manager.md @@ -0,0 +1,38 @@ +--- +title: "Tentacles manager" +description: "Learn how to install new tentacles (extensions) on your OctoBot using the tentacles manager." +sidebar_position: 2 +--- + + + +# Tentacle Manager + +OctBot is fully modular, so you can install any modules you want ! + +![tentacles](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tentacles.jpg) + +You can find the + +<a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">OctoBot-Tentacles</a> repository all default tentacles (modules) you can create +to custom your own cryptocurrencies trader bot. + +And all the default tentacles package from this repository will be +installed automatically. + +If you want to modify or disable some of them see the developers [customize your OctoBot section](/guides/octobot-tentacles-development/customize-your-octobot). + +## Add new tentacles packages to your OctoBot + +### Using the web interface + +![tentacles_packages](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tentacles_packages.jpg) + +Got to the **Tentacles** tab on the navigation bar (in the advanced part), then go to +**INSTALL TENTACLES PACKAGES** and register the address (local or url) +of the wanted tentacles packages. This will automatically install the +package in your OctoBot. + +## Install a specific tentacle + +Please checkout the [dedicated section on our developers guides](/guides/octobot-tentacles-development/customize-your-octobot#installing-the-tentacle-package-bundle). diff --git a/docs/content/guides/octobot-advanced-usage/using-octobot-with-a-proxy.md b/docs/content/guides/octobot-advanced-usage/using-octobot-with-a-proxy.md new file mode 100644 index 0000000000..b8f1932f90 --- /dev/null +++ b/docs/content/guides/octobot-advanced-usage/using-octobot-with-a-proxy.md @@ -0,0 +1,33 @@ +--- +title: "Using a proxy" +description: "Use an http or https proxy for your OctoBot connect to your crypto exchange account from a specific IP address or location." +sidebar_position: 3 +--- + + + +# Using OctoBot with a proxy + +## Why using a proxy with your OctoBot + +When using OctoBot to automate your investment or trading strategies on your exchange, you might want to use a <a href="https://en.wikipedia.org/wiki/Proxy_server" rel="nofollow">proxy server</a> to emit requests to your exchange from a different IP address or location than the one you are currently at. + +This can be relevant in case: + +- You wish to enable IP address whitelisting and would like to be sure to always use the same IP address for your OctoBot, even if it changes its location or its server. +- You are traveling somewhere and would like to keep using the IP address for your OctoBot running from your computer. + +## How to use OctoBot with an HTTP or HTTPS proxy + +To configure your OctoBot to request exchanges from your proxy, configure the following environment variables before starting your [open source OctoBot](../octobot): + +- For an HTTP proxy (REST requests): `EXCHANGE_HTTP_PROXY_AUTHENTICATED_URL` +- For an HTTPS proxy (REST requests): `EXCHANGE_HTTPS_PROXY_AUTHENTICATED_URL` +- For a SOCKS proxy (websocket connections): `EXCHANGE_SOCKS_PROXY_AUTHENTICATED_URL` + +Those variable should be configured with your full proxy URL and OctoBot will use it for each of its exchange requests. + +Example with a HTTPS proxy: +`EXCHANGE_HTTPS_PROXY_AUTHENTICATED_URL=https://username:password@your_proxy.com:8002` + +Please note that only one of `EXCHANGE_HTTP_PROXY_AUTHENTICATED_URL` or `EXCHANGE_HTTPS_PROXY_AUTHENTICATED_URL` should be set to apply a proxy to your REST requests. diff --git a/docs/content/guides/octobot-configuration/_category_.json b/docs/content/guides/octobot-configuration/_category_.json new file mode 100644 index 0000000000..639d43a41f --- /dev/null +++ b/docs/content/guides/octobot-configuration/_category_.json @@ -0,0 +1 @@ +{"label": "Configuration", "position": 3} diff --git a/docs/content/guides/octobot-configuration/accounts.md b/docs/content/guides/octobot-configuration/accounts.md new file mode 100644 index 0000000000..4fead7defa --- /dev/null +++ b/docs/content/guides/octobot-configuration/accounts.md @@ -0,0 +1,64 @@ +--- +title: "Accounts" +description: "Learn OctoBot handles your exchange accounts and setup its web and telegram interfaces and notifications." +sidebar_position: 4 +--- + +# Accounts + +The accounts configuration page allows global (cross profile) configuration. It contains exchange API keys, interfaces credentials or keys and notification configuration. + +## Exchanges + +![exchange accounts configuration in octobot](/images/guides/configuration/exchange-accounts-configuration-in-octobot.png) + +You can save as many accounts as you want and only trade on those enabled in your profile. + +[Here is the guide helping to setup an exchange for OctoBot](/guides/exchanges) + +## Interfaces + + +![interfaces configuration in octobot](/images/guides/configuration/interfaces-configuration-in-octobot.png) + +Interfaces are ways to connect your OctoBot to other services. + +Here are different page explaining interfaces configuration : + +- [Web](/guides/octobot-interfaces/web) +- [Telegram](/guides/octobot-interfaces/telegram) +- [Chat GPT](/guides/octobot-interfaces/chatgpt) +- [TradingView](/guides/octobot-interfaces/tradingview) +- [Reddit](/guides/octobot-interfaces/reddit) + + +## Notifications + + +![notifications configuration in octobot](/images/guides/configuration/notifications-configuration-in-octobot.png) + +When notifications are enabled, OctoBot will create notifications on all the given medias. These notifications contain the current evaluations of monitored markets as well as created, filled and cancelled orders. + +Different types of notifications are available, it is possible to use any of them, or even all of them. + +### Types of notifications + +- **Global-Info**: General notifications like a startup message or a shutdown message. +- **Price-Alerts**: A price movement is detected and is triggering a new market state. +- **Trades**: An order is created, filled or cancelled. +- **Trading-Script-Alerts**: Any notification related to a scripted trading mode. +- **Other**: Other type of notifications. + +Enable notifications types to tell which types of notifications OctoBot should use. + +### Telegram notifications + +When selected, notifications will be sent to you on Telegram. + +Telegram notifications use the Telegram service. [See Telegram configuration guide](/guides/octobot-interfaces/telegram) + +### Web notifications + +When selected, notifications will be sent to you on the web interface. + +Web notifications use the Web service. [See Web interface configuration guide](/guides/octobot-interfaces/web) diff --git a/docs/content/guides/octobot-configuration/cloud-strategy-in-open-source-and-pro.md b/docs/content/guides/octobot-configuration/cloud-strategy-in-open-source-and-pro.md new file mode 100644 index 0000000000..f922302306 --- /dev/null +++ b/docs/content/guides/octobot-configuration/cloud-strategy-in-open-source-and-pro.md @@ -0,0 +1,28 @@ +--- +title: "OctoBot cloud Strategies" +description: "Learn how to use, test and customize OctoBot cloud trading strategies directly from your OctoBot trading bot or self hosted OctoBot." +sidebar_position: 3 +--- + +# Use OctoBot cloud strategies directly from your OctoBot + +Starting from OctoBot 1.0.4, you can now profit from OctoBot cloud strategies directly from your [OctoBot trading bots](https://www.octobot.cloud/trading-bot). + +Simply go the the **community** tab of your OctoBot to use OctoBot cloud strategies on your own robot. + +![download octobot cloud strategies in open source bot](/images/guides/download-octobot-cloud-strategies-in-open-source-bot.png) + +## Customize OctoBot cloud strategies + +Being able to use OctoBot cloud strategies directly from your OctoBot means that you can explore and use them with simulated or real funds, as much as you want. + +You can also configure those strategies to trade differently, which means: +- Using other exchanges +- Trading different cryptocurrencies +- Changing indicators' configuration + +As regular strategies, OctoBot cloud strategies are using [trading modes](/guides/octobot-trading-modes/trading-modes) that can be configured. + +## Backtesting custom OctoBot cloud strategies + +Backtest OctoBot cloud strategies using the OctoBot [backtesting engine](/guides/octobot-usage/backtesting) or the [Strategy Designer](/guides/octobot-usage/strategy-designer) to optimize them according to your ideas. diff --git a/docs/content/guides/octobot-configuration/custom-profile.md b/docs/content/guides/octobot-configuration/custom-profile.md new file mode 100644 index 0000000000..3ee0f502a2 --- /dev/null +++ b/docs/content/guides/octobot-configuration/custom-profile.md @@ -0,0 +1,86 @@ +--- +title: "Custom profile" +description: "Lean how to create custom configuration profiles on your OctoBot." +sidebar_position: 5 +--- + + + +# Custom Profile + +A custom profile allow to customize [strategy and trading mode](/guides/octobot-trading-modes/trading-modes). + +To create a custom profile : + +1. Open an existing profile page +2. Click on duplicate button + +![duplicate octobot profile](/images/guides/configuration/duplicate-octobot-profile.png) + +## Evaluator and trading configuration + +![custom profile trading modes selector](/images/guides/configuration/custom-profile-trading-modes-selector.png) + +**user/profiles/profile_name/tentacles_config.json** is a configuration file +telling OctoBot which evaluators, strategies and trading modes to use. It is +kept up to date after each [Tentacle Manager](/guides/octobot-advanced-usage/tentacle-manager) +usage (when new elements become available). + +An example of **user/profiles/profile_name/tentacles_config.json** is available <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles/blob/master/profiles/daily_trading/tentacles_config.json" rel="nofollow">as default_tentacles_config.json on github</a>. + +![custom profile evaluator selector](/images/guides/configuration/custom-profile-evaluator-selector.png) + +## Understanding configuration files + +Enabled [evaluators and trading modes](/guides/octobot-trading-modes/trading-modes) are stored in configuration files. You will probably never need to touch them but here is how they work. + +Example of **tentacles_config.json**: + +```json +{ + "RSIMomentumEvaluator": true, + "DoubleMovingAverageTrendEvaluator": true, + "BBMomentumEvaluator": true, + "MACDMomentumEvaluator": true, + "CandlePatternMomentumEvaluator": false, + "ADXMomentumEvaluator": true, + + "InstantFluctuationsEvaluator": true, + + "RedditForumEvaluator": false, + "GoogleTrendStatsEvaluator": true, + + "TempFullMixedStrategiesEvaluator": true, + "InstantSocialReactionMixedStrategiesEvaluator": false +} +``` + +- Here, the first part is about technical analysis evaluators: they are all + activated except for the **CandlePatternMomentumEvaluator**. This means that + any technical evaluator of these types (except **CandlePatternMomentumEvaluator**) + will be used by OctoBot. +- Second part contains only **InstantFluctuationsEvaluator**, OctoBot will + then take real time market moves into account using **InstantFluctuationsEvaluator** only. +- Third part is the social evaluation. Here OctoBot will look at Google + stats using **GoogleTrendStatsEvaluator**. However, OctoBot will not look + a reddit (`"RedditForumEvaluator": false`), therefore + a [Reddit interface](/guides/octobot-interfaces/reddit) configuration is not necessary. +- Last part are the strategies to use. Here only one strategy out of + two is to be used by OctoBot: **TempFullMixedStrategiesEvaluator**. + +### Details for the devs + +Any setting also applies to subclasses of these evaluators. For example +if you add an evaluator extending **ADXMomentumEvaluator**, `"ADXMomentumEvaluator": true` +will tell OctoBot to use the **most advanced ADXMomentumEvaluator** available: if you evaluator +extends **ADXMomentumEvaluator**, your evaluator will be considered more advanced than the **basic +ADXMomentumEvaluator** and OctoBot will use it. See the +developers [customize your OctoBot](/guides/octobot-tentacles-development/customize-your-octobot) +to learn how to add elements to your OctoBot. + +This is valid for any evaluator and strategy. + +Please note that any evaluator or strategy that doesn't extend +an element in **tentacles_config.json** has to be added to this file otherwise will +be ignored by OctoBot. When using the [Tentacle Manager](/guides/octobot-advanced-usage/tentacle-manager.md) +to install tentacles, this is done automatically. diff --git a/docs/content/guides/octobot-configuration/exchanges.md b/docs/content/guides/octobot-configuration/exchanges.md new file mode 100644 index 0000000000..18f3843a66 --- /dev/null +++ b/docs/content/guides/octobot-configuration/exchanges.md @@ -0,0 +1,76 @@ +--- +title: "Exchanges" +description: "Lean how to configure your exchange accounts for your OctoBot to trade using your exchange funds or simulated money." +sidebar_position: 7 +--- + +# Exchanges + + +To know more about an exchange support in OctoBot, please have a look at [the exchange summary](/guides/exchanges). + +## Web interface configuration + + +OctoBot reads trading data (prices, volumes, trades, etc) from exchanges. At least one exchange +is required for OctoBot to perform trades. In [simulation mode](/guides/octobot-usage/simulator), +exchange API keys configuration is not necessary. + +![exchange accounts configuration in octobot](/images/guides/configuration/exchange-accounts-configuration-in-octobot.png) + +You can configure OctoBot's exchanges using the [web interface](/guides/octobot-interfaces/web) +**configuration** tab. + +## Manual configuration + + +In **user/config.json**, find this lines: + +``` json +"exchanges": { + +} +``` + +Edit this lines and add the exchange(s) you want to use. + +In OctoBot configuration, exchange connection info are encrypted. To manually add exchange configuration, you can add your info directly into your **user/config.json** file, OctoBot will then take care of the encryption for you. + +If you want to encrypt your exchange keys before starting OctoBot, you +can use the following instructions: + +Start the OctoBot with option **--encrypter** like below : + +``` bash +python start.py --encrypter +``` + +And copy and paste your api-key and api-secret to your configuration file (see example below). + +Example with Binance and Coinbase : + +``` json +"exchanges": { + "binance": { + "api-key": "YOUR_BINANCE_API_KEY_ENCRYPTED", + "api-secret": "YOUR_BINANCE_API_SECRET_ENCRYPTED", + "sandboxed": false + }, + "coinbasepro": { + "api-key": "YOUR_EXCHANGE_API_KEY_ENCRYPTED", + "api-secret": "YOUR_EXCHANGE_API_SECRET_ENCRYPTED", + "api-password": "YOUR_EXCHANGE_API_SECRET_ENCRYPTED", + "sandboxed": true + } +} +``` + +- **api-key** is your exchange account API key +- **api-secret** is your exchange account API secret +- **api-password** is your exchange account API password if this exchange is requiring a password. Leave empty otherwise +- **sandboxed** if your exchange is supporting a sandbox(or testnet) mode, allows to trade on this version of the exchange + +## Simulated exchange + + +To use the Simulated exchange feature of the OctoBot, you have to specify a [trader simulator](/guides/octobot-usage/simulator.md) configuration. To use an exchange in simulation only, you also have to specify its configuration as described above. For most exchanges, API credentials are not required in simulation mode, adding the exchange with default values is enough. diff --git a/docs/content/guides/octobot-configuration/premium-octobot-extension.mdx b/docs/content/guides/octobot-configuration/premium-octobot-extension.mdx new file mode 100644 index 0000000000..dc46f80470 --- /dev/null +++ b/docs/content/guides/octobot-configuration/premium-octobot-extension.mdx @@ -0,0 +1,65 @@ +--- +title: "Premium extension" +description: "Permanently improve your OctoBot with the Premium OctoBot Extension. Access the Strategy Designer, secure TradingView webhooks and Crypto baskets." +sidebar_position: 8 +--- + + +# The Premium OctoBot Extension + +The Premium OctoBot Extension permanently improves your OctoBot simplifies its use. + +## Content of the Premium OctoBot Extension + +### Optimize your strategies with the Strategy Designer +Use the [Strategy Designer](../octobot-usage/strategy-designer), OctoBot's most advanced strategy optimization and backtesting interface. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="V4Z1xUhqWz8" title="The OctoBot Strategy Designer" /> + +The Strategy Designer is the best tool to create your OctoBot trading strategies, it notably allows you to easily: +- Compare backtesting results of your strategies +- Visualize your strategies' behavior through time +- Optimize your strategy while using a different live profile + +![octobot strategy designer results on doge btc shib](/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png) + +### Seamlessly connect to your TradingView strategies + +In the default version of OctoBot, [advanced technical knowledge or a paid external webhook provider](../octobot-interfaces/tradingview/using-a-webhook) such as Ngrok is required to connect to your TradingView alerts. + +![tradingview ema strategy illustration with 2 buy and 2 sell](/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +Using OctoBot with the Premium OctoBot extension makes [TradingView strategies automation](../octobot-interfaces/tradingview): +- Simple: Use OctoBot cloud webhooks - no Ngrok subscription is needed for webhooks +- Easy: You get a unique webhook URL you can use rightaway +- Secure: Use the OctoBot cloud secure webhook system + +### Use OctoBot cloud crypto baskets + +OctoBot cloud [crypto baskets](https://www.octobot.cloud/features/crypto-basket) are special strategies that make it easy to invest in the top crypto of the market or specific themes. + +<div style={{textAlign: "center"}}> + ![octobot open source using crypto baskets from premium extension](/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png) +</div> + +With the Premium OctoBot Extension, you can follow every OctoBot cloud crypto baskets directly from your open source OctoBot: your OctoBot will keep them up-to-date. + +## How to get the Premium OctoBot Extension? +The Premium OctoBot Extension can be purchased directly from your open source OctoBot using your OctoBot account. After purchase, it will be bound to your OctoBot account. This means that updating or reinstalling your OctoBot will automatically install your extension as long as you are connected to your OctoBot account. + +<div style={{textAlign: "center"}}> +![premium octobot extension buy section](/images/guides/premium-octobot-extension/premium-octobot-extension-buy-section.png) +</div> + +The extension is a single purchase, there is no monthly subscription. Once you bought it, you just have it. And this includes the future improvements and features we will add later on. + +## Is the Premium OctoBot Extension mandatory? +No, the Premium OctoBot Extension is completely optional. Your OctoBot will run just fine with or without it. + +The extension adds features to improve certain aspects of your OctoBot such as creating a strategy, following TradingView strategies or using OctoBot cloud crypto baskets. + +However, if you prefer using OctoBot without those improvements, then you are more than welcome to use the base version of OctoBot for as long as you want. + + diff --git a/docs/content/guides/octobot-configuration/profile-configuration.md b/docs/content/guides/octobot-configuration/profile-configuration.md new file mode 100644 index 0000000000..c81a24821e --- /dev/null +++ b/docs/content/guides/octobot-configuration/profile-configuration.md @@ -0,0 +1,158 @@ +--- +title: "Profile configuration" +description: "Lean how to configure an OctoBot profile by selecting its trading strategy, evaluators, cryptocurrencies, exchanges and trader setting." +sidebar_position: 2 +--- + + + +# Profile configuration + +## Strategies + +Most [evaluators and trading modes](/guides/octobot-trading-modes/trading-modes) can be configured. + +![octobot trading mode details from profiles](/images/guides/configuration/octobot-trading-mode-details-from-profiles.png) + +To open the configuration interface, click on your strategy configuration icon. + +![trading mode configuration from profiles](/images/guides/configuration/trading-mode-configuration-from-profiles.png) + + +This edition interface is generated according to the user inputs of the evaluator or trading mode to configure. You will find the technical details on the developers section, in [the tentacles configuration docs](/guides/octobot-tentacles-development/create-a-tentacle-package#configuration). + +It is also possible to manually edit each configuration file using a +text editor for JSON. When configurable, each evaluator or trading mode +has a **NameOfTheRelatedClass.json** file in +**user/profiles/profile_name/specific_config**. Note: this file created in your profile after any change in the default configuration of the tentacle. + + +### Custom profiles + +When using default profiles, trading modes and strategies configurations can be edited, but not switch to others. +To use other trading modes, strategies or evaluators, you can duplicate a default profile to create a [custom profile](custom-profile) +which can be more deeply configured. +![custom profile trading modes selector](/images/guides/configuration/custom-profile-trading-modes-selector.png) + + +## Currencies + +![octobot trading pairs settings from profiles](/images/guides/configuration/octobot-trading-pairs-settings-from-profiles.png) + +OctoBot will trade all the cryptocurrencies listed in its configuration. +To tell which cryptocurrencies to trade, add the currency in the +**crypto-currencies** section in +**user/profiles/profile_name/profile.json**. + +In order to keep OctoBot working at its full potential, we recommend to +trade **between 1 and 5** different assets **not to use more than 10 to +15** different assets at the same time, depending on the size of your +available funds. + +### Wildcard + +To tell OctoBot to trade all BTC trading pairs (with BTC as a quote +asset), use the wildcard "\*" instead of a list for "pairs", directly in your profile's profile.json file: + +```json +"crypto-currencies":{ + "Bitcoin": { + "pairs": ["*"], + "quote": "BTC" + } +} +``` + +A "quote" is required to specify the name of the currency to trade +with. + +## Exchanges + +![octobot exchanges settings from profiles](/images/guides/configuration/octobot-exchanges-settings-from-profiles.png) + +For each profile, you can enable the exchanges you want to trade on. + +It is also where you can select if you want to use **spot** or **future** trading on those exchanges. + +## Trading + +![octobot trading settings from profiles](/images/guides/configuration/octobot-trading-settings-from-profiles.png) + +OctoBot can process two types of trading: + +- Real trading using your exchanges' portfolio. +- Simulated trading using any imaginary portfolio. + +### Reference-market + +The **Reference-market** parameter defines which currency OctBot should +use as a reference, this reference is used to compute profitability and +the portfolio total value + +### Risk + +Any type of trading has its risk parameter. It is a parameter defining +the behavior of the trader, similarly to a real human trader. + +The **Risk** parameter defines the behaviour of OctoBot in an optimism +manner. + +It is a value between 0 and 1: + +- A low risk (closer to 0) will make OctoBot a very safe trader with + few bold moves and mostly small trades. A 0 risk bot is very + pessimistic (regarding its orders creation) and does not expect big + market moves. +- A high risk (closer to 1) will make OctoBot a very active and heavy + trader. A 1 risk bot is very optimistic (regarding its orders + creation) and is expecting significant market moves. + +### Trader + +When the **Enabled** parameter of the **Trader** section is set at **true**, OctoBot will trade +using your real funds from your exchange's accounts. When **false** +OctoBot will never any create a real trade. + +### Load trade history + +When the **load-trade-history** parameter is set at **true**, OctoBot +will load the account's recent trades for the enabled traded pairs at +startup. This allows to have a view on your account's trade history. +When **false**, OctoBot will only historize trades that happen after the +bot startup. + +### Trader simulator + +Additionally to the real trading system, a [trading simulator](/guides/octobot-usage/simulator) is +available in OctoBot. + +[Here is the guide describing the simulator feature of OctoBot](/guides/octobot-usage/simulator) + +## Configuration files + +OctoBot configuration is located in the **user** folder: + +- **user/config.json** is the global configuration file, mostly used + to setup the bot exchanges credentials, interfaces and notification + settings. +- **user/profiles/** contains all the [profiles](profiles) + created and imported in your OctoBot. + +OctoBot's web interface allows to easily edit the configuration, +however, it is also possible to manually edit configuration files. +Please be careful when manually editing them or OctoBot won't be able +to read them and wont start. Json file are readable and editable using +any text editor. + +```bash +ERROR root <class 'Exception'>: Error when load config +``` + +This will appear when a configuration file is not a json valid file. + +**user/config.json** is the technical configuration file of OctoBot, an +example is available <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/octobot/config/default_config.json" rel="nofollow">on +github</a>. + +When starting OctoBot, if the **user** folder is missing or incomplete, +it will automatically be created or completed with default values. diff --git a/docs/content/guides/octobot-configuration/profiles.md b/docs/content/guides/octobot-configuration/profiles.md new file mode 100644 index 0000000000..475f2dbfe8 --- /dev/null +++ b/docs/content/guides/octobot-configuration/profiles.md @@ -0,0 +1,27 @@ +--- +title: "Profiles" +description: "What is an OctoBot profile ? OctoBot is configured using profiles, where your configuration for a specific trading strategy is saved and can be shared." +sidebar_position: 1 +--- + +# Profiles + +OctoBot's trading configuration is using profiles (located into +user/profiles). This allows for quick switches between previously set +configurations. Each profile defines a [Trading Mode](/guides/octobot-trading-modes/trading-modes) configuration as well as other settings. + +![octobot trading mode details from profiles](/images/guides/configuration/octobot-trading-mode-details-from-profiles.png) + +Profiles include: + +- Tentacles activations +- Tentacles configurations +- Traded pairs +- Enabled exchanges +- Trading configuration +- Automation configuration + +Login related data (exchange api keys, telegram settings, ...) are not stored in profiles. + + +Profiles can also be [shared and imported](sharing-and-importing-octobot-profiles) between OctoBot's users. diff --git a/docs/content/guides/octobot-configuration/sharing-and-importing-octobot-profiles.md b/docs/content/guides/octobot-configuration/sharing-and-importing-octobot-profiles.md new file mode 100644 index 0000000000..7a1d6bca60 --- /dev/null +++ b/docs/content/guides/octobot-configuration/sharing-and-importing-octobot-profiles.md @@ -0,0 +1,23 @@ +--- +title: "Sharing profiles" +description: "Share your OctoBot configuration by sharing its profile. Import your friends' profile and use it from your OctoBot." +sidebar_position: 6 +--- + +# Sharing and importing OctoBot profiles + +## How to share an OctoBot profile + +To share an OctoBot profile, open your profile configuration, click on "Edit profiles", open to your profile overview and click "Share". + +![share octobot profile](/images/guides/configuration/share-octobot-profile.png) + +Your profile will be downloaded as a zipped folder, ready to be imported by another OctoBot. + +## How to import an OctoBot profile + +To import an OctoBot profile, open your profile configuration, click on "Edit profiles", open any profile overview and click "Import". OctoBot will then invite you to select the profile to import and import it. + +![import octobot profile](/images/guides/configuration/import-octobot-profile.png) + +Your imported profile will then be available next to your other profiles, ready to be used and edited. diff --git a/docs/content/guides/octobot-installation/_category_.json b/docs/content/guides/octobot-installation/_category_.json new file mode 100644 index 0000000000..16485faa48 --- /dev/null +++ b/docs/content/guides/octobot-installation/_category_.json @@ -0,0 +1 @@ +{"label": "Installation", "position": 2} diff --git a/docs/content/guides/octobot-installation/cloud-install-octobot-on-digitalocean.md b/docs/content/guides/octobot-installation/cloud-install-octobot-on-digitalocean.md new file mode 100644 index 0000000000..e93648990f --- /dev/null +++ b/docs/content/guides/octobot-installation/cloud-install-octobot-on-digitalocean.md @@ -0,0 +1,71 @@ +--- +title: "In the cloud" +description: "Install your OctoBot in the cloud with DigitalOcean in a few minutes and have your OctoBot trading using your strategies 24/7" +sidebar_position: 2 +--- + + + +# Install OctoBot in the cloud with DigitalOcean + +## Create a DigitalOcean account + +- Create an account on DigitalOcean by following this link: <a href="https://digitalocean.pxf.io/octobot-app" rel="nofollow">DigitalOcean</a> (or log in if you already have one). + +- Validate your account by adding a payment method. + +## Start the OctoBot application + +- Open the <a href="https://digitalocean.pxf.io/octobot-app" rel="nofollow">OctoBot App page</a> on Digital Ocean marketplace. + +- Click on "Create OctoBot droplet". + +![DigitalOcean Create Droplet Button](/images/guides/installation/digitalocean/digital-ocean-octobot-app-page.png) + +- Choose a region close to you. + +![DigitalOcean Droplet choose region](/images/guides/installation/digitalocean/choose-droplet-location.png) + +- Let the OctoBot application image selected + +![DigitalOcean Droplet choose region](/images/guides/installation/digitalocean/digital-ocean-octobot-image.png) + +- Select the desired server power. The minimal requirement is the $6/month option. + +![DigitalOcean Droplet choose pricing](/images/guides/installation/digitalocean/digital-ocean-droplet-pricing.png) + +- Enter a secure password or a ssh key. + +![DigitalOcean Droplet choose pricing](/images/guides/installation/digitalocean/digital-ocean-droplet-access.png) + +- Click at the bottom on "Create droplet". + +- Wait for the Droplet to start. + +![DigitalOcean Droplet wait for boot complete](/images/guides/installation/digitalocean/wait-for-droplet-start.png) + +## Access OctoBot + +- On the DigitalOcean Droplet page, get the Droplet's IP. For example, in this example, it's IP `143.198.96.188`. + +![DigitalOcean droplet IP address](/images/guides/installation/digitalocean/get-droplet-ip.png) + +- Copy this address. + +- In your browser, open a new tab and type http://$DROPLET_IP. In this example, you would type `http://143.198.96.188`. + +<div style="text-align: center"> + +![open OctoBot web interface with droplet IP](/images/guides/installation/digitalocean/open-octobot-with-droplet-ip.png) + +</div> + +- If your browser indicates that the connection is not secure (which is normal because it is not HTTPS), accept by clicking "continue to the site". + +- After a few seconds, the web interface of your OctoBot should appear. + +:::warning + Attention: Since anyone knowing the IP of your OctoBot can open this + interface, it is strongly recommended to add a [password + protection](/en/guides/octobot-interfaces/web#protect-your-web-interface). +::: diff --git a/docs/content/guides/octobot-installation/install-octobot-on-raspberry-pi.md b/docs/content/guides/octobot-installation/install-octobot-on-raspberry-pi.md new file mode 100644 index 0000000000..cadea53dcd --- /dev/null +++ b/docs/content/guides/octobot-installation/install-octobot-on-raspberry-pi.md @@ -0,0 +1,59 @@ +--- +title: "With Raspberry Pi" +description: "Learn how to easily install and start your OctoBot on Raspberry Pi using the executable version of the bot." +sidebar_position: 5 +--- + + + +# Install OctoBot on Raspberry Pi + +## 1. Preparing the Raspberry Pi + +1. Install Rapberry OS and configure it. +2. Enable `ssh` as it will be essential for accessing the Raspberry remotely from a local network +3. Create a new user or use the default one and change the password to use a strong password. + +## 2. Install OctoBot + +1. On the Octobot latest release page, download the `OctoBot_linux_arm64` + file: this is the Raspberry Pi x64 compatible version of OctoBot. + +<div style="text-align: center"> + <a href="https://github.com/Drakkar-Software/OctoBot/releases/latest"><strong>Get the latest release</strong></a> +</div> + +2. Copy the file to the `/home/pi/` folder + Note: here `pi` it is the folder of the `pi` user (default user). + +3. To facilitate this process (when using Windows), you can use <a href="https://winscp.net/eng/index.php" rel="nofollow">WinSCP</a>: it has a graphical interface and works like the Windows "file explorer". It will also be easier to later edit your Raspberry Pi files. + +4. Connect to Raspberry through a terminal using the following command: `ssh pi@192.168.1.XX` replace `pi` by your Raspberry username and `192.168.1.XX` by your Raspberry IP address and enter the password you created in setp 1. + +5. After logging on to the Raspeberry it is necessary to make the file "OctoBot_linux_arm64" into an executable. To do this, still from the terminal, type this command: `sudo chmod +x OctoBot_linux_arm64` + +6. Done. Nothing else is needed! + +## 3. Run OctoBot + +1. To run OctoBot, use the terminal from the previous step or open a new one and go to the folden containing the OctoBot executable and type in `./OctoBot_linux_arm64` + OctoBot starts and creates the necessary folders the first time it runs. + +2. In the Web browser you already have access to your OctoBot through the Raspberry Pi's local IP at the following address: `http://192.168.1.XX:5001` where `192.168.1.XX` is the IP address of your Rapberry Pi. It is the same as the one you use to connect to your Rapberry Pi. + +3. Press `Ctrl-A` then `Ctrl-D`. This will detach your screen session but leave your OctoBot process running. You can now close the terminal. + +## 4. Starting OctoBot automatically + +You might want OctoBot to start automatically when starting your Raspberry Pi. + +To start OctoBot automatically after restarting Raspberry Pi, proceed as follows. +Still from a terminal: + +1. Type in the following command: `crontab -e` +2. Add the following line at the end: `@reboot /home/pi/OctoBot_linux_arm64` where `pi` is your username +3. Save + +In the event of a power outage, your Raspberry Pi will automatically restart your OctoBot and continue executing its configured strategies. + +Also, every time your Raspberry Pi starts up, it will run Octobot and you will be able to access it from your browser. diff --git a/docs/content/guides/octobot-installation/install-octobot-on-your-computer.mdx b/docs/content/guides/octobot-installation/install-octobot-on-your-computer.mdx new file mode 100644 index 0000000000..c46af3a610 --- /dev/null +++ b/docs/content/guides/octobot-installation/install-octobot-on-your-computer.mdx @@ -0,0 +1,68 @@ +--- +title: "Local installation" +description: "Learn how to install your OctoBot on your own computer or server using Docker, the executable or Python and the open source code on github or PIP." +sidebar_position: 1 +--- + + + +# Install OctoBot on your Computer + +## Option 1: With the executable + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="unbkFUAWXWM" title="Install OctoBot with the executable" /> + +1. Download the latest version for your system using the <a href="https://github.com/Drakkar-Software/OctoBot/releases/latest" rel="nofollow">latest release on GitHub</a>. There is an OctoBot executable for: + - [Windows](with-binary) + - [Linux](with-binary) + - [MacOS](with-binary) + - [Raspberry Pi (arm64)](install-octobot-on-raspberry-pi) + +<div style={{textAlign: "center"}}> + **[Get the latest release](https://github.com/Drakkar-Software/OctoBot/releases/latest)** +</div> +2. Start the downloaded executable. OctoBot opens a terminal, initializes its +configuration environment and the OctoBot interface opens automatically. + +> On Windows, you can just double click the executable. +> On Linux, first type `chmod +x OctoBot_linux_x64` and then `./OctoBot_linux_x64` in a terminal open next to your downloaded executable to start OctoBot. + +## Option 2: With Docker + + +<YouTube id="JL7ef3bK8SY" title="Install OctoBot using Docker" /> + +1. If you don't have <a href="https://www.docker.com/" rel="nofollow">Docker</a>, install it on your system. Here is the docker documentation for <a href="https://docs.docker.com/engine/install/ubuntu/" rel="nofollow">Ubuntu</a>, <a href="https://docs.docker.com/engine/install/debian/" rel="nofollow">Debian</a> and <a href="https://phoenixnap.com/kb/docker-on-raspberry-pi/" rel="nofollow">Raspberry pie</a>. +2. Download the OctoBot image using the command:\ + `docker pull drakkarsoftware/octobot:stable` +3. Start your OctoBot using the command:\ + `docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable` + +The interface of your OctoBot is available on port 5001 of your system. http://localhost:5001/ or http://ip-address:5001/ + +Find all the details regarding the docker installation on [our OctoBot with docker guide](install-octobot-with-docker-video) + +## Option 3: From the source code + +Deploy your OctoBot from the code to be able to use a local Python environment, edit or audit the source code. + +1. If you don't already have Python 3.10, install <a href="https://www.python.org/downloads/release/python-31011/" rel="nofollow">Python in version 3.10</a>. +2. Clone the OctoBot repository\ + `git clone https://github.com/Drakkar-Software/OctoBot` +3. Install the Python dependencies\ + `cd OctoBot`\ + `python3 -m pip install -Ur requirements.txt` +4. Start your OctoBot using this command:\ + `python3 start.py` + +The interface of your OctoBot is available on port 5001 of your system. http://localhost:5001/ or http://ip-address:5001/ + +Find all the details regarding the source code based installation on [the python and git installation guide](install-octobot-with-python-and-git). + +## Other option: using a cloud + +If you prefer using a server to run your OctoBot, here is the [DigitalOcean cloud installation Guide](cloud-install-octobot-on-digitalocean). + +Installing OctoBot on a server is a simple way to let your OctoBot execute its strategies 24/7. diff --git a/docs/content/guides/octobot-installation/install-octobot-with-docker-video.mdx b/docs/content/guides/octobot-installation/install-octobot-with-docker-video.mdx new file mode 100644 index 0000000000..e16928da91 --- /dev/null +++ b/docs/content/guides/octobot-installation/install-octobot-with-docker-video.mdx @@ -0,0 +1,160 @@ +--- +title: "With Docker" +description: "Learn how to install and start your OctoBot on your own computer or server (Windows, Mac or Linux) using the docker version of the bot." +sidebar_position: 4 +--- + + + +# Install OctoBot with Docker in video + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="JL7ef3bK8SY" title="Install OctoBot using Docker" /> + +> For unix distribution only + +## Quick install + +- In a terminal, enter the command to install the necessary packages: + +```bash +sudo apt install git build-essential ca-certificates curl gnupg lsb-release -y +``` + +- Next, enter the command to install Docker: + +```bash +curl -fsSL https://get.docker.com -o get-docker.sh +sh ./get-docker.sh +``` + +- Then download the OctoBot docker-compose file: + +```bash +curl -fs https://raw.githubusercontent.com/Drakkar-Software/OctoBot/master/docker-compose.yml -o docker-compose.yml +``` + +- Start OctoBot with the previously downloaded docker-compose file: + +```bash +docker compose up -d +``` + +- Don't forget to run the following command at the end of the installation (and logout) + +```bash +sudo usermod -aG docker $USER +``` + +## Manual install + +### Using the stable image: + +1. Download OctoBot + +```bash +docker pull drakkarsoftware/octobot:stable +``` + +2. Start OctoBot (for linux x64/x86 and raspberry linux arm64/arm32) + +```bash +docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable +``` + +### Using the latest image (might be unstable): + +1. Download OctoBot latest + +```bash +docker pull drakkarsoftware/octobot:latest +``` + +2. Start OctoBot (for linux x64/x86 and raspberry linux arm64/arm32) + +```bash +docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:latest +``` + +### How to look at OctoBot logs ? + +```bash +docker logs OctoBot -f +``` + +### How to stop OctoBot ? + +```bash +docker stop OctoBot +``` + +### How to restart OctoBot ? + +```bash +docker restart OctoBot +``` + +### How to update OctoBot ? + +```bash +docker pull drakkarsoftware/octobot:stable +docker stop OctoBot +docker rm OctoBot +docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable +``` + +### Running with docker-compose + +A simple way to run a docker image is to use docker-compose : + +- Install <a href="https://docs.docker.com/compose/install/" rel="nofollow">docker-compose</a> +- Download the <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/docker-compose.yml" rel="nofollow">docker-compose.yml file</a> +- Create a `.env` file in the current folder +- Add `HOST=YOUR_IP_ADDRESS` in the newly created `.env` file. (where YOUR_IP_ADDRESS is the ip address of the computer, can be replaced by `localhost` if it's a local computer) +- Start OctoBot with docker-compose (with the previous file `docker-compose.yml` in the current folder): + + ```bash + docker-compose up -d + ``` + +You can now open the OctoBot web interface at https://YOUR_IP_ADDRESS. + +### Start multiple OctoBots with docker + +To run a second OctoBot on the same computer : + +1. Create a new directory and enter it +2. Start OctoBot's web interface on a new port by changing "-p" option + +```bash +docker run -itd --name OctoBot -p 8000:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable +``` + +In this example, the second OctoBot's web interface is accessible at http://127.0.0.1:8000. + +Any port can be used except those already used by another OctoBot or any software on your system. + +### Start OctoBot with docker managed files + +> Warning: It's easier to use but it will not be possible to update it without deleting its files. + +-v arguments can be removed from previous start commands but OctoBot's local files will then be managed by docker (and not directly visible). + +```bash +docker run -itd --name OctoBot -p 80:5001 drakkarsoftware/octobot:stable +``` + +Local OctoBot files path are located in /var/lib/docker and can be listed with the following command + +```bash +docker inspect -f '{{ .Mounts }}' OctoBot +``` + +To copy files of a directory outside the OctoBot container, for example for logs files : + +```bash +docker cp OctoBot:/octobot/logs/. . +``` + +Wherer "OctoBot" is your container name diff --git a/docs/content/guides/octobot-installation/install-octobot-with-executable-video.mdx b/docs/content/guides/octobot-installation/install-octobot-with-executable-video.mdx new file mode 100644 index 0000000000..135e4662f3 --- /dev/null +++ b/docs/content/guides/octobot-installation/install-octobot-with-executable-video.mdx @@ -0,0 +1,28 @@ +--- +title: "With Executable" +description: "Learn how to install and start your OctoBot on your own computer or server (Windows, Mac or Linux) using the executable version of the bot." +sidebar_position: 3 +--- + + + +# Install OctoBot with the Executable in video + +## Installation + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="unbkFUAWXWM" title="Install OctoBot with the executable" /> + +Go to <a href="https://github.com/Drakkar-Software/OctoBot/releases/latest" rel="nofollow">OctoBot's official releases</a> page and download the latest version for your system: Windows, Linux or MacOS. You can even use OctoBot on Arm64 Raspberry PI. + +<div style={{textAlign: "center"}}> + **[Get the latest release](https://github.com/Drakkar-Software/OctoBot/releases/latest)** +</div> + +## Usage + +Start the downloaded executable. OctoBot opens a terminal and installs its configuration environment and the OctoBot interface opens automatically. + +> On Windows, you can just double click the executable. +> On Linux, first type `chmod +x OctoBot_linux_x64` and then `./OctoBot_linux_x64` in a terminal open next to your downloaded executable to start OctoBot. diff --git a/docs/content/guides/octobot-installation/install-octobot-with-pip.md b/docs/content/guides/octobot-installation/install-octobot-with-pip.md new file mode 100644 index 0000000000..a09862d92a --- /dev/null +++ b/docs/content/guides/octobot-installation/install-octobot-with-pip.md @@ -0,0 +1,72 @@ +--- +title: "With PIP" +description: "Learn how to install and start your OctoBot on your own computer or server (Windows, Mac or Linux) using the PIP (Python index package) version of the bot." +sidebar_position: 7 +--- + + + +# Install OctoBot on Python index package (pip) + +## Requirements + +- Python 3.10 (<a href="https://www.python.org/downloads/" rel="nofollow">download</a>) +- Add python to your PATH (<a href="https://superuser.com/questions/143119/how-do-i-add-python-to-the-windows-path" rel="nofollow">tutorial windows</a>) + +## Installation + +In a command line (with python in your PATH) type the following command: + +```bash +python3.10 -m pip install OctoBot +``` + +You can change **python3.10** to the name of the python binary you added to your PATH (for example on linux you may use **python3** or even **python** if the **python --version** commands outputs a python 3.10 version) + +## Usage + +```bash +OctoBot +``` + +## Update + +Executing the following command will update your Python OctoBot using the latest version, as well as installing the latest requirements. + +```bash +python3.10 -m pip install -U OctoBot +``` + +The next restart will automatically update your OctoBot tentacles. + +## Start multiple OctoBots + +To run a second OctoBot on the same computer : + +1. Create a new directory and enter it +2. Start OctoBot and stop it after 1-2min to let it create default files +3. Open user/config.json file +4. Change web config lines + + FROM + + ```json + "web": { + "auto-open-in-web-browser": true + } + ``` + + TO + + ```json + "web": { + "auto-open-in-web-browser": true, + "port": 8000 + } + ``` + + In this example, the second OctoBot's web interface is accessible at http://127.0.0.1:8000. + + Any port can be used except those already used by another OctoBot or any software on your system. + +5. Start the new OctoBot diff --git a/docs/content/guides/octobot-installation/install-octobot-with-python-and-git.md b/docs/content/guides/octobot-installation/install-octobot-with-python-and-git.md new file mode 100644 index 0000000000..760d7ab97d --- /dev/null +++ b/docs/content/guides/octobot-installation/install-octobot-with-python-and-git.md @@ -0,0 +1,119 @@ +--- +title: "With Python and Git" +description: "Learn how to install and start your OctoBot on your own computer or server (Windows, Mac or Linux) using the open source Python code directly from GitHub." +sidebar_position: 6 +--- + +# Install OctoBot on Python and Git + +## Requirements + +- Packages installed : Python3.10.X, Python3.10.X-dev, Python3.10.X-pip, git + +## Installation + +**First, make sure you have python3.10 and python3.10-dev and python3.10-pip installed on your computer.** + +### Using the current stable version (master branch) + +**This is the recommended python installation.** + +Clone the OctoBot repository + +``` bash +git clone https://github.com/Drakkar-Software/OctoBot +``` + +Install python packages : + +``` bash +cd OctoBot +python3 -m pip install -Ur requirements.txt +``` + + +> On some setup like 32-bit ARM architectures, you might get a `rust` related error while running `python3 -m pip install -Ur requirements.txt` when installing `cryptography`. +If this happens, you need to install the `rust compiler`: `cryptography` is coded in `rust`. +``` bash +sudo apt-get install -y rustc +``` +You can then restart `python3 -m pip install -Ur requirements.txt`. + +### Using the latest version (dev branch) + +**This is installation allows to use the most up-to-date version of OctoBot but might broken depending on the moment it is being done (modules updates might be in progress in this branch).** + +Clone the OctoBot repository using the **dev** branch + +``` bash +git clone https://github.com/Drakkar-Software/OctoBot -b dev +``` + +*Or if you already have an OctoBot repository* + +``` bash +git checkout dev +git pull +``` + +### Installing latest tentacles : +> Warning: using the latest tentacles might break your OctoBot + +#### On Unix +``` bash +cd OctoBot +python3 -m pip install -Ur requirements.txt +export TENTACLES_URL_TAG="latest" +python3 start.py tentacles --install --all +``` +#### On Windows +``` bash +cd OctoBot +python3 -m pip install -Ur requirements.txt +SET TENTACLES_URL_TAG=latest +python3 start.py tentacles --install --all +``` + +## Usage + +The following command replaces *OctoBot Launcher*: + +``` bash +python3 start.py +``` + +## Update + +Executing the following command will update your Python OctoBot using the latest version of the selected branch (`master` or `dev`), as well as installing the latest requirements. +``` bash +git pull +cd OctoBot +python3 -m pip install -Ur requirements.txt +``` +The next restart will automatically update your OctoBot tentacles. + +## Python3 + +There **python3** is refering to your **Python3.10.X** installation, just adapt the commands to match your setup if any different (might be python, python3, python3.10, etc: it depends on your environment). + +## Start in background + +> For unix distribution only + +With the Linux screen command, you can push running terminal applications to the background and pull them forward when you want to see them. + +``` bash +sudo apt-get install -y screen +screen python3 start.py +``` + +You need the number from the start of the window name to reattach it. If you forget it, you can always use the -ls (list) option, as shown below, to get a list of the detached windows: + +``` bash +screen -ls +screen -r 23167 +``` + +(23167 is an example value) + +OctoBot has been working away in the background is now brought back to your terminal window as if it had never left. diff --git a/docs/content/guides/octobot-installation/octobot-troubleshoot.md b/docs/content/guides/octobot-installation/octobot-troubleshoot.md new file mode 100644 index 0000000000..265e32df46 --- /dev/null +++ b/docs/content/guides/octobot-installation/octobot-troubleshoot.md @@ -0,0 +1,120 @@ +--- +title: "Troubleshoot" +description: "Any question when installing OctoBot ? Check out the most common installation issues on our troubleshoot guide." +sidebar_position: 8 +--- + + + +# OctoBot Troubleshoot + +## Keeping the same configuration and history when updating OctoBot + +On OctoBot, the `user` folder, located in the directory you are executing OctoBot from, contains: + +- Your current configuration +- Your profiles +- Your portfolio history +- Your trades and PNL history + +In order to keep the same configuration and history when using another version of OctoBot, you can either: + +- Copy the `user` directory of your previous OctoBot into your new OctoBot folder. +- Or execute the new OctoBot in the same directory as the previous one. Warning: the `tentacles` folder will be replaced by its new version. + +## Keeping the same backtesting data files when updating OctoBot + +When updating your OctoBot, you might want to keep your previous backtesting data files. + +For your new OctoBot to access your backtesting data files, just copy the `backtesting` directory (located in the directory you were executing your previous OctoBot from) into your new OctoBot folder. + +## Windows + +### Time synchronization + +This issue happens when error messages such as `'recvWindow' must be less than ...` appear. + +Open an administrator terminal (`Win + X` then `A`) and type: + +```bash +net stop w32time +net start w32time +w32tm /resync +w32tm /query /status +``` + +Code from <a href="https://serverfault.com/questions/294787/how-do-i-force-sync-the-time-on-windows-workstation-or-server" rel="nofollow">serverfault.com</a> + +Another solution found by @alpi on discord channel: [timesynctool.com](http://www.timesynctool.com) + +### OctoBot freeze + +When running OctoBot on Windows, clicking into the OctoBot terminal (Powershell or Cmd) can freeze the log output and therefore freeze OctoBot execution (OctoBot will be waiting for the log to be published to continue). + +To fix this issue, untick the "QuickEdit Mode" in your terminal properties and restart it. + +![Powershell](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/powerShellEditMode.jpg) + +![Cmd](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/cmdQuickEdit.jpg) + +To open the properties menu, right click on the terminal window header and select "properties". + +## CentOS + +### Install OctoBot on CentOS + +Requirements + +```bash +yum -y update +yum install -y git wget sqlite-devel screen +yum -y groupinstall "Development Tools" +yum -y install openssl-devel bzip2-devel libffi-devel +yum install -y screen +cd /root +wget https://www.python.org/ftp/python/3.10.11/Python-3.10.11.tgz +tar xvf Python-3.10.11.tgz +cd Python-3.10*/ +./configure --enable-loadable-sqlite-extensions && make && sudo make install +``` + +OctoBot + +```bash +git clone https://github.com/Drakkar-Software/OctoBot.git +cd OctoBot/ +python3.10 -m pip install virtualenv +virtualenv venv +source venv/bin/activate +pip install -Ur requirements.txt +python start.py +``` + +## Linux + +### Time synchronization + +This issue happens when error messages such as `'recvWindow' must be less than ...` appear. + +On Debian or Ubuntu, open a terminal and type: + +```bash +sudo service ntp stop +sudo ntpd -gq +sudo service ntp start +``` + +Requires `ntp` package installation `sudo apt-get install ntp`. + +Code from + +<a href="https://askubuntu.com/questions/254826/how-to-force-a-clock-update-using-ntp#256004" rel="nofollow">askubuntu.com</a> +. + +### Installation + +During pip install if you have SSL problems, open a terminal and type + +```bash +pip3 install service_identity --force --upgrade +``` diff --git a/docs/content/guides/octobot-interfaces/_category_.json b/docs/content/guides/octobot-interfaces/_category_.json new file mode 100644 index 0000000000..312eb34a7c --- /dev/null +++ b/docs/content/guides/octobot-interfaces/_category_.json @@ -0,0 +1 @@ +{"label": "Interfaces", "position": 6} diff --git a/docs/content/guides/octobot-interfaces/chatgpt.md b/docs/content/guides/octobot-interfaces/chatgpt.md new file mode 100644 index 0000000000..14e7271437 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/chatgpt.md @@ -0,0 +1,63 @@ +--- +title: "ChatGPT" +description: "Learn how to configure your OctoBot to trade using AI and ChatGPT or another LLM. Understand the costs differences of running it on your local setup." +sidebar_position: 3 +--- + + + +# Trading with ChatGPT + +Seamlessly [Integrate ChatGPT within your trading strategies](/guides/octobot-trading-modes/chatgpt-trading) and profit from the power of AI trading. + +<div style="text-align: center"> + +![octobot collaborating with chatgpt light](/images/guides/interfaces/octobot-collaborating-with-chatgpt-light.png) + +</div> + +Check out the [ChatGPT trading guide](/guides/octobot-trading-modes/chatgpt-trading) to learn more about how to trade with ChatGPT using OctoBot + +OctoBot uses the ChatGPT interface to interact with ChatGPT. + +## ChatGPT service configuration + +To use ChatGPT on an open source OctoBot, the only configuration you need is to enter your OpenAI API key into the GPT Interface + +1. Create or login to your <a href="https://platform.openai.com/" rel="nofollow">OpenAI</a> account +2. Create a new API key on <a href="https://platform.openai.com/account/api-keys" rel="nofollow">your account settings</a> +3. In the Accounts tab of the web interface, add the `GPT` interface if missing +4. Copy your API key into the `openai-secret-key` GPT configuration + +![octobot chatgpt configuration openai key and custom base url](/images/guides/chatgpt/octobot-chatgpt-configuration-openai-key-and-custom-base-url.png) + +## Custom LLM base url for prediction + +OctoBot can connect to any LLM using the **LLM custom base url** configuration parameter. This is useful to use other AI models than the default OpenAI ones. + +In this case, the **Secret key** parameter, will be used to authenticate to this other LLM server when necessary. It will be ignored otherwise. + +## Trading with Ollama prediction + +To connect to a local Ollama LLM model, configure the **LLM custom base url** of your OctoBot to your Ollama server address followed by `/v1`. + +Using the default Ollama address (`localhost:11434`), your **LLM custom base url** would then be: **`http://localhost:11434/v1`**. + +## Selecting your LLM model + +Selection of the LLM model to use is configured in your GPTEvaluator. When your GPT interface as configured and your `GPTEvaluator` is enabled (when using a ChatGPT-based profile or a custom profile using the `GPTEvaluator`), you can select the LLM model to use from your GPTEvaluator configuration. + +The `GPTEvaluator` configuration interface can be accessed from your profile or directly from the `/config_tentacle?name=GPTEvaluator` path of your OctoBot web interface. + +## Costs + +Using ChatGPT from automated API calls is a paid service from OpenAI. Each call to ChatGPT will consume +a few OpenAI tokens. + +Each call to ChatGPT is recrating a request which usually consumes around 90 OpenAI tokens. +You can get the current price of OpenAI token from <a href="https://openai.com/pricing" rel="nofollow">the OpenAI pricing page</a>. + +You can estimate the cost of using ChatGPT related features by estimating the amount of requests per day. + +> Running a strategy on 4h for 2 trading pairs on 1 exchange: the GPT evaluator will be called every +> 4 hours for each trading pair for each exchange. diff --git a/docs/content/guides/octobot-interfaces/reddit.md b/docs/content/guides/octobot-interfaces/reddit.md new file mode 100644 index 0000000000..cdcac6eb72 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/reddit.md @@ -0,0 +1,84 @@ +--- +title: "Reddit" +description: "Learn how to configure your OctoBot to trade using Reddit and watch subreddits to trade according to the Reddit posts." +sidebar_position: 5 +--- + + + +# Trading based on Reddit posts + +<div style="text-align: center"> + ![reddit trading automation illustrated by reddit + logo](/images/guides/interfaces/reddit-connection-to-octobot-illustrated-by-reddit-logo.png) +</div> + +OctoBot can connect to <a href="https://www.reddit.com" rel="nofollow">Reddit</a> to monitor Reddit posts from subreddits. + +When the **RedditForumEvaluator** is enabled, OctoBot will the use <a href="https://github.com/cjhutto/vaderSentiment" rel="nofollow">VADER Sentiment Analysis's AI</a> to analyse the sentiment of each post and make a summary of each coin to be used by the [Daily Trading Mode](/guides/octobot-trading-modes/daily-trading-mode). + +## RedditForumEvaluator configuration + +In the Accounts tab of the web interface, add the `Reddit` interface if missing. + +![RedditForumEvaluator configuration to select subreddits to follow](/images/guides/interfaces/RedditForumEvaluator-configuration-to-select-subreddits-to-follow.png) + +Configure the **RedditForumEvaluator** to specify the subreddits to follow for each traded Cryptocurrency. + +## Reddit connection configuration + +To connect to Reddit, OctoBot needs a Reddit script app, which you can create from your Reddit account, or a new account dedicated to OctoBot. + +<div style="text-align: center"> + ![reddit octobot config](/images/guides/interfaces/reddit-octobot-config.png) +</div> + +1. Login on your Reddit account if you already have one +2. Go to your account's <a href="https://www.reddit.com/prefs/apps/" rel="nofollow">Applications preferences</a>. +3. Create a new `script` app + <div style="text-align: center"> + ![reddit create app](/images/guides/interfaces/reddit-create-app.png) + </div> + - `Name` and `description` can be set as you wish + - Leave `About URL` empty + - `Redirect URI` won't be used, enter `https://www.reddit.com/` (or any other valid url) + - Create your app +3. **Client-Id** is the list of characters under your App name, next to its icon +4. **Client-Secret** is the **secret** identifier of the App +<div style="text-align: center"> + ![reddit created app](/images/guides/interfaces/reddit-created-app.png) +</div> + +Copy and paste your new Reddit app details into your OctoBot configuration. +<div style="text-align: center"> + ![reddit octobot config](/images/guides/interfaces/reddit-octobot-config.png) +</div> + + +### Configuration from user/config.json + +Add in **user/config.json** in the services key : + +```json +"reddit": { + "client-id": "YOUR_CLIENT_ID", + "client-secret": "YOUR_CLIENT_SECRET" + } +``` + +**Exemple:** + +```json +"services": { + "a service": { + + }, + "reddit": { + "client-id": "YOUR_CLIENT_ID", + "client-secret": "YOUR_CLIENT_SECRET" + }, + "another service": { + + } +} +``` diff --git a/docs/content/guides/octobot-interfaces/telegram.mdx b/docs/content/guides/octobot-interfaces/telegram.mdx new file mode 100644 index 0000000000..1fb2b169e2 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/telegram.mdx @@ -0,0 +1,133 @@ +--- +title: "Telegram" +description: "Learn how to configure your OctoBot to be accessible directly from Telegram. Command your OctoBot from anywhere directly from Telegram." +sidebar_position: 2 +--- + + + +# Your OctoBot on Telegram + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="E3nShLEFA90" title="Octobot on Telegram" /> + +OctoBot can use <a href="https://telegram.org" rel="nofollow">Telegram</a> to communicate. With this interface, OctoBot can: + +- Show for how long OctoBot is working +- Display the current portfolio +- Display the current open orders +- Display the profitability since OctoBot started +- Display OctoBot's understanding of the market and its risk parameter +- Changes OctoBot's current risk +- Stop OctoBot +- Trigger emergency trades + +And much more. + +To know the full command list, use the **/help** command + +## Telegram service configuration + +### Create your bot + +<div style={{textAlign: "center"}}> + ![telegram connection to octobot illustrated by telegram + logo](/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png) +</div> + +First, you need to create a Telegram bot, it will be telegram equivalent of your OctoBot. +See tutorial on the <a href="https://core.telegram.org/bots#6-botfather" rel="nofollow">Telegram +website</a> to create one. + +### Configuration + +In the Accounts tab of the web interface, add the `Telegram` interface if missing. + +![telegram config](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/telegram_config.png) + +### Token + +Get the token in the chat with **botfather** and add it to services config + +### Chat id + +Send a message to your bot and go to this url https://api.telegram.org/botXXX:YYYY/getUpdates with XXX:YYYY replaced by your bot's token. + +Warning: To get your chat id from this url, your telegram bot must have a pending message (the one you just sent). This means that your OctoBot must not be on or you will just receive this message `{"ok":true,"result":[]}` from api.telegram.org. + +Search for: + +```bash +chat: + id: XXXXXXXXX +``` + +Add this id to the telegram service configuration. + +## Allow your bot to listen to telegram groups and channels + +OctoBot can receive messages (trade signals for example) from Telegram groups. + +When invited in a Telegram group, OctoBot will never talk in this group but will +listen to the chat. Using this feature, it is possible to process **telegram signals** in OctoBot. + +In order to be able to read group messages, your telegram bot must have +its **privacy mode** **disabled**. To disable it: + +- say `/setprivacy` to **botfather** +- **botfather** replies: _Choose a bot to change group messages settings._ +- enter the name of your bot +- **botfather** gives information about privacy mode and your bot's privacy setting +- enter `Disable` + +Your OctoBot is now able to read any group message. + +## user/config.json configuration + +Add in **user/config.json** in the services key : + +```json +"telegram": { + "chat-id": "YOUR_CHAT_ID", + "token": "YOUR_BOT_TOKEN" + } +``` + +**Exemple:** + +```json +"services": { + "a service": { + + }, + "telegram": { + "chat-id": "YOUR_CHAT_ID", + "token": "YOUR_BOT_TOKEN" + }, + "another service": { + + } +} +``` + +## Troubleshooting + +### Chat not found + +If OctoBot is producing this you get this error, it means that your +[chat-id](#chat-id) is not set correctly. With an incorrect chat-id, +OctoBot is able to read and reply commands but can't push messages by itself. + +### TelegramSignalEvaluator is not receiving telegram messages + +To use the default Telegram signal evaluator, make sure: + +1. Your telegram group / channel is referenced in the TelegramSignalEvaluator configuration +2. Your telegram bot is setup according to [Allow your bot to listen to telegram groups and channels](#allow-your-bot-to-listen-to-telegram-groups-and-channels) +3. Your telegram bot is in the telegram channel / group +4. The telegram notifications you want your bot to process are matching the notification pattern defined in the TelegramSignalEvaluator documentation +5. The telegram signal trading pairs also are traded pairs in your current OctoBot configuration and are supported by the connected exchange(s) +6. Your TelegramSignalEvaluator is activated + +When a telegram message is ignored, a debug log (in terminal and OctoBot.log) is produced explaining the reason why each notifications has be ignored. Please first refer to this log as it will likely show what is wrong with the current setup. diff --git a/docs/content/guides/octobot-interfaces/telegram/telegram-api.md b/docs/content/guides/octobot-interfaces/telegram/telegram-api.md new file mode 100644 index 0000000000..c04e15c560 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/telegram/telegram-api.md @@ -0,0 +1,48 @@ +--- +title: "Telegram API" +description: "Learn how to configure your OctoBot to trade based on signals from Telegram channels." +sidebar_position: 1 +--- + + + +# Telegram API + +Telegram API allows your OctoBot to listen to telegram **public groups**. + +:::info + The Telegram API configuration is not necessary if your goal is to command your OctoBot from Telegram or to have your OctoBot listen to a private group. In those cases, the [initial Telegram configuration](.) is enough. +::: + + +## Create your App + +Before working with Telegram’s API, you need to get your own API ID and hash: + +In order to obtain an API id and develop your own application using the Telegram API you need to do the following: + +- Sign up for Telegram using any application. +- Log in to your Telegram core: https://my.telegram.org. +- Go to 'API development tools' and fill out the form. +- You will get basic addresses as well as the **api_id** and **api_hash** parameters required for user authorization. + + +## Configuration + + +Add in **user/config.json** in the services key : + +``` json +"telegram-api": { + "telegram-api": "YOUR_API_ID", + "telegram-api-hash": "YOUR_API_HASH", + "telegram-phone": "YOUR_TELEGRAM_ACCOUNT_PHONE_NUMBER" +} +``` + +### Secure code + +At the first OctoBot start with a new `telegram-api` configuration a 2-factor authentication code will be sent to your account. +Just enter it the code in your OctoBot console and press enter. + +> If you are asked a password and Telegram didn't send it to you, try to provide the mobile phone number without "+". diff --git a/docs/content/guides/octobot-interfaces/tradingview.md b/docs/content/guides/octobot-interfaces/tradingview.md new file mode 100644 index 0000000000..f73dd4a6d2 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/tradingview.md @@ -0,0 +1,73 @@ +--- +title: "TradingView" +description: "Learn how to make OctoBot trade based on TradingView alerts. Send signals from TradingView webhooks and have your OctoBot trade on your exchange." +sidebar_position: 4 +--- + + + +# Automating trading from TradingView + +<div style="text-align: center"> + +![tradingview trading automation illustrated by tradingview logo](/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png) + +</div> + +With OctoBot, you can listen to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> alerts +and automate trades based on your TradingView indicators or strategies. + +This way, when a TradingView alert is fired, you can instantly create orders on the exchange of your choice. +This works with any kind of alert, whether it is from: + +- A price threshold you defined yourself +- A threshold value from an indicator +- A trading strategy you are using on TradingView + +:::info + The following guides cover using TradingView with [OctoBot trading + bots](https://www.octobot.cloud/trading-bot). Please use the [TradingView automated trading investor + guide](/en/investing/tradingview-automated-trading) if you are automating + TradingView strategies using a [TradingView + OctoBot](/en/investing/tradingview-trading-tutorial) from + [www.octobot.cloud](https://www.octobot.cloud/). +::: + +Learn more on TradingView trading in OctoBot on the [TradingView Trading Mode guide](/guides/octobot-trading-modes/tradingview-trading-mode) + +## Indicator based alerts + +You can make your OctoBot trade based on TradingView indicators or price events. Follow the [indicator alert guide](tradingview/automating-trading-from-an-indicator) to learn more. + +## Strategy based alerts + +You can also make your OctoBot trade based on TradingView Pine Script strategies. Follow the [strategy alert guide](tradingview/automating-trading-from-a-pine-script-strategy) to synchronize your OctoBot with your TradingView strategies. + +## OctoBot configuration + +Simply add the `Trading-view` interface to your OctoBot's "Accounts" configuration and configure the [webhook service](tradingview/using-a-webhook). + +## TradingView account + +First, create a <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> account if you don't already have one. +Then, to be able to automate your TradingView strategy, you will either need to use [webhooks](tradingview/using-a-webhook), which requires a TradingView pro account. If you don't have one, you can use the 30 days free trial. + +<div style="text-align: center"> + +<a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">![tradingview go pro trial button](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-go-pro-trial-button.png)</a> + +</div> + +<div style="text-align: center"> + +![tradingview start trial button](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-start-trial-button.png) + +</div> + +Your account is now ready to be used with OctoBot! + +## Alert format + +You can send commands to your OctoBot using TradingView alerts including creating market or limit orders, take profits, canceling orders and much more. + +Check out the [alert format guide](tradingview/alert-format) to learn more. diff --git a/docs/content/guides/octobot-interfaces/tradingview/alert-format.md b/docs/content/guides/octobot-interfaces/tradingview/alert-format.md new file mode 100644 index 0000000000..843915ab31 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/tradingview/alert-format.md @@ -0,0 +1,139 @@ +--- +title: "Alert format" +description: "Find everything you need to know to format your TradingView alerts and automate trades on your OctoBot. Buy or sell with market or limit orders, set take profits and stop losses." +sidebar_position: 3 +--- + + + +# TradingView alerts format + +:::info + The following guide describes how to format TradingView alerts to trade using the [open source version of OctoBot](../../octobot). +::: + +## Creating orders + +### Minimal alert content + + +The alert format is designed to be easily used from TradingView. Minimal alerts contain the exchange name, the alert symbol (BTCUSDT for BTC/USDT and BTC/USDT:USDT) and the side of the order to create. +Example: + +``` bash +EXCHANGE={{exchange}} +SYMBOL={{ticker}} +SIGNAL=BUY +``` + +![alert-message](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-alert-message.png) + +For a buy signal. + + +``` bash +EXCHANGE={{exchange}} +SYMBOL={{ticker}} +SIGNAL=SELL +``` + +For a sell signal. + +Parameters can be separated using a new line or a `;` character. + +### Additional alert parameters + +Additional order details can be added to the alert. These are optional: + +``` bash +ORDER_TYPE=LIMIT +VOLUME=0.01 +PRICE=42000 +STOP_PRICE=38000 +TAKE_PROFIT_PRICE=55000 +REDUCE_ONLY=true +``` + +- `ORDER_TYPE` is the type of order, it can be `MARKET`, `LIMIT` or `STOP` +- `VOLUME` is the volume of the order in base asset (BTC for BTC/USDT) it supports the [orders amount syntax](/guides/octobot-trading-modes/order-amount-syntax) +- `PRICE` is the price of the limit order in quote asset (USDT for BTC/USDT). The [orders price syntax](/guides/octobot-trading-modes/order-price-syntax) is supported +- `STOP_PRICE` is the price of the stop order to create. When increasing the position or buying in spot trading, the stop loss will automatically be created once the initial order is filled. When decreasing the position (or selling in spot) using a LIMIT `ORDER_TYPE`, the stop loss will be created instantly. _Required when `ORDER_TYPE=STOP`_. The [orders price syntax](/guides/octobot-trading-modes/order-price-syntax) is supported +- `TAKE_PROFIT_PRICE` is the price of the take profit order to create. When increasing the position or buying in spot trading, the take profit will automatically be created once the initial order is filled. When decreasing the position (or selling in spot) using a LIMIT `ORDER_TYPE`, the take profit will be created instantly. The [orders price syntax](/guides/octobot-trading-modes/order-price-syntax) is supported. Multiple take profit prices can be used from `TAKE_PROFIT_PRICE_1`, `TAKE_PROFIT_PRICE_2`, ... When using multiple take profits, the initial entry amount will be evenly split between take profits unless a `TAKE_PROFIT_VOLUME_RATIO` is set for each take profit. +- `TAKE_PROFIT_VOLUME_RATIO` is the ratio of the entry order volume to include in this take profit. Used when multiple take profits are set. Specify multiple values using `TAKE_PROFIT_VOLUME_RATIO_1`, `TAKE_PROFIT_VOLUME_RATIO_2`, …. When used, a `TAKE_PROFIT_VOLUME_RATIO` is required for each take profit. +Exemple: `TAKE_PROFIT_PRICE=1234;TAKE_PROFIT_PRICE_1=1456;TAKE_PROFIT_VOLUME_RATIO_1=1;TAKE_PROFIT_VOLUME_RATIO_2=2` will split 33% of entry amount in TP 1 and 67% in TP 2. +- `REDUCE_ONLY` when true, only reduce the current position (avoid accidental short position opening when reducing a long position). **Only used in futures trading**. Default is false. +- `TAG` is an identifier to associate to the order(s) to create. Any value can be used as tag. Tags can later be used to cancel specific orders. +- `LEVERAGE` the updated leverage value to use. **Only used in futures trading**. + +### Examples + +#### A limit buy order of 0.01 BTC at 30000 USDT with a take profit +``` bash +EXCHANGE=binance +SYMBOL=BTCUSDT +VOLUME=0.01 +PRICE=30000 +TAKE_PROFIT_PRICE=35000 +SIGNAL=BUY +ORDER_TYPE=LIMIT +``` + +#### A limit sell order of 0.01 ETH at +10% of its current price in BTC +``` bash +EXCHANGE=binance +SYMBOL=ETHBTC +VOLUME=0.01 +PRICE=10% +SIGNAL=SELL +ORDER_TYPE=LIMIT +``` + +#### A stop loss sell order of 10 ATOM at 3 USDT from the current price wigh an "exit1" tag +``` bash +EXCHANGE=binance +SYMBOL=ATOMUSDT +VOLUME=10 +STOP_PRICE=-3d +SIGNAL=SELL +ORDER_TYPE=STOP +TAG=exit1 +``` + + +## Canceling orders + +Use `SIGNAL=CANCEL` to cancel orders identified buy their `SYMBOL` and `EXCHANGE` + +### Canceling every ETH/BTC order on Binance +``` bash +EXCHANGE=binance +SYMBOL=ETHBTC +SIGNAL=CANCEL +``` + +### Canceling every ATOM/USDT sell order with an "exit1" tag on Binance +``` bash +EXCHANGE=binance +SYMBOL=ATOMUSDT +SIGNAL=CANCEL +PARAM_SIDE=SELL +TAG=exit1 +``` + +Additional cancel parameters are available: +- `PARAM_SIDE` is the side of the orders to cancel, it can be `buy` or `sell` to only cancel buy or sell orders. +- `TAG` is the tag to select orders to cancel with. When provided, only orders created with the given tag and symbols will be canceled. + +## Alerts security + +You can use a token to add a security layer on your webhook alerts using +an identification token, this token is randomly generated by your +OctoBot and can be found on the configuration interface and in execution +logs. + +To add your token on the tradingview.com signal: add the following line +to the alert message: + +``` bash +TOKEN=YOUR_TOKEN +``` diff --git a/docs/content/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy.md b/docs/content/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy.md new file mode 100644 index 0000000000..68135b1ec9 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy.md @@ -0,0 +1,41 @@ +--- +title: "Trading from strategies" +description: "Learn how to make OctoBot trade based on TradingView Pine Script strategies. Send signals from TradingView Pine Script strategies and have your OctoBot instantly trade on your exchange." +sidebar_position: 2 +--- + + + +# Automating trading from a TradingView Pine Script strategy + +With OctoBot, you can listen to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> <a href="https://www.tradingview.com/pine-script-docs/en/v5/index.html#" rel="nofollow">Pine Script</a> strategies signals +to automate your trades. + +## Create strategy alert + +To send alerts from a strategy, use the <a href="https://www.tradingview.com/pine-script-docs/en/v5/concepts/Alerts.html?highlight=alert_message#order-fill-events" rel="nofollow">`alert_message`</a> parameter from Pine Script strategy functions which can create orders. + +1. Define the content of your alert before any `strategy.entry`, `strategy.exit` or `strategy.close` call: + - example: `messageBuy = "EXCHANGE=binance;SYMBOL=SOLUSDT;VOLUME=100a%;SIGNAL=BUY"` + > Note: when defining your alert, remember to add `;` between each parameter. +2. In the strategy section, add `alert_message=messageBuy` to your strategy `entry`, `exit` or `close` calls: + - example: `strategy.entry("Buy", strategy.long, comment="Buy Signal Triggered", alert_message=messageBuy)` +3. When creating a new alert (_right-click on the strategy / add new alert_) make sure that you: - Select the name of your strategy as the condition - Name the alert (the name can be whatever you want) - Replace **ALL** the message content with exactly `{{strategy.order.alert_message}}` + ![adding a TradingView strategy alert](/images/guides/adding-a-tradingview-strategy-alert.png) + +- _Et voilà !_ This alert will automatically notify your OctoBot each time your strategy executes `entry`, `exit` or `close` calls. + +Tips: + +- For multi-coin, simply edit the strategy and modify the SYMBOL entry in the messageBuy definition. You can thus vary the parameters according to the assets. +- It can be easier to define multiple messages such as `messageBuy`, `messageBuyWithATakeProfit`, `messageSell`or even `messageCancel` and use the appropriate message later on (with the `alert_message` parameter) when calling `entry`, `exit` or `close`. + +_Special thanks to @KidCharlemagne for creating the basis of this guide !_ + +## Alert format + +Learn more about how to create your TradingView alerts on [the TradingView alert format guide](alert-format). + +## TradingView setup + +Wondering how to make your OctoBot listen to TradingView signals ? Check out [our TradingView integration guide](/guides/octobot-interfaces/tradingview). diff --git a/docs/content/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator.md b/docs/content/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator.md new file mode 100644 index 0000000000..103cbde833 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator.md @@ -0,0 +1,47 @@ +--- +title: "Trading from indicators" +description: "Learn how to make OctoBot trade based on TradingView indicators. Send signals from TradingView Pine Script indicator and have your OctoBot instantly trade on your exchange." +sidebar_position: 1 +--- + + + +# Automating trading from a TradingView indicator + +With OctoBot, you can listen to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> indicator signals +to automate your trades. + +## Create an indicator alert + +- Go to the right menu and click on the alert button + + ![alert-menu-button](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-alert-menu.png) + +- Create a new alert with ![create-alert-button](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-add-alert-button.png) +- Choose the condition : an indicator cross, a price drop, whatever + you want +- Add your OctoBot [webhook](using-a-webhook) as the following screenshot. + + ![set-webhook-url](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-alert-webhook-url.png) + + You will find OctoBot's alert webhook URL on your OctoBot's configuration + page or in OctoBot's starting logs. It should be an url like `https://webhook.octobot.cloud/tradingview/xxxx` or `http://XXXXXXXX.ngrok.io/webhook/trading_view`. + + WARNING: To improve performances, webhooks are started only when + required, this means that **you need to activate a webhook related + tentacle to get the webhook url** (a tentacle such as the **trading + view signals trading mode**) + + ![octobot open source configured tradingview alert and webhook config](/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-and-webhook-config.png) + + ![webhook log](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/webhook_log.jpg) + +- Set the alert message + +## Alert format + +Learn more about how to create your TradingView alerts on [the TradingView alert format guide](alert-format). + +## TradingView setup + +Wondering how to make your OctoBot listen to TradingView signals? Check out [our TradingView integration guide](/guides/octobot-interfaces/tradingview). diff --git a/docs/content/guides/octobot-interfaces/tradingview/automating-tradingview-free-email-alerts-with-octobot.md b/docs/content/guides/octobot-interfaces/tradingview/automating-tradingview-free-email-alerts-with-octobot.md new file mode 100644 index 0000000000..e503b6753f --- /dev/null +++ b/docs/content/guides/octobot-interfaces/tradingview/automating-tradingview-free-email-alerts-with-octobot.md @@ -0,0 +1,87 @@ +--- +title: "Using email alerts" +description: "Learn how to configure your OctoBot to trade using the free TradingView email alerts and automate any TradingView strategy with a free TradingView account." +sidebar_position: 4 +--- + + + +# Automating TradingView strategies with email + +:::info + Warning: The automation of TradingView alerts by email is unfortunately no longer available due to a recent restriction by TradingView. +::: + +When creating a TradingView alert, you can choose to be notified by email. This service is available with a **free TradingView account** and can be used to automate trades using OctoBot. + +<div style="text-align: center"> + <div> + ![tradingview alert email form + completed](/images/guides/trading-view/tradingview-alert-email-form-completed.png) + </div> +</div> + +:::info + The [Premium OctoBot + Extension](../../octobot-configuration/premium-octobot-extension) is required + to trade with OctoBot using TradingView email alerts. +::: + +## TradingView email alerts configuration + +To automate your trades based on TradingView alerts using emails, simply go to your `Accounts` configuration, select the `Interfaces` tab and add the `TradingView` interface. + +In case you didn't authenticate to your OctoBot account or setup your email yet (or did not yet purchase the Premium OctoBot Extension), then just click on `GENERATE EMAIL` to connect to your account and setup your alert email address on TradingView. + +<div style="text-align: center"> + ![octobot open source configuring tradingview alert email + address](/images/guides/trading-view/octobot-open-source-configuring-tradingview-alert-email-address.png) +</div> + +The next step is to configure your TradingView account to send alerts to this email address. + +<div style="text-align: center"> + <div> + ![open source octobot start tradingview email + config](/images/guides/trading-view/open-source-octobot-start-tradingview-email-config.png) + </div> +</div> + +## Register your alert email address on TradingView + +Now that you received your personal TradingView alert email address, simply follow the quick configuration steps directly from your OctoBot to add this email address to your TradingView alerts. + +The configuration helper will walk you through the following steps: + +1. Adding your email address to your TradingView alerts + <div style="text-align: center"> + <div> + ![octobot open source add tradingview alert + illustration](/images/guides/trading-view/octobot-open-source-add-tradingview-alert-illustration.png) + </div> + </div> +2. Receiving your confirmation code + <div style="text-align: center"> + <div> + ![octobot open source waiting tradingview verification + code](/images/guides/trading-view/octobot-open-source-waiting-tradingview-verification-code.png) + </div> + </div> +3. Starting your OctoBot to follow your TradingView alerts + <div style="text-align: center"> + <div> + ![octobot open source tradingview email configuration last + step](/images/guides/trading-view/octobot-open-source-tradingview-email-configuration-last-step.png) + </div> + </div> + +## Start trading using your TradingView strategy + +Your OctoBot is now ready to trade using your TradingView alerts. You can trade on any market and exchange following the [OctoBot open source alert format](alert-format). + +Once configured, your personal TradingView alert email address will be shown next to your TradingView configuration. In case you need to go back to the configuration interface, for example to revalidate your email address, just use the wheel icon. + +<div style="text-align: center"> + ![octobot open source configure button tradingview alert email + address](/images/guides/trading-view/octobot-open-source-configure-button-tradingview-alert-email-address.png) +</div> diff --git a/docs/content/guides/octobot-interfaces/tradingview/using-a-webhook.md b/docs/content/guides/octobot-interfaces/tradingview/using-a-webhook.md new file mode 100644 index 0000000000..99da1146e4 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/tradingview/using-a-webhook.md @@ -0,0 +1,81 @@ +--- +title: "Using a webhook" +description: "Learn how to configure your OctoBot webhooks to trade from TradingView signals using OctoBot cloud, Ngrok or your own setup." +sidebar_position: 5 +--- + + + +# Using a webhook with OctoBot + +There are many ways to wake your OctoBot up and make it do something, +one of them is using a webhook. With a webhook, you can automatically +send messages to your OctoBot from any website supporting this system. + +<a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> is one of them. + +In order to be able to receive TradingView webhook's message, you need to make your OctoBot reachable from TradingView. For this, there are 3 options: + +- Use the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension) and simply connect your OctoBot through the OctoBot cloud secure server. +- Use <a href="https://ngrok.com/" rel="nofollow">Ngrok</a> to act as a secure intermediary between the internet and your OctoBot. +- Or setup your own public IP and port configuration + +## Setting up your OctoBot's webhook + +1. In your OctoBot configuration, from the `Accounts` tab, in `Interfaces`, add the webhook service. +2. Set up your webhook configuration using one of the following options: + + - Option 1: Using [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension): just select the `Enable-Octobot-Webhook` + - Option 2: Using Ngrok : + + 1. Select `Enable-Ngrok`, uncheck `Enable-Octobot-Webhook` + 2. Create an account on <a href="https://ngrok.com/" rel="nofollow">ngrok</a> + 3. Copy your Ngrok token from https://dashboard.ngrok.com/get-started/your-authtoken + 4. Enter your Ngrok token into your OctoBot's webhook service configuration. + + - Option 3: Manual configration: if you are familiar with webhook setups and your OctoBot is exposed to the Internet, you can disable both `Enable-Ngrok` and `Enable-Octobot-Webhook` and configure the listening port and IP for the webhook yourself. + _Note: With this manual configuration, when using docker, you also need to add `-p 9000:9000` after `docker run`_. + +3. Activate a tentacle using a webhook service (like the TradingView signals trading mode). +4. Restart your OctoBot. +5. The webhook address will be displayed on your OctoBot configuration, on to the TradingView inteface and printed in your logs. + +:::info + **Your Webhook URL is missing?** For your webhook URL to be displayed, a + TradingView-related profile has to be active. If you don't see the URL in your + TradingView configuration, select a TradingView profile in your profile + configuation and restart your OctoBot. +::: + +Follow [this guide](/guides/octobot-interfaces/tradingview) to know more on how to send TradingView signals to your OctoBot. + +## Configuration examples + +### Configuration option 1: Using the Premium OctoBot Extension + +**TradingView** and **Webhook** configuration in the Accounts tab +![octobot open source premium extension webhook configuration](/images/guides/trading-view/octobot-open-source-premium-extension-webhook-configuration.png) + +The Webhook URL is also printed in logs +![octobot open source premium extension webhook log](/images/guides/trading-view/octobot-open-source-premium-extension-webhook-log.png) + +### Configuration option 2: Using Ngrok + +TradingView and Webhook configuration in the Accounts tab +![octobot open source ngrok webhook configuration](/images/guides/trading-view/octobot-open-source-ngrok-webhook-configuration.png) + +The Webhook URL is also printed in logs +![octobot open source ngrok webhook log](/images/guides/trading-view/octobot-open-source-ngrok-webhook-log.png) + +Activate a tentacle using a webhook service (like the TradingView signals trading mode) + +## About ngrok.com + +You can use Ngrok with a free account, the only drawback of having a +free version is that your webhook address will change at every OctoBot +restart, you will have to update it on your message sender <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a>. + +To avoid having to re-enter your IP each time, you can either: + +- Use the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension): in this case you only pay once and always have your OctoBot secure webhook ready to receive your TradingView alerts. +- Pay a Ngrok monthly subscription diff --git a/docs/content/guides/octobot-interfaces/web.md b/docs/content/guides/octobot-interfaces/web.md new file mode 100644 index 0000000000..989090f3e5 --- /dev/null +++ b/docs/content/guides/octobot-interfaces/web.md @@ -0,0 +1,120 @@ +--- +title: "Web interface" +description: "Learn how to configure your OctoBot web interface. Secure it with a password authentication, set it up to have multiple OctoBots on the same computer." +sidebar_position: 1 +--- + + + +# Web interface + +![home](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/home.jpg) + +OctoBot comes with a web interface allowing you to: + +- Follow OctoBot's status and moves +- Interact with OctoBot +- Configure OctoBot and the [Trading Modes](/guides/octobot-trading-modes/trading-modes) to use +- Use [Backtesting](/guides/octobot-usage/backtesting) to optimize your strategies + +## Configuration + +In the Accounts tab of the web interface, add the `Web` interface if missing. + +![web config](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/web_config.png) + +- **port** is the port you want the web interface to be accessible from. Changing it allows you to [have multiple OctoBots running on the same computer](../octobot-usage/having-multiple-octobots-on-one-computer). +- **auto open in web browser** is whether starting your OctoBot should open a new tab on your browser to display the web interface +- **requires password** is whether the web interface of your OctoBot should be protected by a password + +## Protect your web interface + +### Using an authentication password + +You can set a password to protect your web interface. This way you can secure the access to your OctoBot when hosting it on a cloud or just add a security layer to your setup. + +**By default no password is required.** + +You can activate the password authentication from the web interface configuration, it is also where you can set and change your password. + +Any IP will be automatically **blocked after 10 authentication failures in a row**. IPs will remain blocked until your OctoBot restarts. If you accidentally block your IP, you can just restart your OctoBot. + +### How to set it up ? + +- Go to "Accounts" page +- Select "Interfaces" on the left menu +- Click on "**\*\*\*\***" next to "Password: " +- Override the "\*\*\*\*" with your password +- Click on validate +- Click on "SAVE AND RESTART" red button on the left menu + +### You forgot your password + +If you forgot your password, go to your **user/config.json** file and change: + +```json +"require-password": true, +``` + +into: + +```json +"require-password": false, +``` + +Then restart your OctoBot. This way you will be able to access your OctoBot without a password and then change it. + +### About the web interface authentication + +- OctoBot's web interface authentication works on the assumption that you are the only person being able to access your OctoBot's file system and the associated processes. This authentication can be deactivated by anyone being able to edit your **user/config.json** and restart your OctoBot process. +- Only a SHA256 hash of your password will be stored in you **user/config.json** file. This is making it impossible to go back to the original password you entered. + +### Blocking requests from other websites (CSRF) + +You can set the `CORS_ALLOWED_ORIGINS` environment variable before starting your OctoBot, this way only requests from the specified origin(s) will be answered to. + +Examples: + +- `CORS_ALLOWED_ORIGINS=https://mybot.com` +- `CORS_ALLOWED_ORIGINS=http://localhost:5001` +- `CORS_ALLOWED_ORIGINS=https://mybot.com,https://myotherwebsite.com` + +Requests from other origins will be refused with a 400 error and the web interface will behave as if OctoBot was constantly disconnected. + +By default, no request filter is set (equivalent to CORS_ALLOWED_ORIGINS=\*) which might make your bot vulnerable to <a href="https://owasp.org/www-community/attacks/csrf" rel="nofollow">Cross Site Request Forgery attacks</a>. + +### user/config.json configuration + +Add in **user/config.json** in the services key : + +```json +"web": { + "auto-open-in-web-browser": false, + "ip": "0.0.0.0", + "password": "", + "port": 5001, + "require-password": false +} +``` + +You can also change the IP your web interface is binding to from **user/config.json**. + +**Example:** + +```json +"services": { + "a service": { + + }, + "web": { + "auto-open-in-web-browser": false, + "ip": "0.0.0.0", + "password": "", + "port": 5001, + "require-password": false + }, + "another service": { + + } +} +``` diff --git a/docs/content/guides/octobot-trading-modes/_category_.json b/docs/content/guides/octobot-trading-modes/_category_.json new file mode 100644 index 0000000000..48ed898f8f --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/_category_.json @@ -0,0 +1 @@ +{"label": "Trading Modes Guides", "position": 4} diff --git a/docs/content/guides/octobot-trading-modes/chatgpt-trading.mdx b/docs/content/guides/octobot-trading-modes/chatgpt-trading.mdx new file mode 100644 index 0000000000..f62d71408b --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/chatgpt-trading.mdx @@ -0,0 +1,73 @@ +--- +title: "ChatGPT trading" +description: "Learn how to use ChatGPT prediction within OctoBot to boost your crypto trading strategies with real-time AI market views and predictions." +sidebar_position: 5 +--- + + + +# ChatGPT Trading + +With OctoBot, trading according to artificial intelligences such as [ChatGPT](/guides/octobot-interfaces/chatgpt) is possible using both [DCA trading mode](dca-trading-mode) and [Daily trading mode](daily-trading-mode). + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="BV4ZHQrIpRQ" title="ChatGPT Crypto trading strategies using OctoBot" /> + +In this video, we are showing how one of the ChatGPT trading stragies of [OctoBot cloud](/) works. + +In OctoBot, ChatGPT is used as a trading indicator and not a way to create orders. This means that multiple [trading modes](trading-modes) can use ChatGPT. + +## Using ChatGPT in your trading modes + +When using the built-in [DCA trading mode](dca-trading-mode) and [Daily trading mode](daily-trading-mode), you can choose to enable the GPTEvaluator. When doing so, your trading modes will take the ChatGPT prediction into account when trading. + +<div style={{textAlign: "center"}}> + ![ai trading illustrated by octobot head with chatgpt logo trading bitcoin + ethereum litecoin usd + logos](/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png) +</div> + +You can therefore choose to trade solely on your ChatGPT predictions by enabling the ChatGPT evaluator only or to consolidate them with other evaluators. + +## The way the ChatGPT evaluator works + +The ChatGPT evaluator works in 3 steps: + +1. Gathering of market data: According to your ChatGPT evaluator configuration, market data are processed to be sent to ChatGPT and ask for a prediction +2. Asking for the prediction: OctoBot asks ChatGPT for a market prediction based on your given data. This prediction consists in asking ChatGPT if the market is more likely to go UP or DOWN in the near future with a % of confidence. +3. Analysing the ChatGPT prediction: According to the ChatGPT answer, the ChatGPT evaluator emits an evaluation. The side of the evaluation depends on the UP or DOWN answer and its weight is based on the confidence of ChatGPT in its prediction. + +## Configuring the ChatGPT evaluator + +The ChatGPT evaluator can be configured in many ways to customize the way you want ChatGPT to make predictions. + +![trading with chatgpt in octobot GPTEvaluator configuration](/images/guides/trading-with-chatgpt-in-octobot-GPTEvaluator-configuration.png) + +- `Indicator` defines the way you wish to preprocess market data before sending them to ChatGPT. You can choose to send the raw candles (no preprocessing), a moving average or other type of processed values. +- `Source` is the input to give to the `Indicator` when one is selected +- `Period` is the period setting of the `Indicator` when one is selected +- `Minimum confidence threshold` is a value in % starting from which to send `1` or `-1` instead of a value **between** -1 and 1. This setting is especially useful when using the [DCA trading mode](dca-trading-mode) in Evaluators based DCA which requires a `1` or `-1` evaluation +- `GPT Model` allows you to select the <a href="https://platform.openai.com/docs/models" rel="nofollow">OpenAI GPT model</a> to use for your evaluations. Note that models might have a different pricing. Other models can also be selected when connecting to a [custom LLM server](../octobot-interfaces/chatgpt#custom-llm-base-url-for-prediction). +- `Allow Reevaluation` is used when using the ChatGPT evaluator alongside other evaluators such as real time evaluators that might require re-evaluations. As re-evaluations can happen at a high frequency, disabling re-evaluations is a safeguard to avoid using too many [OpenAI tokens](/guides/octobot-interfaces/chatgpt#costs) in case this happens +- `OpenAI token limit` is the maximum amount of OpenAI token that can be spent within a day. You can use this setting to set a limit of the token use if you are unsure about the total cost of your strategy. + +:::info + When using the open source OctoBot, the ChatGPT evaluator requires the + [ChatGPT interface](/guides/octobot-interfaces/chatgpt) to be configured to + be able to operate. +::: + +## Backtesting with the ChatGPT evaluator + +It is not really possible to efficiently ask ChatGPT for the thousands of predictions required when running a [backtesting](/guides/octobot-usage/backtesting). It would take hours and cost a huge amount of OpenAI tokens. + +However, at OctoBot cloud, we decided to pay this cost for you on trading pairs that are used in [OctoBot cloud strategies](/). This means that backtesting with the ChatGPT evaluator is possible on trading pairs used by OctoBot cloud strategies. When selecting such pairs, your OctoBot will automatically fetch ChatGPT historical predictions and run your backtesting accordingly. + +## Going deeper in ChatGPT trading + +Learn more on ChatGPT trading strategies on our dedicated blog articles: + +- [Trading using ChatGPT](/blog/trading-using-chat-gpt) +- [ChatGPT trading strategy deep dive](/blog/chatgpt-strategy-deep-dive) +- [ChatGPT trading tools](/blog/introducing-chatgpt-trading-tool) diff --git a/docs/content/guides/octobot-trading-modes/daily-trading-mode.mdx b/docs/content/guides/octobot-trading-modes/daily-trading-mode.mdx new file mode 100644 index 0000000000..61dc43a397 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/daily-trading-mode.mdx @@ -0,0 +1,61 @@ +--- +title: "Daily trading mode" +description: "Using the Daily Trading Mode, you use the most flexible trading strategy on OctoBot. Trade SPOT and futures using any technical, social or AI evaluator." +sidebar_position: 4 +--- + + + +# Daily Trading Mode + +The Daily Trading Mode (or DailyTradingMode) is designed to be the most flexible trading mode. Using the Daily Trading Mode, you can trade using any technical, social, AI or realtime evaluator on SPOT and futures markets. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="e-GqmTfrchY" title="The OctoBot Daily Trading Mode" /> + +It allows you to create highly personalized trading strategies. + +Using the Daily Trading mode, the type of the created orders (market or limit) will be defined by the strength of the signal received from evaluators: +- A signal closer to -1 will trigger a market buy order +- A signal closer to -0.5 will trigger a limit buy order. +- A signal closer to 0.5 will trigger a limit sell order. +- A signal closer to 1 will trigger a market sell order. + + +## The Daily Trading Mode can + +- Buy or Sell when evaluators signal a long or short opportunity +- Automatically create a take profit order after each entry +- Automatically create a stop loss order after each entry +- Only Buy or Sell if configured to +- Be used to trade SPOT and Futures markets + +## Default - Target profits mode disabled +When the `Target profits mode` is disabled, the Daily Trading Mode will buy when a long signal is received and sell when a short signal is received. + +:::info + Using the Default mode, the Daily Trading Mode will not consider entry prices when creating exit orders. This means that if a short signal is received, a sell order will be created, even if the asset has been bought at a higher price. Be careful when trading assets that are in a downtrend in this configuration. In some cases, disabling sell orders could help to prevent this issue. +::: + +## Target profits mode +When the `Target profits mode` is enabled, similarly to the Default mode, the Daily Trading Mode will buy when a long signal is received and sell when a short signal is received. + +However in Target profits mode, signals are only used to enter a position (in SPOT or Futures) but never to close it. This mode avoid the described above "selling at a loss" issue of the Default mode. Take profits and stop losses should be configured directly from the Target profits mode configuration and will be created as soon as any entry order is filled. + + +## Configuring orders +- The Daily Trading mode can create market or limit orders depending on the signal's strengh. When using limit orders, the `Fixed limit prices difference` allows to set the % price difference to compute the order price. +- All in buy / sell order: can be enabled to trade using the maximum amount in portfolio each time +- Buy or sell order can be disabled to only consider buy or sell signals +- Order amounts can be configured using the [order amounts syntax](order-amount-syntax). +- `Maximum currency percent` can be used to reduce the maximum % allocated to a given currency. This can be useful to prevent multiple buy of the same asset during unusual market conditins. Warning: this is only used when order amounts are auto-computed. It is ignored when order amounts are set in configuration. + + +## When are orders created +The goal of the Daily Trading Mode is to have a state (long, neutral or short) and to create orders when this state changes. It means that for as long as the trading mode state remains the same, orders will remain. However if the state changes (from long to short for example), the all previous state orders that are still open will be cancelled and new state orders will be created. + +In event or a back and forth switch between neutral and the same state, open orders will simply be replaced. +For example a long state will create a limit buy order. If the Daily Trading Mode state later on changes to neutral, open orders won't be cancelled (neutral is not cancelling open orders). Now if the state goes back to long, the previously open buy order will be cancelled and replaced by a new one. If the previously created buy order was filled, a new buy order will be open. + +Profits comes from buying when the state is long and then selling when the state is short (or the opposite when shorting in futures trading). diff --git a/docs/content/guides/octobot-trading-modes/dca-trading-mode.md b/docs/content/guides/octobot-trading-modes/dca-trading-mode.md new file mode 100644 index 0000000000..58546fa1a0 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/dca-trading-mode.md @@ -0,0 +1,62 @@ +--- +title: "DCA trading mode" +description: "Optimize your Dollar cost Averaging strategy (DCA) using the OctoBot DCA Trading Mode. Automate DCA on technical indicators or on a regular basis." +sidebar_position: 2 +--- + + +# DCA Trading Mode + +The DCA Trading Mode (or DCATradingMode) is designed to buy and sell according to a [Smart Dollar cost averaging strategy](/blog/smart-dca-making-of). + +<div style="text-align: center"> + +![dca trading illustrated by a man watering a plant growing money](/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png) + +</div> + +It allows you to optimize your entry and exit prices according to your configuration. + +## The DCA Trading Mode can + +- Buy on a regular basis +- Buy when evaluators signal a buy opportunity +- Create multiple buy orders at different prices +- Automatically create one or many take profit orders after each entry +- Automatically create one or many stop loss orders after each entry +- Be used to trade SPOT and Futures markets + +## Time based DCA +Using the `Time based` Trigger mode, the DCA Trading Mode will create entry (buy) orders on a regular basis according to your configured `Trigger period`. + +## Evaluators based DCA +Using the `Maximum evaluators signals based` Trigger mode, the DCA Trading mode will create entry (buy) orders everytime a new maximum evaluator value is received. A maximum evaluator value is a value of `-1` or `1`. +Using this trigger mode, you can trigger DCA orders based on technical evaluators signals, signals from telegram, ChatGPT or any indicator you enable. Please note that a `-1` or `1` evaluator value is required, any other value will be ignored. + +## Configuring orders +- The DCA Trading mode can create entry (buy) orders as market or limit orders. When using limit orders, the `Limit entry percent difference` allows to set the % price difference to compute the buy order price. +- Secondary entry orders can also be enabled. There can be as many as configured and can have a different price and amount from the initial entry orders. +- Take profit (sell) orders can be enabled to automatically create sell orders when an entry order is filled. +- Stop loss orders can be enabled to automatically create stop loss orders when an entry order is filled. +- Similarly to secondary entry orders, exit order (take profit and stop loss) can also be split into multiple exit orders using different prices. When enabled, the entry amount will be evenly distributed between exit orders. +- Each entry and exit order amount can be configured using the [order amounts syntax](order-amount-syntax). +- Entry orders lifecycle: When `Cancel open orders on each entry` is enabled, only one entry (including its secondary orders if any) is allowed for each traded pair. This means that a new entry signal received when existing entry orders are open will first cancel open entry orders before creating orders associated to this new signal. On the order hand, when disabled, multiple entry orders from different signals could exist as the trading mode wont cancel them. +- `Enable initialization entry orders`: This parameter enables or disables the automated creation of entry orders when starting the bot, regardless of trigger conditions. +- The maximum part of your portfolio allocated to a given crypto can be limited using the `Max asset holding` parameter. For example, a "Max asset holding" of 30% means that the DCA Trading mode won't buy more BTC if the % of BTC holdings in your portfolio is higher than 30% of your portfolio total value. +:::info + For now, when using futures trading, the DCA Trading Mode only supports long positions. It will not create short positions. +::: + +## Health check +Enabling Health check on the DCA Trading Mode will ensure that there are no assets within the trading pairs that remain without sell orders. + +It is useful to ensure that the DCA strategy remains consistent even when restarting the bot or if your OctoBot has been offline for some time. + +For example when trading BTC/USDT and ETH/USDT, if at some point the bot sees that ETH is on the portfolio and is not within a sell order, then it will consider that this ETH should be sold and will sell it for USDT with a market order. + +## Example usages of the DCA Trading Mode +Many OctoBot cloud strategies are built using the DCA Trading Mode. + +- In our [Smart DCA making of](/blog/smart-dca-making-of), we cover the process of designing some of the OctoBot cloud strategies. + +- Trading with [ChatGPT](chatgpt-trading) can also use the DCA trading mode to manage orders diff --git a/docs/content/guides/octobot-trading-modes/dip-analyser-trading-mode.md b/docs/content/guides/octobot-trading-modes/dip-analyser-trading-mode.md new file mode 100644 index 0000000000..c38a91c569 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/dip-analyser-trading-mode.md @@ -0,0 +1,24 @@ +--- +title: "Dip Analyser trading mode" +description: "Profit from local bottoms and multiple take profits using the Dip Analyser Trading Mode on OctoBot to trade SPOT or futures markets." +sidebar_position: 6 +--- + +# Dip Analyser Trading Mode + +The Dip Analyser Trading Mode (or DipAnalyserTradingMode) is designed to buy on local bottoms and sell the bought assets using multiple take profits. It can be compared to an advanced pre-defined evaluator-based [DCA trading mode](dca-trading-mode). + +## The Dip Analyser Trading Mode can + +- Split take profits into mutliple sell orders to maximize profits +- Use limit or market entry orders +- Use stop losses +- Customize take profit prices based on the local bottom signal strength +- Trade SPOT and Futures markets + +## Configuring orders +- The Dip Analyser Trading mode can spit take profits into as many orders as defined in configuration. +- Entry amounts are using both default or configured amounts and the entry signal's Volume multiplier. +- Take profit order prices are linearly spread between the entry price and the entry signal's Price multiplier. +- Entering a Stop loss price multiplier will enable the creation of stop loss orders alongside take profit orders. +- Entry order amounts can be configured using the [order amounts syntax](order-amount-syntax). diff --git a/docs/content/guides/octobot-trading-modes/grid-trading-mode.md b/docs/content/guides/octobot-trading-modes/grid-trading-mode.md new file mode 100644 index 0000000000..03a8f53d67 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/grid-trading-mode.md @@ -0,0 +1,32 @@ +--- +title: "Grid trading mode" +description: "Easily profit from sideway markets by maintaining a grid-like set of buy and sell orders using the Grid Trading Mode." +sidebar_position: 7 +--- + +# Grid Trading Mode + +The Grid Trading Mode (or GridTradingMode) is designed to profit from sideway markets by maintaining a grid-like set of buy and sell orders. Make small yet regular profits on each small market change with minimized risks using grid trading. + +<div style="text-align: center"> + +![grid trading illustrated by a man stepping up on green stairs grabbing coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) + +</div> + +The Grid Trading Mode is a simplified version of the [Staggered Orders Trading Mode](staggered-orders-trading-mode). + +## The Grid Trading Mode can + +- Use a default configuration +- Be configured for each trading pair independently +- Maintain a grid of buy and sell orders using a the configured spread and increment configured in flat values +- Trail up and down to follow the market when the traded pair's price moves beyond the grid +- Use a limited amount of funds +- Use configured amount for each order +- Automatically dispatch newly deposited funds +- Include a delay when creating opposite orders when a buy or a sell is filled +- Initialize the grid based on a custom price +- Trade SPOT markets +- Automatically optimize your portfolio holdings to create the perfect grid using the `Optimize Initial Portfolio` command +- Pause orders mirroring using the `Pause Orders Mirroring` command diff --git a/docs/content/guides/octobot-trading-modes/index-trading-mode.md b/docs/content/guides/octobot-trading-modes/index-trading-mode.md new file mode 100644 index 0000000000..60dbdb6c72 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/index-trading-mode.md @@ -0,0 +1,62 @@ +--- +title: "Index trading mode" +description: "Invest in the multiple crypto at the same time and create your own crypto index using the Index trading mode." +sidebar_position: 3 +--- + + +# Index Trading Mode + +The Index Trading Mode (or IndexTradingMode) is designed to maintain your portfolio using a predefined cryptocurrencies configuration. + +<div style="text-align: center"> + <div> + ![index trading illustrated by a crypto basket](/images/guides/crypto-basket.png) + </div> +</div> + +Similarly to [OctoBot cloud's crypto baskets](https://www.octobot.cloud/features/crypto-basket), The Index Trading Mode enables you to easily invest in sets of cryptocurrencies. + +## The Index Trading Mode can + +- Evenly split your reference market holdings into the different coins of your traded pairs +- Check and adapt your portfolio if a crypto: + - Takes a larger part of your portfolio than expected + - Takes a smaller part of your portfolio than expected + - Is missing from your portfolio +- Check and adapt your portfolio whevener you want when starting your OctoBot or by checking your portfolio or a regular basis + +## The way funds are dispatched +When starting a OctoBot with the Index Trading Mode, your OctoBot will: +1. Value all the assets configured in your profile traded pairs and compute your portfolio holdings ratios +2. If a crypto from the traded pairs is missing from your portfolio or present with the wrong ratio, a rebalance is triggered. +3. If a rebalance is triggered, then your funds are converted to the reference market and then split into the configured coins + +## Using OctoBot cloud crypto baskets +When using the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension), you can use every crypto basket available OctoBot cloud directly from your open source OctoBot. + +<div style="text-align: center"> + <div> + ![index trading illustrated by a crypto basket](/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png) + </div> +</div> + +This way, when an OctoBot cloud crypto basket gets updated, for example if the top 20 of the crypto market changes or if a new coin joins the AI crypto basket, then your open source OctoBot will also automatically update its basket. + +## Configuring rebalances +### Trigger period +Your OctoBot can check the content of your portfolio on a regular basis to make sure it is still representative of the configured index. + +The `Trigger period` is the number of days for your OctoBot to wait before rechecking the content of your portfolio against the index ideal content. + +### Rebalance cap +When checking the content of your portfolio, the ideal index content will never be exactly matched. As crypto prices change all the time, there will always be minor differences between your holdings and the theoretical holdings of your index. + +The `Rebalance cap` defines a value in `%` from which to consider a holding ratio out of sync with the target ratio of an index. + +**Example with a 4 crypto index: BTC, ETH, SOL and AVAX:** + +Ideally, the portfolio would contain exactly 25% of each. +However, if the price of AVAX increases by 10%, it might now take 28% of the portfolio instead of the ideal 25%. In this case, during the next portfolio rebalance check, 2 outcomes are possible: +- A. `Rebalance cap` is 3% or lower: As the AVAX holding ratio is 3% higher than the ideal 25%, a rebalance is triggered, therefore distributing the AVAX gains into BTC, ETH and SOL +- B. `Rebalance cap` is higher than 3%: the AVAX holding ratio is still within the ideal ratio plus/minus the `Rebalance cap`: no rebalance is required, nothing happens. diff --git a/docs/content/guides/octobot-trading-modes/order-amount-syntax.md b/docs/content/guides/octobot-trading-modes/order-amount-syntax.md new file mode 100644 index 0000000000..0a25fa390f --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/order-amount-syntax.md @@ -0,0 +1,82 @@ +--- +title: "Orders amount syntax" +description: "Configure your OctoBot orders sizes using the many available options. Size your orders based your a percent of your portfolio, a scaling or even a static amount." +sidebar_position: 10 +--- + + +# The order amounts syntax + +Using OctoBot, you can size your orders based on many different factor such as your portfolio holdings, use amounts that remain constant or that scale up or down according to your portfolio growth. + +Order sizes can be configured in your trading mode configuration, in profile settings. + +Note: you can also leave the order amount configuration empty and trading modes will use a percent of your portfolio (computed based on your risk level) when no value is configured. + + +:::info + In the order amounts syntax, `%X` is always equivalent to `X%`. Therefore, using `%s` or `s%` is strictly identical. This is true for every 2-characters identifier. +::: + + +## Constant amounts +Amounts that always remain constant. + +### Flat base amount +A static amount to use in each order, in base currency. + +> Use `0.1` to trade 0.1 BTC on each BTC/USD order. + +### Flat quote amount: q +A static amount to use in each order, in quote currency. + +> Use `100q` to trade 100 USD worth of BTC on each BTC/USD order. + +## Scaling amounts +Amounts that scale with the total portfolio value. Scaling amounts are useful to reinvest profits. + +### Traded symbol assets percent: s% +A percent of combined holdings value associated to the traded symbol assets. + +> Use `12s%` to trade 12% of cumulated BTC & USDT holdings value when trading BTC/USDT. + +Note: unlike `t%`, `s%` ignores other traded pairs assets holdings. + +### Total traded assets percent: t% +A percent of combined holdings associated to each configured trading pairs assets. + +> Use `12t%` to trade 12% of available BTC & ETH & SOL & USDT holdings value when trading BTC/USDT while also trading ETH and SOL in other trading pairs. + +`t%` ignores assets in your holdings that are not associated to any currently traded pairs. + +:::info + Total traded assets percent is especially useful to maintain scaling order sizes through time regardless of other trading pairs. This ignores other assets that might be in portfolio but are not to be traded. +::: + +## Variable amounts +Amounts that change after each buy or sell order. Variable amounts can be useful to buy less and less when available funds are reduced for example. + +### Total asset holdings: % +A percent of the total portfolio holdings of the traded asset. + +> Use `2%` to trade 2% of the total portfolio holdings of the traded asset. + +Here total portfolio holdings means your holding of the asset to buy or sell with. It would be USDT in BTC/USDT buy orders. + +:::info + When using total asset holdings, once an order is filled, if the total portfolio holdings of the traded asset is reduced, the same % amount will create smaller subsequent orders. Similarly, it will also create bigger ones if more of this asset becomes available, after a sell for example. +::: + +### Available asset holdings: a% +A percent of the available holdings of the traded asset. + +> Use `12a%` to trade 12% of the available portfolio holdings of the traded asset. + +Similarly to `%`, here holdings means your holding of the asset to buy or sell with. The difference is that `a%` will only count available funds, which means funds that are not already locked in open orders. + +### Position percent: p% +A percent of the given symbol current position. + +> Use `20p%` to trade using 20% of the open position total value. + +_Only available when trading futures._ diff --git a/docs/content/guides/octobot-trading-modes/order-price-syntax.md b/docs/content/guides/octobot-trading-modes/order-price-syntax.md new file mode 100644 index 0000000000..f7a759849d --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/order-price-syntax.md @@ -0,0 +1,31 @@ +--- +title: "Orders price syntax" +description: "Configure your OctoBot orders prices using a percent of the current price, static price or a difference from the current price." +sidebar_position: 11 +--- + + +# The order price syntax + +Using OctoBot, you can price your orders in different ways using a either a fixed value or a value relative to the current price of an asset. + +Order prices can be configured in your trading mode configuration, in profile settings. + +## Constant price +A price that always remain constant. + +> Use `50000` to set your order price at exactly "50000" USDT when trading BTC/USDT for example. + +## Delta amount: d +A value that increases or reduces the current price using a predefined value. + +> Use `100d` to set your order price 100 higher that the current price. For example, if the current price is "50000", then the order price would be "50100". + +> Use `-400d` to set your order price 400 lower that the current price. For example, if the current price is "50000", then the order price would be "49600". + +## Percent amount: % +Percent increase or decrease from the current price. + +> Use `10%` to set your order price 10% higher that the current price. For example, if the current price is "50000", then the order price would be "55000". + +> Use `-25%` to set your order price 25% lower that the current price. For example, if the current price is "50000", then the order price would be "37500". diff --git a/docs/content/guides/octobot-trading-modes/staggered-orders-trading-mode.md b/docs/content/guides/octobot-trading-modes/staggered-orders-trading-mode.md new file mode 100644 index 0000000000..d07f4b75dd --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/staggered-orders-trading-mode.md @@ -0,0 +1,30 @@ +--- +title: "Staggered Orders trading mode" +description: "Profit from sideway markets by maintaining a grid-like set of buy and sell orders with advanced configuration using the Staggered Orders Trading Mode." +sidebar_position: 8 +--- + +# Staggered Orders Trading Mode + +The Staggered Orders Trading Mode (or StaggeredOrdersTradingMode) is designed to profit from sideway markets by maintaining a grid-like set of buy and sell orders. Make small yet regular profits on each small market change with minimized risks grid order. + +<div style="text-align: center"> + +![grid trading illustrated by a man stepping up on green stairs grabbing coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) + +</div> + +The Staggered Orders is a more complex and flexible version of the [Grid Trading Mode](grid-trading-mode). In most situations, the [Grid Trading Mode](grid-trading-mode) is a better choice. + +Where the Grid Trading Mode is mainly defined around the number of orders you want to maintain, the Staggered Orders Trading Mode focuses on the price range you want to cover. By configuring upper and lower bounds, spread and increment, the Staggered Orders Trading Mode will determine how many orders are required, use the maximum available funds and maintain the relevant orders on exchange. + +## The Staggered Orders Trading Mode can + +- Be configured for each trading pair independently +- Specify the way funds are dispatched within buy and sell orders +- Maintain a grid of buy and sell orders using a the configured spread and increment configured in % +- Automatically compute the required number of sell and buy orders according to the configured upper and lower bounds as well a spread and increment +- Maintain a limited amount of orders on exchange (exchanges usually enforce a limit on simultaneous open orders). This limit is set by the `Operational depth` parameter. Other orders will be tagged as "virtual": they will only be created when necessary. +- Include a delay when creating opposite orders when a buy or a sell is filled +- Trade SPOT markets +- Automatically optimize your portfolio holdings to create the perfect staggered orders grid using the `Optimize Initial Portfolio` command diff --git a/docs/content/guides/octobot-trading-modes/trading-modes.mdx b/docs/content/guides/octobot-trading-modes/trading-modes.mdx new file mode 100644 index 0000000000..c9cc62d8b3 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/trading-modes.mdx @@ -0,0 +1,95 @@ +--- +title: "Trading modes" +description: "Discover how trading strategies work in OctoBot and find the major trading modes based on DCA, grid trading, AI and TradingView." +sidebar_position: 1 +--- + + + +# Octobot Trading modes + +## Main concepts + +Trading modes in OctoBot are what defines how to create, maintain and cancel orders. They are a key component of any trading strategy and are compatible with each [supported exchange](/guides/exchanges). + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="Exdl94cmMDQ" title="Trading configuration in OctoBot" /> + +A trading mode might also rely on evaluators to signal long and short opportunities. + +### Trading modes responsabilities + +In OctoBot, Trading modes define: + +- What type of order to create +- How much money to put in each order and at what price +- How to take profit or stop losses +- When to cancel orders if a cancel is necessary + +### Evaluators responsabilities + +While trading modes are responsible for the orders part of a trading strategy, strategies that are relying on technical, social, AI or real-time evaluators such as [statistics-based trading modes](#statistics-based-trading-modes) are also using `evaluators` and `strategies` to be notified **when** they should create orders. + +This means that when using [statistics-based trading modes](#statistics-based-trading-modes), you might need to also enable: + +- One or more `evaluators`, that will be responsible for analyzing new candles, prices or social media and provide long or short signals when necessary. +- A `strategy`, that gathers all `evaluator` signals and sums it up into one signal given to the trading mode. + +<div style={{textAlign: "center"}}> + ![ai trading illustrated by octobot head with chatgpt logo trading bitcoin + ethereum litecoin usd + logos](/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png) +</div> + +An `evaluator` could be a technical evaluator such as a <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">RSI</a> evaluation of the last 30 candles, [asking ChatGPT for its opinion about the market](chatgpt-trading), [monitoring reddit](/guides/octobot-interfaces/reddit) or many other things. There are no limit in what `evaluators` are capable of doing to create the right signals for your trading strategies. + +:::info + When using `evaluators`, the time frames to use for your evaluations is + defined in the `strategy` configuration. +::: + +## Built-in trading modes + +OctoBot comes with built-in trading modes. All of them can be configured and deeply tested using [backtesting](/guides/octobot-usage/backtesting). + +### Statistics-based trading modes + +Entries (and possibly exits) are computed using statistics. It might be from technical evaluators, AI, social medias, price events or many other things. + +<div style={{textAlign: "center"}}> + ![dca trading illustrated by a man watering a plant growing + money](/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png) +</div> + +- [DCA trading mode](dca-trading-mode): Use an advanced Dollar cost Averaging strategy which can (be doesn't have to) include multiple buy and sell orders, stop losses, investment periods and technical or AI evaluators. +- [Daily Trading Mode](daily-trading-mode): Create buy and sell orders based on your technical, social or AI evaluators. +- [Dip Analyser trading mode](dip-analyser-trading-mode): Trade local lows and optimize profits using multiple take profits. + +### Low-risk grid trading modes + +Buy and sell orders are created deterministically according to the trading mode's configuration. There is no probability in those algorithms. + +<div style={{textAlign: "center"}}> + ![grid trading illustrated by a man stepping up on green stairs grabbing + coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) +</div> + +- [Grid trading mode](grid-trading-mode): profit from stable markets using a simple grid trading strategy. +- [Staggered Orders trading mode](staggered-orders-trading-mode): profit from stable markets using an advanced grid trading strategy. + +### Automated TradingView strategies + +<div style={{textAlign: "center"}}> + ![tradingview automation illustrated by tradingview + logo](/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +Entries and exits are created based on your TradingView signals. In this trading mode, the core of your strategy lies on TradingView and Octobot acts as an automation to synchronize your strategy with any exchange account. + +- [TradingView trading mode](tradingview-trading-mode): Easily automate orders from your TradingView indicators or strategies on any exchange. + +:::info + As OctoBot is open-source, if you know a bit of Python programming, you can + also create you own trading mode. +::: diff --git a/docs/content/guides/octobot-trading-modes/tradingview-trading-mode.md b/docs/content/guides/octobot-trading-modes/tradingview-trading-mode.md new file mode 100644 index 0000000000..2938a4ab47 --- /dev/null +++ b/docs/content/guides/octobot-trading-modes/tradingview-trading-mode.md @@ -0,0 +1,46 @@ +--- +title: "TradingView trading mode" +description: "Easily automate your TradingView strategies and indicators trades on exchange using the TradingView Trading Mode." +sidebar_position: 9 +--- + + + +# TradingView Trading Mode + +The TradingView Trading Mode (or TradingViewTradingMode) is designed to automate orders creation on exchanges based on <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> signals. + +<div style="text-align: center"> + +![tradingview automation illustrated by tradingview logo](/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png) + +</div> + +Simply emit alerts from your TradingView indicators or strategies to trade on any exchange. Learn more on how to configure your OctoBot to trade using TradingView and the [alert on syntax](/guides/octobot-interfaces/tradingview/alert-format) on the [Automating trading from TradingView guide](/guides/octobot-interfaces/tradingview). + +:::info + The TradingView Trading Mode guides cover using TradingView with [OctoBot + trading bots](https://www.octobot.cloud/trading-bot). Please use the [TradingView automated trading + investor guide](/en/investing/tradingview-automated-trading) if you are + automating TradingView strategies using a [TradingView + OctoBot](/en/investing/tradingview-trading-tutorial) from + [www.octobot.cloud](https://www.octobot.cloud/). +::: + +## The TradingView Trading Mode can + +- [Automate TradingView indicator signals](/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator) +- [Automate TradingView Pine Script strategies signals](/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy) +- Create and cancel market, limit and stop orders +- Create simple entry or exit orders +- Create entry orders with a pre-defined take profit +- Create entry orders with a pre-defined stop loss +- Create stop loss orders +- Trade SPOT and Futures markets + +## Configuring orders + +- Each TradingView signal contains the details of the order to be created. +- `Cancel previous orders` can be enabled to only maintain one order per trading pair. +- Each order amount can be configured using the [order amounts syntax](order-amount-syntax). +- Each order price can be configured using the [order price syntax](order-price-syntax). diff --git a/docs/content/guides/octobot-usage/_category_.json b/docs/content/guides/octobot-usage/_category_.json new file mode 100644 index 0000000000..b598806615 --- /dev/null +++ b/docs/content/guides/octobot-usage/_category_.json @@ -0,0 +1 @@ +{"label": "Usage", "position": 5} diff --git a/docs/content/guides/octobot-usage/backtesting.md b/docs/content/guides/octobot-usage/backtesting.md new file mode 100644 index 0000000000..da37c3c354 --- /dev/null +++ b/docs/content/guides/octobot-usage/backtesting.md @@ -0,0 +1,172 @@ +--- +title: "Backtesting" +description: "Use backtesting to risk-free test and optimize your OctoBot trading strategies. Evaluate your strategy performances on the past days, weeks, months or years." +sidebar_position: 3 +--- + + + +# Backtesting + +Backtesting is the process testing a system's performances on past data. It uses recorded data of cryptocurrency or stock markets. Learn more about backtesting on <a href="https://www.investopedia.com/terms/b/backtesting.asp" rel="nofollow">investopedia</a>. + +![octobot backtesting result summary](/images/guides/backtesting/octobot-backtesting-result-summary.png) + +In OctoBot, backtesting is a key tool to quickly test and optimize your strategies in a risk-free environment. It enables you to execute your strategy by replaying a past scenario and identify the best settings for your traded markets. + +## Backtesting a trading strategy in OctoBot + +OctoBot includes a backtesting engine that can quickly execute OctoBot trading strategies on historical data. To backtest a strategy, all you need is to: + +1. Select the profile to test in profile selector. +2. Use the data collector to download historical data +3. Start a backtesting +4. Analyse results + +### Selecting a profile to run in backtesting + +Go to the profile selector on your OctoBot and select the profile you want to backtest. + +![octobot backtesting profile selector](/images/guides/backtesting/octobot-backtesting-profile-selector.png) + +#### Trading modes, strategies and evaluators + +In backtesting, OctoBot uses the most recent version of your selected trading mode, strategies and evaluators as well as their latest configuration. + +This means that you can select different trading modes & evaluators and restart backtestings without having to restart OctoBot: your next backtesting will take your latest changes. +This is useful to quickly try different values of an indicator or any other configuration parameter. + +Note: when backtesting a strategy, prefer selecting a profile using `paper trading` (use the [trading simulator](simulator)), this way any change you make won't affect your real trading profiles. + +#### Initial portfolio + +Similarly to simulated trading, your backtesting initial portfolio is built using the configured `Starting-Portfolio` in your profile. + +When running a backtesting, make sure you configured your start portfolio with enough funds for your strategy to be able to trade. Don't forget to add some BTC when trading against BTC for example. + +#### Traded assets settings in backtesting + +- **Coins**: Selected coins and pairs are ignored as the datafile you will select to run your backtestings will provide the traded pairs +- **Reference market**: The selected reference market will change to the common quote of your datafile traded pairs if there is a common quote. Ex: a datafile with BTC/USDT and ETH/USDT will force its reference market to USDT to compute profits from USDT + +### Download historical data + +Using the data collector, available from the backtesting tab, you can download historical data from most crypto exchanges. + +![octobot backtesting data collector](/images/guides/backtesting/octobot-backtesting-data-collector.png) + +You can download data from multiple trading pairs and timeframes at the same time. When using such files, backtesting will run your strategy on each available pair and use the timeframes that are [required in its configuration](../octobot-trading-modes/trading-modes#evaluators-responsabilities). + +#### Full History exchanges + +When selecting historical data to download, exchanges are split into 2 categories: `Full History` and `Other`. Here are the differences. + +**Full history** exchanges allow to download historical data on a selected time range. When doing so, each candle from each timeframe on each symbol will be downloaded for the selected time range. This means that when selecting a time range: + +- Downloaded history is complete for each candle on the selected time range +- The download process can be slow if you selected a large total amount of candles +- Full history data files are marked as `Full` in the datafile selector + **Warning**: not selecting a time range in Full history exchanges will default to downloading the latest candles only, similarly to **Other** exchanges. + +**Other** exchanges are exchanges that do not (currently) allow to download historical data. This means that: + +- Only the most recent candles will be downloaded (usually the last 500 candles) +- Selecting short and large timeframes at the same time will result in short backtestings as a backtesting only run on available candles. Ex: a backtesting data file containing the last 500 1 minute candles and the last 500 daily candles will only run on the past 500 candles, which is less than a day. +- Data files of this type as displaying their candle count in the datafile selector + +Overall, it is better to use **Full history** exchanges and select the time range to run your backtesting on. + +### Starting a backtesting + +Once your data file is downloaded, select it and start your backtesting. +![octobot backtesting data selector starting a backtesting](/images/guides/backtesting/octobot-backtesting-data-selector-starting-a-backtesting.png) + +Backtestings usually last a few seconds and run in the background, if you want, you can do something else with your OctoBot while a backtesting is running. + +You are notified once your Backtesting is complete. + +### Analysing results + +You can access your backtesting results from the backtesting tab. Your backtesting report is below the data selector. +In this report, there is a summary of your backtesting profits, charts which historical prices, trades and open orders as well as a trades explorer. + +#### Profitability + +![octobot backtesting result summary](/images/guides/backtesting/octobot-backtesting-result-summary.png) + +This summary shows your profitability running this strategy on this time range. + +- **Bot profitability** is the profits in % of the reference market your strategy made. +- **Market average profitability** the average profitability of your traded markets. It's given as a comparison of the profits you would have made if you were having a permanent 100% exposure to your traded assets, which is extremely risky. It corresponds to equaly splitting your initial funds into those assets and holding them during the whole backtesting time. +- **Symbol profitability** is the profitability of each traded pair during backtesting time. +- **End portfolio** is the content of your portfolio at the end of the backtesting. +- **Starting portfolio** is the content of your portfolio at the start of the backtesting. +- **Reference market** is the backtesting reference market (used to compute profitabilities) + +#### Historical charts + +![octobot backtesting result graph](/images/guides/backtesting/octobot-backtesting-result-graph.png) +For each traded pair, a historical chart will be displayed. Those charts are interactive and you can select the time frame to be used. On large backtestings, selecting a longer timeframe can be easier to read. Each chart features: + +- Historical candles and trading volume +- Trades made using backtesting +- Pending open orders at the end of the backtesting + +#### Historical trades + +![octobot backtesting result trades](/images/guides/backtesting/octobot-backtesting-result-trades.png) +Each trade executed during a backtesting is available in the trades explorer where you can easily filter and sort trades to understand how your strategy behaves. + +## Going deeper with the Strategy Designer + +Backtesting as presented on this page is the basic, yet very complete already version of the [Strategy Designer](strategy-designer) available on OctoBot cloud plans. + +![octobot strategy designer results on doge btc shib](/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png) + +The strategy designer allows you to do everything the regular backtesting does and alows adds: + +- Access the **history of your backtesting** runs +- Charts to analyse your backtesting runs more efficiently with **historical portfolio value**, PNL and more +- The capability to **compare your backtesting results** between runs +- Backtesting only profiles to backtest without affecting your current live trading profile +- And much more ... + +If you are already backtesting your strategies and would like to use a more powerful tool, we strongly suggest to have a look at the [Strategy Designer](strategy-designer). + +## How backtesting works inside OctoBot + +### Backtesting vs live trading + +When running in backtesting, OctoBot uses the same code to execute a trading strategy as when running it live. This means that results of running a strategy in backtesting and live are identical as long as the input data is also identical. + +As backtesting runs using complete candles, there might be a difference with live trading as live trading could access incomplete candles to run its indicators (this is the case with real-time evaluators for example). Therefore, in backtesting **realtime evaluators can't run the same way they do in live trading** because in-construction candles are not available. + +For the same reason, as only candles data are available, backtesting on strategies that run on other data than candles data (following google trends for exmaple) is currently impossible. +The only exeption is **ChatGPT historical signals that are made available for free** thanks to OctoBot cloud when running a backtesting using the ChatGPTEvaluator on traded pairs and time frames used by <a href="https://www.octobot.cloud/explore" rel="nofollow">OctoBot cloud strategies</a> that are also using the ChatGPTEvaluator. + +### Time management + +Backtesting works by executing a strategy using past data. Therefore when running a strategy, the backtesting engine simulates the passage of time from the start of your backtesting data to the end. +Backtesting will iterate from candles to candles and each iteration will: + +1. Update the current candle for each traded pair and timeframe +2. Check if open orders should be filled given the new price data +3. Trigger a evaluation cycle for each trading pair: + 1. Push new candle(s) to evaluators + 2. Trigger strategies to sum up evaluators outputs + 3. Trigger trading modes to create or cancel orders +4. Check if orders should be filled instantly (ex: market orders) + +### Multiple traded pairs + +When selecting a datafile with multiple trading pairs, at each new time tick, associated candles (if any) will be push to evaluators. This happens sequentially, one pair after another. + +### Filling orders + +In backtesting, OctoBot has access to historical candles only. This means that to figure out if an order should be filled, it will have a look at the most recent candle. + +:::info + You can improve the accuracy of orders fills in backtesting by selecting a + short time frame in your datafile. It will make your backtesting slower but it + might be useful if orders execution must be accurate in time. +::: diff --git a/docs/content/guides/octobot-usage/frequently-asked-questions-faq.md b/docs/content/guides/octobot-usage/frequently-asked-questions-faq.md new file mode 100644 index 0000000000..88ca3822c9 --- /dev/null +++ b/docs/content/guides/octobot-usage/frequently-asked-questions-faq.md @@ -0,0 +1,122 @@ +--- +title: "FAQ" +description: "Any question when using OctoBot ? Check out the most common questions from the OctoBot community and find the detailed answers on our FAQ." +sidebar_position: 7 +--- + +# Frequently Asked Questions (FAQ) + + +## Why is my OctoBot not creating orders ? + + +Before creating any order (using [trading simulator](/guides/octobot-usage/simulator) or real trading), +OctoBot asks the exchange for its minimal (and maximal) requirements for +any order. When creating an order (following a buy or sell signal), +these order requirements are checked. If the order is not compliant, it +will not be pushed to the exchange. + +The most common case of signals without created orders is when there is +**not enough funds** of the required asset to proceed with an order. + +Example: not enough **USD** to buy BTC for a BTC/**USD** **buy** signal. + +> In [trading simulator](/guides/octobot-usage/simulator) and [backtesting](/guides/octobot-usage/backtesting) modes, OctoBot uses a simulated portfolio called +`"starting-portfolio"` that is defined in the +[trading simulator configuration](simulator#starting-portfolio). + +## How often will my OctoBot trade ? + + +It can be once in a week or 5 times a minute, this depends on the +strategy your OctoBot is using. + +For example: when using the default settings, the simple mixed strategy +evaluator is using the 1 hour timeframe as the shortest one. Since it's +a technical evaluator based strategy, it will update every hour. In this +setup, your OctoBot will create new trades every hour it sees an +opportunity. There might be hours with no opportunity and no order +creation. + +## I updated my OctoBot and now it's not starting anymore. + + +This is probably due to an issue in your **tentacles** folder. Try +removing it and restarting your OctoBot, it will download the latest +versions of each tentacle and should fix the problem. + +## How to follow my OctoBot's trading activity ? + + +When your OctoBot places an order or has a order that is filled, it will +appear on the web interface. The web interface displays the list of open +orders and the list of filled orders. + +You can also receive Telegram and soon Discord notifications on +orders placement and trades. + +## What part of my portoflio will be traded by OctoBot ? + + +OctoBot will consider it can trade 100% of the portfolio you give it. +However how this funds will be used (size of orders, orders frequency, +...) depends on your risk setting and the trading mode you are using. + +## How to change the backtesting starting portfolio ? + + +Each [backtesting](/guides/octobot-usage/backtesting) run is using the [trading simulator configuration](/guides/octobot-usage/simulator.md#starting-portfolio) +as a base. + + +## Why is my reference market changing in backtesting ? + + +The reference market is automatically switched to the base of the traded +pair in [backtesting](/guides/octobot-usage/backtesting) to compute more accurate profitability. + +Example: a backtesting on ETH/**BNB** would make **BNB** the temporary +reference market for this backtesting. + +## How much of my exchange funds will be traded by OctoBot ? + +For now, OctoBot uses all the available funds to trade. Therefore it's +possible that 100% of the exchange funds on an account will be traded. + +## Why is backtesting not using all available data ? + + +[OctoBot backtesting](/guides/octobot-usage/backtesting) is always using the **maximum available data allowing to keep a realistic simulation**. + +However exchange are usually not giving all of their data: they give the +last X candles (500 for binance). Therefore a regular backtesting data +file has 500 1hour (1h) candles, 500 1minute (1m) candles etc. These +candles are always the most recent ones. That means that when running a +backtesting on 1h and 1d time frames, the maximum backtesting range is +not 1h and 1d with 500 candles each but the time range where **both** 1h +and 1d have data: there the past 500 hours (500 1h candles and +approximately 20 1d candles). + +As an example, in a backtesting with 1m and 1d candles: the common time +range in 1d is `500/(60*24) = 0.35` which means the whole backtesting is +carried out with the data of one day: the last daily candle of the 500 +1d candle only while using 100% of the shortest time frame: 1m (which +all happened during this one day). + +## "RuntimeError: Event loop is closed" in my OctoBot's logs, is there a problem ? + + +This error (or something very similar) might appear in your OctoBot's logs: + +``` +<function _ProactorBasePipeTransport.del at 0x000001064DE8A310> +Traceback (most recent call last): + File "asyncio\proactor_events.py", line 116, in del + File "asyncio\proactor_events.py", line 108, in close + File "asyncio\base_events.py", line 719, in call_soon + File "asyncio\base_events.py", line 508, in _check_closed +RuntimeError: Event loop is closed +``` + +This is a minor issue with the current Windows implementation of the asynchronous +libraries OctoBot is using. It has absolutely no effect and can be completely ignored. diff --git a/docs/content/guides/octobot-usage/futures-trading-with-octobot.md b/docs/content/guides/octobot-usage/futures-trading-with-octobot.md new file mode 100644 index 0000000000..58a2fe774b --- /dev/null +++ b/docs/content/guides/octobot-usage/futures-trading-with-octobot.md @@ -0,0 +1,33 @@ +--- +title: "Futures trading" +description: "OctoBot can be used to trade Futures using configured strategies TradingView on Binance, Bybit, Kucoin and OKX." +sidebar_position: 2 +--- + +# Futures Trading with OctoBot + +OctoBot can be used to configure and automate many Futures trading strategies on multiple exchanges exchanges. + +## Supported Trading Modes +The following [Trading Modes](../octobot-trading-modes/trading-modes) can be used to trade using Futures: +- [DCA Trading Mode](../octobot-trading-modes/dca-trading-mode) +- [Dip Analyser Trading Mode](../octobot-trading-modes/dip-analyser-trading-mode) +- [TradingView Trading Mode](../octobot-trading-modes/tradingview-trading-mode) +- [Daily Trading Mode](../octobot-trading-modes/daily-trading-mode) + +## Supported exchanges +The following exchanges can be used to trade Futures on OctoBot +- [Binance](exchanges/binance) +- [Bybit](exchanges/bybit) +- [Kucoin](exchanges/kucoin) + +## Leverage configuration + +The current Futures trading leverage value to use with a profile can be set from the configuration page of your enabled Trading Mode, which is accessible from your [profile configuration](../octobot-configuration/profile-configuration). +![access octobot trading mode config from profiles](/images/guides/configuration/access-octobot-trading-mode-config-from-profiles.png) + +Note: futures trading must be enabled on your profile exchange for the leverage setting to appear in your trading mode configuration. + +## Cross and Isolated margin + +For now, only Isolated margin is supported by OctoBot. Cross margin should not be used to trade with OctoBot. diff --git a/docs/content/guides/octobot-usage/having-multiple-octobots-on-one-computer.md b/docs/content/guides/octobot-usage/having-multiple-octobots-on-one-computer.md new file mode 100644 index 0000000000..da8d422e11 --- /dev/null +++ b/docs/content/guides/octobot-usage/having-multiple-octobots-on-one-computer.md @@ -0,0 +1,42 @@ +--- +title: "Having multiple OctoBots" +description: "Guide on how to have multiple OctoBots running on the same computer. Use multiple accounts on the same exchange and invest using different strategies." +sidebar_position: 5 +--- + +# Having multiple OctoBots on one computer + +OctoBot is designed to be lightweight. While making OctoBot trade on many pairs and exchanges with a very large amount of trades can make it take a lot of CPU and RAM on your computer, OctoBot usually requires less than 1GB of ram and less than 1% of CPU. + +Running as many OctoBot as you need on a single computer is most often possible, here is how. + +## How to run many OctoBots on the same computer? + +Here are the steps to start another OctoBot on your computer: +1. Stop your current OctoBot if it is running +2. Duplicate the whole folder of your current OctoBot +3. From your new folder, start the new OctoBot. It will start on the same web address as the previous bot +4. Change the new OctoBot web interface port value (see the [web interface guide](../octobot-interfaces/web#configuration)) +5. Restart your new OctoBot. Warning: the address of the interface your new OctoBot with now contain the new port value. Example: if your first OctoBot's address was `http://localhost:5001/`, then `5001` was its port. If you used `5002` for your other OctoBot, then your other OctoBot's address is now `http://localhost:5002/` + +If your initial port was `5001`, then starting your initial OctoBot (from the initial folder) will start the bot on `http://localhost:5001/`. Starting your other bot, from the second folder, will start on `http://localhost:5002/`. Both bots can be used simultaneously and connect to the exchange account of your choice. + +## Why changing OctoBot port and folder? + +Each individual OctoBot requires only two things from your computer in order to run: +1. **A dedicated folder to be executed into**. This is necessary for the bot to have its own configuration and logs management +2. **A unique web interface port**. Two OctoBots can't use the same web interface port. Using the same port value will prevent your second OctoBot from starting its web interface. + +## Benefits of running multiple OctoBots + +While a single OctoBot can be used to trade as many trading pairs as needed on multiple exchanges, running multiple OctoBots enables to: +- Trade on many accounts on the same exchange +- Split an account portfolio into assets that can be traded using different strategies +- Trade both spot and futures markets on the same exchange +- Use multiple strategies at once on real and / or [risk-free simulated trading](simulator) + +## Limits related to running many bots at once + +- **Rate limit**: Exchanges have rate limit policies that can prevent multiple OctoBots running from the same IP address from properly fetching market data. When using multiple OctoBot on the same exchange, it is important to make sure not to receive rate limit related errors, or your IP might get temporarily banned. +- **Bandwidth**: Using multiple OctoBot will increase the required bandwidth to fetch and update all the necessary market data. Always make sure that your internet connection can properly handle this increase, or your strategies will run with a delay. +- **RAM & CPU**: When running multiple OctoBots on a low-end or overloaded computer, your bots might be slowed down if RAM or CPU are insufficient. diff --git a/docs/content/guides/octobot-usage/simulator.md b/docs/content/guides/octobot-usage/simulator.md new file mode 100644 index 0000000000..4b0663e902 --- /dev/null +++ b/docs/content/guides/octobot-usage/simulator.md @@ -0,0 +1,55 @@ +--- +title: "Simulator" +description: "You prefer trading with simulated money before using your real funds ? Use OctoBot trading simulator to run any strategy using paper trading." +sidebar_position: 1 +--- + +# Simulator + + +OctoBot can be used in a simulation mode. In this mode, OctoBot will +simulate trades using the exact same process as with the real trading +mode. + +![octobot trading settings from profiles](/images/guides/configuration/octobot-trading-settings-from-profiles.png) + +The only difference with a real trader is in the starting portfolio that +is set in the Trade Simulator configuration. +Each profile has its own simulated portfolio. This portfolio will be +managed by OctoBot and simulated orders will be using these available +cryptocurrencies as a basis. + +The trader simulator will use the +exchanges' last trades to figure out if the current orders would have +been filled or not. If they would have been filled, simulated orders get +filled and the current simulated portfolio is updated accordingly. + +## Fees + +Fees in % to be deducted at simulated orders completion in simulated orders and [backtesting](backtesting). Examples: +- A maker fee configured to `0.1` corresponds to a 0.1% trading fee on marker orders. +- A taker fee configured to `1.2` corresponds to a 1.2% trading fee on taker orders. + +## Starting portfolio + +This is the imaginary portfolio given to the trader simulator to create +its orders with. It can contain any amount of any cryptocurrency. If +these cryptocurrencies are in the **crypto-currencies** configuration, +they will be traded as if they were from a real portfolio. + +The simulated portfolio is kept between instances of your OctoBot is simulated trading. It will be reset to the value of your profile's Starting portfolio when: +- Clicking `Reset history` on your portfolio view +- Changing the value of your current profile Starting portfolio + +The starting portfolio is also **used for backtesting**. + +## Mode, Reference-market and Risk + + +These parameters are defined in the **trading** section, which is used by the trader simulator as +well as the real trader. This **trading** section is described on +the [trading settings](/guides/octobot-configuration/profile-configuration#trading) + +## Real trader + +Additionally to the simulated trading system, a real trader is available in OctoBot. diff --git a/docs/content/guides/octobot-usage/strategy-designer.mdx b/docs/content/guides/octobot-usage/strategy-designer.mdx new file mode 100644 index 0000000000..2990b6f2d4 --- /dev/null +++ b/docs/content/guides/octobot-usage/strategy-designer.mdx @@ -0,0 +1,142 @@ +--- +title: "Strategy Designer" +description: "Use the best tool to create, test and optimize your trading strategy. The Strategy Designer enables you to deeply analyse and improve your strategies." +sidebar_position: 4 +--- + + + +# Strategy Designer + +The Strategy Designer is an advanced tool to efficiently create, backtest and optimize your trading strategies. It is available from the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension). + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="V4Z1xUhqWz8" title="The OctoBot Strategy Designer" /> + +**[Start your OctoBot](https://www.octobot.cloud)** + +The Strategy Designer is a complement to the integrated [backtesting tool](backtesting) of OctoBot allowing to go much deeper when creating, testing and optimizing a trading strategy. + +## Backtesting a strategy + +![octobot strategy designer results on doge btc shib](/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png) + +### Overview +When backtesting a strategy using the Strategy Designer, OctoBot uses a similar backtesting engine as on [regular backtesting](backtesting). Everything mentioned on regular backtesting is also true when backtesting with the Strategy Designer. + +On top of regular backtestings, the Strategy Designer adds the capability of: +- Storing a complete history of your backtesting runs with their results +- Using charts to easily visualize your strategy behavior throughout the backtesting run +- Comparing multiple backtesting run results +- Running backtesting with dedicated profiles not to affect your current running OctoBot while optimizing a strategy + +### Backtesting runs history + +With the Strategy Designer, you can view previous backtesting runs and select them to load their results in the main view. + +<div style={{textAlign: "center"}}> +![octobot strategy designer explore your past backtestings](/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings.png) +</div> + +The explorer also enables you to efficiently filter the runs you want to display and what you want to display when comparing backtesting runs. +- You might want to sort your runs by profitability while filtering out runs with a profitability larger than 20%. +- Or maybe you want to select all the runs where take profits are enabled and where profits are negative, this is also possible. + +![octobot strategy designer explore your past backtestings customize columns](/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings-customize-columns.png) +The backtesting run explorer has a detailed view that can be configured as you wish. + +### Historical charts + +Each Strategy Designer backtesting run results can be displayed with charts on the historical PNL, portfolio value, trade entries and exits. +![octobot strategy designer historical charts](/images/guides/strategy-designer/octobot-strategy-designer-historical-charts.png) + +The whole interface can also be resized to focus on the area that matters. + +### Backtesting runs comparison + +You can use the explorer to select a backtesting run. You can also select multiple runs to display them together. This allows you to easily visualize the outcome of using different strategies or settings on the same strategy. +![octobot strategy designer compare run results](/images/guides/strategy-designer/octobot-strategy-designer-compare-run-results.png) + +Everything that is displayed is compatible with this comparator: you can compare PNL, portfolio histry, trades and more. + +### Backtesting dedicated profile + +Each regular profile you use on OctoBot also contains a backtesting profile enabled by the Strategy Designer. +Therefore, each live profile you select is used as a basis for the associated backtesting profile. This backtesting profile is automatically created when accessing the Strategy Designer and allows you to edit any configuration without affecting the live trading session. + +Selecting another live profile from the profile tab will also select the associated backtesting profile. This way you can have as many backtesting profiles as you want, you just have to remember that each live profile is bound to its backtesting profile used in the Strategy Designer. + +<div style={{textAlign: "center"}}> +![octobot strategy designer use as live profile](/images/guides/strategy-designer/octobot-strategy-designer-use-as-live-profile.png) +</div> + +Backtesting profiles can also be turned into live profiles whenever you want for you to start trading live with your backtesting-optimized trading strategy. + +## Creating a strategy + +When using the Strateg Designer, you can create a trading strategy using a simple step by step process or configure the current backtesting profile on the fly. + +### Creating a brand new strategy + +![octobot strategy designer create a new strategy select coins](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-coins.png) +1. Select the pairs you want to trade. + +![octobot strategy designer create a new strategy select portfolio](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-portfolio.png) +2. Configure the initial portfolio of your profile. + +![octobot strategy designer create a new strategy select trading mode](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-trading-mode.png) +3. Select and configure the [trading mode](../octobot-trading-modes/trading-modes) to use. + +![octobot strategy designer create a new strategy select and configure evaluators](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-and-configure-evaluators.png) +![octobot strategy designer create a new strategy configure evaluators settings](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-configure-evaluators-settings.png) +4. Select and configure the [strategy and evaluators](../octobot-trading-modes/trading-modes#evaluators-responsabilities) to use. + +![octobot strategy designer create a new strategy summary](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-summary.png) +5. Check that everything is correctly configured and start your a backtesting on your new strategy. +Tip: you can give a name to your backtesting to quickly identify it from the explorer. + +### Editing the current strategy + +If you just want to edit the current backtesting profile, you can use the configuration shortcuts directly available from the Strategy Designer interface. +![octobot strategy designer edit current profile +](/images/guides/strategy-designer/octobot-strategy-designer-edit-current-profile.png) + +Quickly edit your trading modes, strategy and evaluators configuration. + +<div style={{textAlign: "center"}}> +![octobot strategy designer edit current backtesting +](/images/guides/strategy-designer/octobot-strategy-designer-edit-current-backtesting.png) +</div> + +Or configure the context of your backtesting. + +<div style={{textAlign: "center"}}> +![octobot strategy designer new backtesting](/images/guides/strategy-designer/octobot-strategy-designer-new-backtesting.png) +</div> + +And run a new backtesting with your updated profile or backtesting settings. + +## Configuring the Strategy Designer + +### Optimization campaigns +Your backtesting runs can be associated to an optimization campaign. +Optimization campaigns have no effect on your backtesting results but can be used to select the backtesting runs to display in the explorer. + +<div style={{textAlign: "center"}}> +![octobot strategy designer campaigns selector](/images/guides/strategy-designer/octobot-strategy-designer-campaigns-selector.png) +</div> + +You can have as many campaigns as you want to easily differentiate backtesting runs from different contexts and avoid having to manually filter out previous or unrelated backtesting runs. + +### Display + +Sometimes you might want to change the way the Strategy Designer displays results. +<div style={{textAlign: "center"}}> +![octobot strategy designer display settings](/images/guides/strategy-designer/octobot-strategy-designer-display-settings.png) +</div> +Displaying less elements can sometimes makes display faster or even avoid browser RAM related issues when displaying a large amount of data in charts. + +To address browser performances issues, you can reduce the `Lines plot instead of candlesticks threshold` value. When this threshold is reached, candlesticks are turned into lines in the chart. Unlike candlesticks, lines are very light to process on the browser as they benefit from GPU acceleration. + +**[Use the Strategy Designer](https://www.octobot.cloud)** diff --git a/docs/content/guides/octobot-usage/understanding-profitability.md b/docs/content/guides/octobot-usage/understanding-profitability.md new file mode 100644 index 0000000000..949b529c9e --- /dev/null +++ b/docs/content/guides/octobot-usage/understanding-profitability.md @@ -0,0 +1,40 @@ +--- +title: "Understanding profitability" +description: "Having a hard time understanding how profitability and Profit and loss (PNL) work in OctoBot or how to reset it ? Check out our guide." +sidebar_position: 6 +--- + +# Profitability in OctoBot + +## Historical profitability + +Every asset in OctoBot is valued using the **reference market** setting +(available in [Trading settings](/guides/octobot-configuration/profile-configuration#reference-market)). +Profitably follows this principle. + +![home](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/home.jpg) + +To compute its profitability, OctoBot evaluates the value of all its +traded assets (the ones available for trading in its configuration) by +getting their value in reference market. Profitability is the difference +between the total value of the traded assets when OctoBot +started and the total value of current holdings at the moment +profitability is displayed. + +## Historical PNL + +Profit and loss (PNL) history, which is the profit from each historical trade independently from your portfolio assets historical valuation, is displayed on the "Trading" tab. + +You can see it as "pure profits or losses from your trading strategy". + +![pnl history](/images/guides/pnl.png) + +> Please note that PNL history is not available on every trading mode. + + +## Resetting profitability history + +You can reset your OctoBot's profitability history from the **Portfolio** tab. + +## Resetting PNL history +Profit and loss history is computed using trades history. You can reset it by clearing the trades history from the **Trading** tab. diff --git a/docs/content/guides/octobot.md b/docs/content/guides/octobot.md new file mode 100644 index 0000000000..67a10e5e41 --- /dev/null +++ b/docs/content/guides/octobot.md @@ -0,0 +1,100 @@ +--- +title: "Starting your OctoBot" +description: "Any question on OctoBot the open source trading robot ? Here are the guides on how to install your bot locally or using a cloud and how to trade using Telegram, ChatGPT or TradingView." +sidebar_position: 1 +--- + + + +# Starting your OctoBot + +:::info +This "Trading bot" section is dedicated to the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> users. +::: + +## Setting up your OctoBot + +There are two ways to setup your OctoBot: + +- Using [www.octobot.cloud](https://www.octobot.cloud) to easily invest in crypto +- Using [OctoBot, the open source trading robot](https://www.octobot.cloud/trading-bot) to create and test your own trading strategies + +### Using OctoBot cloud + +Use [OctoBot cloud](/investing/introduction) to easily invest in **crypto baskets**, profit from **ready-made investment startegies** and **automate TradingView strategies**. + +In this case, [www.octobot.cloud](https://www.octobot.cloud) is made for you and the **[Investing](/investing/introduction) section** of those guides is what you are looking for. + +### Using the open source trading robot + +<div style="text-align: center; margin: 1.5rem 0"> + <iframe width="100%" height="400" style="max-width: 640px; border-radius: 8px" src="https://www.youtube.com/embed/TJUU62e1jR8" title="OctoBot - Open Source Crypto Trading Bot" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> +</div> + +With the [OctoBot trading robot](https://www.octobot.cloud/trading-bot) you can create, test and configure your investment strategies, whether it is with OctoBot cloud strategies or your own. It is possible to install your trading bot [on your computer](octobot-installation/install-octobot-on-your-computer) or on a [cloud server](octobot-installation/cloud-install-octobot-on-digitalocean). + +The **Trading bot section** on the guides is dedicated to OctoBot as the + +<a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source trading robot available on GitHub</a> +. + +<div style="text-align: center"> + <a href="octobot-installation/install-octobot-on-your-computer"><strong>Install OctoBot</strong></a> +</div> + +## Configuration and trading modes + +OctoBot supports many trading strategies: the [trading modes](octobot-trading-modes/trading-modes). Each trading mode is unique and consists in a different trading technique. Trading modes can: + +- Trade on spot or futures markets. +- Use statistics and technical analysis to find the best trade entries and exits. +- Trade based on [AI](https://www.octobot.cloud/features/ai-trading-bot) and [ChatGPT predictions](octobot-trading-modes/chatgpt-trading). +- Use [market-making algorithms](octobot-trading-modes/grid-trading-mode) to minimize risks and grant regular small gains. +- Trade upon alerts from platforms such as [TradingView](octobot-trading-modes/tradingview-trading-mode). +- Use the best [OctoBot cloud strategies](/) + + +<div style="text-align: center; margin: 1.5rem 0"> + <iframe width="100%" height="400" style="max-width: 640px; border-radius: 8px" src="https://www.youtube.com/embed/Exdl94cmMDQ" title="Trading configuration in OctoBot" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> +</div> + +Do something entirely different based on your ideas: + +- [Customize](octobot-configuration/profile-configuration) your OctoBot to make it trade according to your ideas +- Or code and test your ideas directly using [OctoBot script](/guides/octobot-script). + +## Chat directly with OctoBot + +You can remotly command your OctoBot by connecting it to [Telegram](octobot-interfaces/telegram). You can follow what is your robot doing, watch its portfolio, its profits, trigger an emergency sell... Access your OctoBot from wherever you want, whenever you want thanks to its Telegram interface. + +<div style="text-align: center"> + +![telegram connection to octobot illustrated by telegram logo](/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png) + +</div> + +See the [Telegram interface configuration](octobot-interfaces/telegram) for more details on the OctoBot Telegram interface. + +## Optimize your OctoBot using backtesting + +Using OctoBot's backtesting, test your current trading strategy using historical data from the markets you +want to trade. Most OctoBot configurations are testable in +[backtesting](octobot-usage/backtesting). +Fine tune your bot's configuration up to the very last detail and maximise your profits. + +![octobot backtesting result summary](/images/guides/backtesting/octobot-backtesting-result-summary.png) + +## Take it one step further using OctoBot script + +Turn your trading ideas into tested and optimized strategy using [OctoBot script](/guides/octobot-script). +Directly code your strategy in python while benefiting of all the OctoBot tools and an extremely flexible reporting and visualization system. Analyse exactly what is happening when running your ideas on past data and create your best strategies. + +![octobot pro report btc usdt with chart trades portfolio value and rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +## Supported exchanges + +Find the list of supported and partner exchanges [on the exchange summary](/guides/exchanges). + +## Developer guides + +Find the developpper guides [on the developers section](/guides/developers). diff --git a/docs/content/investing/_category_.json b/docs/content/investing/_category_.json new file mode 100644 index 0000000000..6f471515ec --- /dev/null +++ b/docs/content/investing/_category_.json @@ -0,0 +1 @@ +{"label": "OctoBot Cloud"} diff --git a/docs/content/investing/connect-your-binance-account-to-octobot.md b/docs/content/investing/connect-your-binance-account-to-octobot.md new file mode 100644 index 0000000000..2ea07e6d74 --- /dev/null +++ b/docs/content/investing/connect-your-binance-account-to-octobot.md @@ -0,0 +1,126 @@ +--- +title: "Connecting to Binance" +description: "Step by step guide on how to securely use your Binance account with OctoBot cloud and profit from automated crypto investments." +sidebar_position: 22 +--- + + + +# Connecting your Binance account to OctoBot cloud + +To automate the investment strategies of your choice on your Binance account, it is necessary to allow OctoBot to access a part of your account. + +This is done using `API Keys`. API Keys are a standard authentication system that is often used to connect software together. + +If you are wondering what an `API Key` is and why OctoBot is using it, checkout our [introduction to exchanges API Keys](what-is-an-exchange-api-key). + +## Connecting to your Binance account with API Keys + +Here are the 7 simple steps to connect to your Binance account with OctoBot cloud and automate your investment strategies. + +### 1. Log in to your Binance account + +Go to <a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">binance.com</a> and log in to your account (or create an account). + +![binance account login](/images/guides/binance/binance-account-login.png) + +### 2. Go to API Management + +Select "Account" and "API Management" from your account Dashboard or "API Management" from top right profile icon dropdown menu. +![account setting api management](/images/guides/binance/account-setting-api-management.png) + +![account api management from navbar](/images/guides/binance/account-api-management-from-navbar.png) + +### 3. Create a new API Key + +Hit "create API", select "System generated" and name it as you wish. The name is just for you to remember the purpose of this key. +![apis list create new api](/images/guides/binance/apis-list-create-new-api.png) + +![select api type](/images/guides/binance/select-api-type.png) + +![select api name](/images/guides/binance/select-api-name.png) + +### 4. Security verification + +Proceed with the security verification to create the API Key. +![create api security verification](/images/guides/binance/create-api-security-verification.png) + +### 5. Add trading permissions and IP whitelisting + +Your API Key is now created ! + +The only remaining thing is to add the trading permission for OctoBot to be able to create and cancel orders using this API Key. To do this: + +1. Click "Edit restrictions". + +2. Choose "Restrict access to trusted IPs only" + +3. Click the "copy" button from OctoBot cloud to copy the IP whitelist + +4. Paste the list in the field that just appeared + +5. Click "Confirm". + +6. Check "Enable Spot & Margin Trading". + +7. Finally click "Save". + +![api created click edit restrictions](/images/guides/binance/api-created-click-edit-restrictions.png) + +![api created add trading permission](/images/guides/binance/api-created-add-trading-permission.png) + +![api created add trading permission save](/images/guides/binance/api-created-add-trading-permission-save.png) + +![api restrict to trusted ips](/images/guides/binance/api-restrict-to-trusted-ips.png) + +Please note that every other permission than "Enable Reading" and "Enable Spot & Margin Trading" should remain unchecked. + +### 6. Add your API Key to your OctoBot cloud account + +Your API Key is now ready to be used by OctoBot ! + +All you need to do is to copy and paste both `API Key` and `Secret Key` values into your Binance account configuration on OctoBot cloud. This can be done either when starting a trading strategy with a real account or from your profile on [octobot.cloud](https://www.octobot.cloud/) + +Note: When adding an API Key on OctoBot cloud, you can associate a name to it. As for the naming on Binance side, this is a free field where you can enter any name to quickly identify this API Key in the future. +![api creation completed selected values](/images/guides/binance/api-creation-completed-selected-values.png) + +![add API Key to octobot cloud from strategy start](/images/guides/binance/add-api-key-to-octobot-cloud-from-strategy-start.png) + +<div style="text-align: center"> + <em>Adding an API Key when starting a strategy</em> +</div> + +![add API Key to octobot cloud from profile](/images/guides/binance/add-api-key-to-octobot-cloud-from-profile.png) + +<div style="text-align: center"> + <em>Adding an API Key directly from <a href="https://www.octobot.cloud/account" rel="nofollow">your profile</a></em> +</div> + +Your Binance account can now be used on OctoBot cloud ! + +:::info + Please note that when starting a bot, some of the funds available in your API key related portfolio might be sold. This include any stablecoin and fiat related funds as well as cryptocurrencies that are traded by the strategy you selected. This is is part of the [portfolio optimization](invest-with-your-strategy#1-portfolio-optimization). +::: + + +## Troubleshooting + +### Incorrect API Keys + +If you get the `Incorrect API Keys` error, this usually means that: + +- There was an error when copy-pasting your API Key or Secret Key from Binance to OctoBot cloud +- You made a mistake when copying the IP whitelist +- You might have selected the wrong exchange (make sure to select Binance) + +### Incorrect API restrictions: missing spot trading + +If you get the `Incorrect API restrictions: missing spot trading` error, you need to check "Enable Spot & Margin Trading" as explained [on step 6](#6-add-trading-permissions). + +### Incorrect API restrictions: withdrawals enabled + +If you get the `Incorrect API restrictions: withdrawals enabled` error, you need to uncheck "Enable Withdrawals". You can do this following the same path as [on step 6](#6-add-trading-permissions). + +### Other questions + +If you have any other question of if something is unclear, feel free to reach out to the support using the chatbox on the bottom right of the screen on [octobot.cloud](https://www.octobot.cloud/). diff --git a/docs/content/investing/connect-your-coinbase-account-to-octobot.md b/docs/content/investing/connect-your-coinbase-account-to-octobot.md new file mode 100644 index 0000000000..19241ff4f2 --- /dev/null +++ b/docs/content/investing/connect-your-coinbase-account-to-octobot.md @@ -0,0 +1,120 @@ +--- +title: "Connecting to Coinbase" +description: "Step by step guide on how to securely use your Coinbase account with OctoBot cloud and profit from automated crypto investments." +sidebar_position: 24 +--- + + + +# Connecting your Coinbase account to OctoBot cloud + +To automate the investment strategies of your choice on your Coinbase account, it is necessary to allow OctoBot to access a part of your account. + +This is done using `API Keys`. API Keys are a standard authentication system that is often used to connect software together. + +If you are wondering what an `API Key` is and why OctoBot is using it, checkout our [introduction to exchanges API Keys](what-is-an-exchange-api-key). + +## Connecting to your Coinbase account with API Keys + +Here are the 5 simple steps to connect to your Coinbase account with OctoBot cloud and automate your investment strategies. + +### 1. Log in to your Coinbase account + +Go to <a href="https://login.coinbase.com/signin" rel="nofollow">coinbase.com</a> and log in to your account (or create an account). + +![coinbase account login](/images/guides/coinbase/coinbase-account-login.png) + +### 2. Go to API Management + +Display your account settings by clicking on your account icon and select "Settings". +![account setting api management](/images/guides/coinbase/account-setting-api-management.png) + +### 3. Create a new API Key + +Scroll down if necessary and hit "API". + +![account setting api management click api](/images/guides/coinbase/account-setting-api-management-click-api.png) + +Click "Create API Key with Coinbase Developer Platform (Recommended)". + +![apis list create new api](/images/guides/coinbase/apis-list-create-new-api.png) + +1. Name it as you wish. The name is just for you to remember the purpose of this key. + +2. Select the wallet you wish to use with your OctoBot. Note: the "Default" Coinbase wallet usually contains your funds on the regular (non Advanced) version of Coinbase. Please transfer your funds to another Coinbase wallet and select it with your API key if you wish to use different funds. + +3. **Remember to check the "Trade" API-specific restriction.** + +![select api name passphrase and restrictions](/images/guides/coinbase/select-api-name-and-restrictions.png) + +4. Click the "copy" button from OctoBot cloud to copy the IP whitelist and paste the list in the `IP whitelist` field. + +### 4. Save your API Key + +Now that your key is named, the Spot Trading permission is checked and the IP whitelist is configured, click "Create & download". +Proceed with the security verification to create the API Key. + +Your API Key is now created. Do not close this window as long as you are not done entering it on OctoBot cloud. + +<div style="text-align: center"> + +![coinbase api key created](/images/guides/coinbase/coinbase-api-key-created.png) + +</div> + +Note: Coinbase will ask you to download a file containing the API Key details. Downloading it is not necessary, do not download the file or remove it from your computer if you did. + +### 5. Add your API Key to your OctoBot cloud account + +You now have your API key details ! + +All you need to do is to copy and paste the values of `API key name` and `Secret` (step 4) into your Coinbase account configuration on OctoBot cloud. This can be done either when starting a trading strategy with a real account or from your profile on [octobot.cloud](https://www.octobot.cloud/) + +Note: When adding an API Key on OctoBot cloud, you can associate a name to it. As for the naming on Coinbase side, this is a free field where you can enter any name to quickly identify this API Key in the future. + +<div style="text-align: center"> + +![api creation completed selected values](/images/guides/coinbase/api-creation-completed-selected-values.png) + +</div> + +![add API Key to octobot cloud from strategy start](/images/guides/coinbase/add-api-key-to-octobot-cloud-from-strategy-start.png) + +<div style="text-align: center"> + <em>Adding an API Key when starting a strategy</em> +</div> + +![add API Key to octobot cloud from profile](/images/guides/coinbase/add-api-key-to-octobot-cloud-from-profile.png) + +<div style="text-align: center"> + <em>Adding an API Key directly from <a href="https://www.octobot.cloud/account" rel="nofollow">your profile</a></em> +</div> + +Your Coinbase account can now be used on OctoBot cloud ! + +:::info + Please note that when starting a bot, some of the funds available in your API key related portfolio might be sold. This include any stablecoin and fiat related funds as well as cryptocurrencies that are traded by the strategy you selected. This is is part of the [portfolio optimization](invest-with-your-strategy#1-portfolio-optimization). +::: + +## Troubleshooting + +### Incorrect API Keys + +If you get the `Incorrect API Keys` error, this usually means that: + +- There was an error when copy-pasting your API Key or Secret Key from Coinbase to OctoBot cloud +- You made a mistake when copying the IP whitelist +- You might have selected the wrong exchange (make sure to select Coinbase) +- Should you use ECDSA or Ed25519 API keys? You can use any, both ECDSA and Ed25519 key formats are supported. + +### Incorrect API restrictions: missing spot trading + +If you get the `Incorrect API restrictions: missing spot trading` error, you need to check "Trade" as explained [on step 3](#3-create-a-new-api-key). + +### Incorrect API restrictions: withdrawals enabled + +If you get the `Incorrect API restrictions: withdrawals enabled` error, you need to uncheck "Transfer". You can do this following the same path as [on step 3](#3-create-a-new-api-key). + +### Other questions + +If you have any other question of if something is unclear, feel free to reach out to the support using the chatbox on the bottom right of the screen on [octobot.cloud](https://www.octobot.cloud/). diff --git a/docs/content/investing/connect-your-kucoin-account-to-octobot.md b/docs/content/investing/connect-your-kucoin-account-to-octobot.md new file mode 100644 index 0000000000..154178827a --- /dev/null +++ b/docs/content/investing/connect-your-kucoin-account-to-octobot.md @@ -0,0 +1,121 @@ +--- +title: "Connecting to Kucoin" +description: "Step by step guide on how to securely use your Kucoin account with OctoBot cloud and profit from automated crypto investments." +sidebar_position: 23 +--- + + + +# Connecting your Kucoin account to OctoBot cloud + +To automate the investment strategies of your choice on your Kucoin account, it is necessary to allow OctoBot to access a part of your account. + +This is done using `API Keys`. API Keys are a standard authentication system that is often used to connect software together. + +If you are wondering what an `API Key` is and why OctoBot is using it, checkout our [introduction to exchanges API Keys](what-is-an-exchange-api-key). + +## Connecting to your Kucoin account with API Keys + +Here are the 5 simple steps to connect to your Kucoin account with OctoBot cloud and automate your investment strategies. + +### 1. Log in to your Kucoin account + +Go to <a href="https://www.kucoin.com/ucenter/signup?rcode=rJ2Q2T3" rel="nofollow">kucoin.com</a> and log in to your account (or create an account). + +![kucoin account login](/images/guides/kucoin/kucoin-account-login.png) + +### 2. Go to API Management + +Display your account dashboard by clicking on your account and select "API Management". +![account setting api management](/images/guides/kucoin/account-setting-api-management.png) + +### 3. Create a new API Key + +1. Click "Create API", select "API-Based Trading". + +2. Name it as you wish and give it a passphrase. The name is just for you to remember the purpose of this key. The passphrase will have to be entered alongside your API key details on OctoBot cloud. + +3. **Remember to check the "Spot Trading" API Restriction**. + +![apis list create new api](/images/guides/kucoin/apis-list-create-new-api.png) + +![select api name passphrase and restrictions](/images/guides/kucoin/select-api-name-passphrase-and-restrictions.png) + +4. Select the `Restrict to Trusted IPs Only` option. + +5. Click the "copy" button from OctoBot cloud to copy the IP whitelist and paste the list in the IP whitelist field, then click `Add`. + + +### 4. Save your API Key + +Now that your key is named, has a passphrase and the Spot Trading permission is selected, click "Next" + +Proceed with the security verification to create the API Key. + +<div style="text-align: center"> + +![create api security verification](/images/guides/kucoin/create-api-security-verification.png) + +</div> + +Your API Key is now created. Do not close this window as long as your are not done entering it on OctoBot cloud. + +<div style="text-align: center"> + +![kucoin api key created](/images/guides/kucoin/kucoin-api-key-created.png) + +</div> + +### 5. Add your API Key to your OctoBot cloud account + +You now have your API key details ! + +All you need to do is to copy and paste the values of `Key`, `Secret` (step 4) and the passphrase (step 3) into your Kucoin account configuration on OctoBot cloud. This can be done either when starting a trading strategy with a real account or from your profile on [octobot.cloud](https://www.octobot.cloud/) + +Note: When adding an API Key on OctoBot cloud, you can associate a name to it. As for the naming on Kucoin side, this is a free field where you can enter any name to quickly identify this API Key in the future. + +<div style="text-align: center"> + +![api creation completed selected values](/images/guides/kucoin/api-creation-completed-selected-values.png) + +</div> + +![add API Key to octobot cloud from strategy start](/images/guides/kucoin/add-api-key-to-octobot-cloud-from-strategy-start.png) + +<div style="text-align: center"> + <em>Adding an API Key when starting a strategy</em> +</div> + +![add API Key to octobot cloud from profile](/images/guides/kucoin/add-api-key-to-octobot-cloud-from-profile.png) + +<div style="text-align: center"> + <em>Adding an API Key directly from <a href="https://www.octobot.cloud/account" rel="nofollow">your profile</a></em> +</div> + +Your Kucoin account can now be used on OctoBot cloud ! + +:::info + Please note that when starting a bot, some of the funds available in your API key related portfolio might be sold. This include any stablecoin and fiat related funds as well as cryptocurrencies that are traded by the strategy you selected. This is is part of the [portfolio optimization](invest-with-your-strategy#1-portfolio-optimization). +::: + +## Troubleshooting + +### Incorrect API Keys + +If you get the `Incorrect API Keys` error, this usually means that: + +- There was an error when copy-pasting your API Key, Secret Key or passphrase from Kucoin to OctoBot cloud +- You made a mistake when copying the IP whitelist +- You might have selected the wrong exchange (make sure to select Kucoin) + +### Incorrect API restrictions: missing spot trading + +If you get the `Incorrect API restrictions: missing spot trading` error, you need to check "Spot Trading" as explained [on step 3](#3-create-a-new-api-key). + +### Incorrect API restrictions: withdrawals enabled + +If you get the `Incorrect API restrictions: withdrawals enabled` error, you need to uncheck "Transfer". You can do this following the same path as [on step 3](#3-create-a-new-api-key). + +### Other questions + +If you have any other question of if something is unclear, feel free to reach out to the support using the chatbox on the bottom right of the screen on [octobot.cloud](https://www.octobot.cloud/). diff --git a/docs/content/investing/find-your-strategy.md b/docs/content/investing/find-your-strategy.md new file mode 100644 index 0000000000..0dc896e18c --- /dev/null +++ b/docs/content/investing/find-your-strategy.md @@ -0,0 +1,74 @@ +--- +title: "Find your investment" +description: "Learn how to explore, compare and find or create the best trading strategy for you on OctoBot cloud." +sidebar_position: 4 +--- + + + +# Find the best investment for you + +Many investments are available on [octobot.cloud](https://www.octobot.cloud/). Some of them are +created by the OctoBot team, others are made by the community. + +The goal of OctoBot cloud is to help you find the perfect investment for you according to your own goal. + +![OctoBot cloud strategies explorer with crypto baskets and strategies](/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png) + +Finding your ideal investment can be hard. That's why the try to make it as simple as possible to: + +- Choose a crypto basket theme +- Compare strategies +- Understand the idea of each strategy +- Access past performances of each strategy in a transparent manner + +## Crypto basket theme + +<div style="text-align: center"> + +![OctoBot cloud crypto basket example](/images/guides/using-a-crypto-basket.png) + +</div> + +[Many crypto baskets](https://www.octobot.cloud/features/crypto-basket) are offered on OctoBot cloud. A crypto basket is a collection of different cryptocurrencies that share a common theme. These baskets allow you to invest in the cryptocurrencies that interest you or simply in the cryptocurrencies with the highest market capitalization ('the top crypto'). + +Using these baskets allows you to avoid having to individually choose each cryptocurrency to buy and profit from the next increase in value of a cryptocurrency within the basket. + +## Strategies details + +![OctoBot cloud strategy](/images/guides/cloud-strategy.png) + +Each strategy has a historical profit chart. This chart is generated by running the strategy with +historical data on the displayed time period. + +> Note: Profit charts are generated using [OctoBot backtesting](/guides/octobot#optimize-your-octobot-using-backtesting) + +For maximum transparency, each strategy performance is re-evaluated every week. This ensures +that displayed performance are always up to date with the current market. + +## Creating your strategy + +You might prefer to create your own trading strategy rather than using OctoBot cloud ready-to-go strategies. + +<div style="text-align: center"> + +![TradingView automation illustrated by TradingView logo](/images/blog/introducing-the-investor-plus-plan/tradingview-automation-illustrated-by-tradingview-logo.png) + +</div> + +For this purpose, OctoBot connects to TradingView to enable you to create your strategy on TradingView is a way that is: + +- **Clear and appealing**: no coding skills required as TradingView provides a simple visual way to create the strategy. +- **Adapting to the investor** level of expertise: OctoBot cloud enables the best strategies for you as an investor. If for someone this means trading on price targets or using a complex combo of indicators, we work on making it possible and easy to do. +- **Simple to follow and monitor**: your always know what trades are open, what can happen next and what happened in the past. + +Check out the [TradingView automated trading guide](tradingview-automated-trading) to know more about how to create your strategy using OctoBot cloud. + +## Using the investment you found + +Once you identified the investment you would like to use, you have 2 possibilities: + +![trading account type choice real or paper trading](/images/guides/trading-account-type-choice-real-or-paper-trading.png) + +- Test it with [paper trading](paper-trading-a-strategy): start an OctoBot with simulated funds and test the strategy or the crypto basket as much as you want. Paper trading is risk free. +- Use it with [real funds](invest-with-your-strategy): start an OctoBot on your real exchange account and start profiting from the strategy or the crypto basket. diff --git a/docs/content/investing/fine-tune-your-octobots.mdx b/docs/content/investing/fine-tune-your-octobots.mdx new file mode 100644 index 0000000000..7c41b358b1 --- /dev/null +++ b/docs/content/investing/fine-tune-your-octobots.mdx @@ -0,0 +1,73 @@ +--- +title: "Fine tune your OctoBot" +description: "Fine tune your OctoBot by easily creating, replacing and cancelling orders and rebalancing your portfolio directly from your OctoBots." +sidebar_position: 12 +--- + + + +# Fine tune your OctoBots + +Using the <a href="https://www.octobot.cloud/plan" rel="nofollow">Pro plan</a> of OctoBot cloud, you can change the way any of your OctoBots are trading: + +- Cancel any strategy open order +- Replace orders by your own +- Add you own orders to any strategy + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="LwpDxDwGF0w" title="Trading configuration in OctoBot" /> + +## Make your OctoBots trade your way + +Easily change the way your OctoBots are buying or selling crypto. + +<div style={{textAlign: "center"}}> + ![cancel orders directly from your + OctoBot](/images/guides/cancel-orders-directly-from-your-octobot.png) +</div> +Directly from any of your OctoBot, whether it is following an <a href="https://www.octobot.cloud/explore" rel="nofollow">OctoBot cloud strategy</a> +, your [TradingView strategy](tradingview-automated-trading) on your exchange +account or with [paper trading](paper-trading-a-strategy) you can: - Cancel any +open order - Replace existing orders by your own + +## Trade directly from your OctoBots + +Buy and sell crypto from your exchange or [paper trading](paper-trading-a-strategy) account directly from your OctoBot + +<div style={{textAlign: "center"}}> + ![buy and sell crypto directly from your + OctoBot](/images/guides/buy-and-sell-crypto-directly-from-your-octobot.png) +</div> +With the <a href="https://www.octobot.cloud/plan" rel="nofollow">Pro plan</a> +, you can from any OctoBot: - Easily trade directly on your exchange or [risk +free paper trading account](paper-trading-a-strategy) - Create any kind of buy +or sell orders at any time + +**[Switch to Pro](https://www.octobot.cloud/pricing)** + +## Rebalance your portfolio + +Adapt and rebalance your portfolio holdings at anytime using market or limit orders from your OctoBot. + +<div style={{textAlign: "center"}}> + ![buy and sell from your OctoBot + portfolio](/images/guides/buy-and-sell-from-your-octobot-portfolio.png) +</div> + +Simply realize your profits, cut your losses or buy the assets you want to invest in. + +:::info + Pro tip: you can also prevent your OctoBot from trading with a part of your + funds by buying assets that are not traded within your select OctoBot cloud + strategy. +::: + +## Follow your OctoBots' activity + +Each of your OctoBot activities are stored in a clear history of trades, automations and cancel commands that happened with your Octobot. + +<div style={{textAlign: "center"}}> + ![buy and sell crypto octobot activity + history](/images/guides/buy-and-sell-crypto-octobot-activity-history.png) +</div> diff --git a/docs/content/investing/follow-your-profits.md b/docs/content/investing/follow-your-profits.md new file mode 100644 index 0000000000..f37ab55473 --- /dev/null +++ b/docs/content/investing/follow-your-profits.md @@ -0,0 +1,55 @@ +--- +title: "Follow your profits" +description: "Learn how to quickly and easily follow profits and activity of your running and stopped OctoBot trading robots on OctoBot cloud." +sidebar_position: 11 +--- + + + +# Follow your profits + +## Your exchange accounts + +OctoBot cloud allows you to track the holdings of your portfolios on each connected exchange and visualize the total historical value of your cryptocurrencies. + +![octobot multi exchange dashboard with historical portfolio value holdings pie chart and running bots](/images/guides/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png) + +Thanks to your OctoBot cloud dashboard, you can easily: + +- View all your exchange account portfolios and follow their growth +- Follow all your OctoBots activities + +## Your OctoBots + +Once you started an OctoBot to run a strategy (with a [paper trader using simulated funds](paper-trading-a-strategy) or [real trading](invest-with-your-strategy) OctoBot) you can follow it from the way you prefer. + +### Fom OctoBot cloud + +The OctoBot cloud website, on <a href="https://www.octobot.cloud/bots" rel="nofollow">octobot.cloud/bots</a> +![OctoBot cloud bots](/images/guides/cloud-bots.png) + +### Fom OctoBot mobile app + +The OctoBot mobile app is available on <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=investing&utm_content=follow-your-profit" rel="nofollow">Google play</a> and on <a href="https://apps.apple.com/us/app/octobot-crypto-investment/id6502774175" rel="nofollow">App Store</a>. + +<div style="text-align: center"> + <div style="text-align: center"> + <a href="https://apps.apple.com/us/app/octobot-crypto-investment/id6502774175" rel="nofollow"><AppleStoreButton /></a> + <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=investing&utm_content=follow-your-profit" rel="nofollow"><GoogleStoreButton /></a> + </div> +</div> + +### Fom OctoBot web app + +The OctoBot web app is available on <a href="https://mobile.octobot.cloud" rel="nofollow">mobile.octobot.cloud</a>. + +:::info + It allows you to use the application without having to install it on your + phone. +::: + +## Bot details + +![OctoBot cloud bot details](/images/guides/cloud-bot.png) + +Each OctoBot has a detailed view where you can see is current activity, portfolio and historical profits. diff --git a/docs/content/investing/having-multiple-octobot-strategies.md b/docs/content/investing/having-multiple-octobot-strategies.md new file mode 100644 index 0000000000..f59e22eba5 --- /dev/null +++ b/docs/content/investing/having-multiple-octobot-strategies.md @@ -0,0 +1,80 @@ +--- +title: "Having multiple OctoBots" +description: "Learn how to run multiple trading strategies to optimize your gains and reduce risk. Use OctoBot cloud to run many strategies for free." +sidebar_position: 7 +--- + + + +# Having multiple OctoBots + +When using OctoBot cloud, you can have as many strategies or crypto baskets as you wish. + +![having multiple octobots on different strategies](/images/guides/having-multiple-octobots-on-different-strategies.png) + +While there is nothing mandatory, you can very well use one strategy at a time, it is usually better to use more than one investment strategies. On this point, investment automation is really profitable as it allows you to easily run many strategies simultaneously. + +## Benefits of running multiple OctoBots + +On OctoBot cloud, running one or many trading strategies is always free. Therefore there is no real reason not to do it. If you think it can be profitable for you, then you are free to do it at no cost. + +Running multiple OctoBots at the same time has many advantages, it allows you to: + +1. Diversify your investments by profiting from other type of strategies or basket themes, therefore lowering risks +2. Trade on different exhanges at the same time, allowing to reduce the risk of exchange issues +3. Trade different assets and therfore increase your chances of investing the next crypto gems while reducing risks associated to each traded coin + +Overall, using multiple OctoBot is very similar to the popular saying of "not putting all your eggs in the same basket". + +## How to run multiple OctoBots + +When running a strategy or a crypto basket on [octobot.cloud](https://www.octobot.cloud/), there is only one rule: + +:::info + Only one strategy or crypto basket per exchange portfolio. +::: + +This means that you can run as many simultaneous strategies or baskets as you want, as long as they are running either on different exchanges or using different portfolio on the same exchange account. + +> Why are we enforcing this ? +> Simply to avoid interference between strategies and let OctoBots operate on their full potential. + +### Using different exchanges + +To use different OctoBots, you can just connect different exchanges to your OctoBot cloud account. You will then be able on run a strategy or a crypto basket on each exchange account. + +You could for example run a high-risk, high-reward IA-based strategy on Binance and a low-risk, safe grid strategy on Kucoin. This also reduces the risks associated to holding your funds on a single exchange. + +### Running multiple OctoBots on Binance + +You can also run multiple strategies on the same Binance exchange account by using subaccounts. With subaccounts, you can quickly and easily split your funds between multiple portfolios within the same exchange, enabling you to use many strategies or crypto baskets on the same Binance account. + +<div style="text-align: center"> + +![binance subaccounts](/images/guides/binance/binance-subaccounts.png) + +</div> + +For example, Binance allows you to have up to 10 subaccounts. You can then run up to 11 simultaneous strategies or baskets on your Binance account: one on your main account and 10 on your subaccounts. + +:::info +Wondering how to create a Binance subaccount ? Binance subaccounts are now open to everyone, <a href="https://www.binance.com/support/faq/binance-sub-account-functions-and-frequently-asked-questions-360020632811" rel="nofollow">checkout this guide</a>. +::: + +### Running multiple OctoBots on Coinbase + +To run multiple strategies on the same Coinbase account, just use different wallets: each of them can be bound to an OctoBot. + +<div style="text-align: center"> + +![coinbase multi portfolio](/images/guides/coinbase/coinbase-multi-portfolio.png) + +</div> + +Create as many wallets as you need from the <a href="https://www.coinbase.com/advanced-portfolio" rel="nofollow">Coinbase portfolio interface</a> and create API keys bound to those wallets to make your OctoBot trade on it. + +<div style="text-align: center"> + +![coinbase api key select multi portfolio](/images/guides/coinbase/coinbase-api-key-select-multi-portfolio.png) + +</div> diff --git a/docs/content/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud.mdx b/docs/content/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud.mdx new file mode 100644 index 0000000000..e67e90a306 --- /dev/null +++ b/docs/content/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud.mdx @@ -0,0 +1,290 @@ +--- +title: "Strategy automation tutorial" +description: "Use TradingView strategies to automate your trading on your crypto exchange account. Automate ready-made or custom Pine Script strategies." +sidebar_position: 18 +--- + + + +# How to automate any TradingView strategy on OctoBot cloud + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="OZIYfg7bJf4" title="Automate your strategy with a TradingView account" /> + +Automate any TradingView strategy within seconds using a TradingView account and the <a href="https://www.octobot.cloud/creator" rel="nofollow">AI-powered strategies generator</a>. + +Note: The automation of TradingView alerts by email is unfortunately no longer available due to a recent restriction by TradingView. + +## Any TradingView strategy can be automated using OctoBot + +OctoBot can easily be [connected to TradingView](tradingview-automated-trading) to automate trades based on price events, indicators and also full strategies. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview community + strategies](/images/guides/trading-view/tradingview-community-strategies.png) + </div> +</div> + +Automating a TradingView strategy has many advantages over simple price events or indicators automation. +When automating a strategy, you can: + +- Automate your trading using **any TradingView strategy**, **your own strategy** created by OctoBot's <a href="https://www.octobot.cloud/creator" rel="nofollow">TradingView strategy generator</a>, or a strategy provided by **your favorite strategy creator** +- **Combine multiple indicators** to create your entry and exit orders when all your conditions are met +- Use TradingView built in backtesting engine to **optimize your strategy** + +## 1. Preparing your strategy to connect to your OctoBot + +To make your OctoBot trade on your exchange account (or a [risk-free virtual account](paper-trading-a-strategy)) based on a TradingView strategy, all you need to do is to bind the strategy orders to your [TradingView automations](tradingview-alerts-automation#automating-pine-script-strategies) using regular TradingView alerts. + +Once you know which strategy you want to use, display it on your TradingView chart and click the `{}` icon. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview open strategy + code](/images/guides/trading-view/tradingview-open-strategy-code.png) + </div> +</div> + +This displays the strategy Pine Script code. Changing this code will change the strategy behavior. + +:::info + You can't find the code for the strategy you want to use? Generate it using + OctoBot's{' '} + <a href="https://www.octobot.cloud/creator" rel="nofollow">TradingView strategy generator</a> + . +::: + +Each TradingView Strategy using Pine Script in version 4 and above can use the `strategy.entry`, `strategy.exit`, `strategy.close` and `strategy.order` keywords to trade in order to enter and exit positions. +In order to bind those trades to your exchange account using OctoBot, all you need to do is to add the `alert_message="your-OctoBot-automation-id"` parameter to the keywords. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview adding alert + message](/images/guides/trading-view/tradingview-adding-alert-message.png) + </div> +</div> + +Note that the initial `strategy.entry("RsiLE", strategy.long, comment="Buy")` have been changed to `strategy.entry("RsiLE", strategy.long, comment="Buy", alert_message="your-OctoBot-automation-id")` in order to include the `alert_message="your-OctoBot-automation-id"` parameter. + +In your Pine Script strategy, each and every call of the following keyword should include this `alert_message="your-OctoBot-automation-id"` parameter: + +- `strategy.entry` +- `strategy.exit` +- `strategy.close` +- `strategy.order` + +Forgetting to add the `alert_message` parameter to any of those call will result in missing signals on your exchange. + +Now that each strategy call contains the parameter, go to the <a href="https://www.octobot.cloud/explore?category=tv" rel="nofollow">OctoBot strategy explorer custom tab</a> and start a new TradingView OctoBot. + +<div style={{textAlign: "center"}}> + ![start new tradingview octobot from + explorer](/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png) +</div> + +Here is a [guide on how to start a TradingView OctoBot](tradingview-trading-tutorial#1-create-your-tradingview-octobot) if you are unsure about how to do it. + +Now that your TradingView OctoBot is up and running, all you need to do is to create an automation for each trading signal of your strategy. + +:::info + Automations are actions to be automatically executed by your OctoBot when + triggered from TradingView alerts. +::: + +## 2. Option A: Using base automations + +### 2.1 Creating base automations + +Each `strategy.entry`, `strategy.exit`, `strategy.close` and `strategy.order` keyword creates a trade signal, just create an automation for each of those signals. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview simple rsi + strategy](/images/guides/trading-view/tradingview-simple-rsi-strategy.png) + </div> +</div> + +In this simple RSI strategy, two signals are emitted: a LONG and a SHORT signal, both of which being created by a `strategy.entry` keyword. + +In this example, we want to buy when the LONG signal is emitted and sell when the SHORT signal is sent. + +We will therefore create the two following automations: + +**Automation 1 : long signal** + +<div style={{textAlign: "center"}}> + <div> + ![octobot automation buy eth 25 percent + usdt](/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt.png) + </div> +</div> +Buying ETH/USDT using 25% of the portfolio at market price. +<div style={{textAlign: "center"}}> + <div> + ![octobot automation buy eth 25 percent usdt automation + id](/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png) + </div> +</div> +We will use the ID of this automation to replace the +`your-OctoBot-automation-id` text in the first `strategy.entry` keyword. + +**Automation 2 : short signal** + +<div style={{textAlign: "center"}}> + <div> + ![octobot automation sell 100 percent + eth](/images/guides/trading-view/octobot-automation-sell-100-percent-eth.png) + </div> +</div> +Selling 100% of the holding of ETH/USDT at market price. We will use the ID of +this automation to replace the `your-OctoBot-automation-id` text in the second +`strategy.entry` keyword. + +### 2.2 Binding your TradingView strategy to your automations ids + +**1. Copy your automation ids into your strategy alert_message** + +Copy the id of each of automation into the `alert_message` text value of their associated signal. + +<div style={{textAlign: "center"}}> + <div> + ![octobot open automation connection + panel](/images/guides/trading-view/octobot-open-automation-connection-panel.png) + </div> + <div> + ![octobot automation buy eth 25 percent usdt automation + id](/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png) + </div> +</div> + +There should not be any `your-OctoBot-automation-id` remaining in your strategy Pine Scripe code. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview strategy example with automation + ids](/images/guides/trading-view/tradingview-strategy-example-with-automation-ids.png) + </div> +</div> + +Note the `f2b0b567-d63e-412e-b6cb-1a31c0bc1217` and `af4b897e-6b8e-45d0-a88d-9f11fd57a9b2` texts that are now located in the `strategy.entry` keyword instead of the default `your-OctoBot-automation-id`. Naturally, those two identifiers are example values and your own automation identifiers should be used instead. + +**2. Configure the TradingView alert** + +Create a new TradingView alert bound to your strategy and change the `Message` section of the alert to only contain the value of the `alert_message` parameter (which is your automation id). + +<div style={{textAlign: "center"}}> + ![tradingview create + alert](/images/guides/trading-view/tradingview-create-alert.png) +</div> +Open the `Alerts` view on the right and click `Create Alert`. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview adding strategy alert + message](/images/guides/trading-view/tradingview-adding-strategy-alert-message.png) + </div> +</div> + +1. Select your strategy in the `Condition` section. Please note that changing your strategy configuration will require you to select the newest version of the strategy in this alert. +2. Use **this exact syntax** for the `Message` content : + `{{strategy.order.alert_message}}` + +**3. Make sure your notification settings are up-to-date** + +You will to use [webhook notifications](tradingview-trading-tutorial#25-configure-the-webhook-url) to trade using this strategy. Before confirming the alert, always make sure your alert's **Notification** configuration is updated, or your OctoBot might not receive your alerts. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview notification + configuration](/images/guides/trading-view/tradingview-notification-configuration.png) + </div> +</div> + + +## 2. Option B: Using custom automations + +### 2.1 Creating custom automations + + +<YouTube id="HeOi4PY1ayk" title="TradingView tutorial: automate any strategy with OctoBot custom automation" /> +<div style={{textAlign: "center"}}> + TradingView tutorial: automate any strategy with OctoBot custom automation. +</div> + +Each `strategy.entry`, `strategy.exit`, `strategy.close` and `strategy.order` keyword creates a trade signal, just configure the associated `alert_message` parameter to contain your custom automation parameters. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview simple rsi + strategy](/images/guides/trading-view/tradingview-simple-rsi-strategy.png) + </div> +</div> + +In this simple RSI strategy, two signals are emitted: a LONG and a SHORT signal, both of which being created by a `strategy.entry` keyword. + +In this example, we want to buy when the LONG signal is emitted and sell when the SHORT signal is sent. + +We will therefore update the `alert_message` of each LONG and SHORT signal to send our buy and sell signals: + +**Custom automation: Buy 0.1 ETH on long signal and sell it on short signal** + +<div style={{textAlign: "center"}}> + <div> + ![octobot custom automation buy + eth](/images/guides/trading-view/octobot-custom-automation-buy-eth.png) + </div> +</div> +- Content of the LONG signal `alert_message`: +`SYMBOL=ETHUSDT;SIGNAL=BUY;VOLUME=0.1;BOT_ID=21a7e1e2-d499` - Content of the +SHORT signal `alert_message`: +`SYMBOL=ETHUSDT;SIGNAL=SELL;VOLUME=0.1;BOT_ID=21a7e1e2-d499` + +You will find the list of supported parameters on the [automations guide](tradingview-alerts-automation#tradingview-custom-automations) + +_Note: here `BOT_ID=21a7e1e2-d499` is just an example value and should be replaced by your TradingView OctoBot id, which you can find in as the last component of your TradingView OctoBot url._ + +And this is all, this simple `alert_message` automatically tell OctoBot what to do when the strategy triggers. + +### 2.2 Binding your TradingView strategy to your custom automations + +Create a new TradingView alert bound to your strategy and change the `Message` section of the alert to only contain the value of the `alert_message` parameter (which is your automation content). + +<div style={{textAlign: "center"}}> + ![tradingview create + alert](/images/guides/trading-view/tradingview-create-alert.png) +</div> +Open the `Alerts` view on the right and click `Create Alert`. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview adding strategy alert + message](/images/guides/trading-view/tradingview-adding-strategy-alert-message.png) + </div> +</div> + +1. Select your strategy in the `Condition` section. Please note that changing your strategy configuration will require you to select the newest version of the strategy in this alert. +2. Use **this exact syntax** for the `Message` content : + `{{strategy.order.alert_message}}` + +**3. Make sure your notification settings are up-to-date** + +You will to use [webhook notifications](tradingview-trading-tutorial#25-configure-the-webhook-url) to trade using this strategy. Before confirming the alert, always make sure your alert's **Notification** configuration is updated, or your OctoBot might not receive your alerts. + +<div style={{textAlign: "center"}}> + <div> + ![tradingview notification + configuration](/images/guides/trading-view/tradingview-notification-configuration.png) + </div> +</div> + +## You are all set! + +That's all! With this setup, when your TradingView strategy will see a buy or sell opportunity, it will: + +1. Call `strategy.entry` (or any of the `strategy.` keyword you used) +2. Which will send an alert bound to your TradingView OctoBot Automation id or custom Automation content (identified by the `alert_message` parameter) +3. Your OctoBot automation will be executed on your real of virtual exchange account + +**[Start your TradingView OctoBot](https://www.octobot.cloud)** diff --git a/docs/content/investing/introduction.mdx b/docs/content/investing/introduction.mdx new file mode 100644 index 0000000000..d530649e47 --- /dev/null +++ b/docs/content/investing/introduction.mdx @@ -0,0 +1,59 @@ +--- +title: "Introduction" +description: "Any question on OctoBot cloud investments ? How to use a trading strategy ? How to follow your investment ? It is all on the guides." +sidebar_position: 1 +--- + + + +# Invest with OctoBot cloud + +## Improve your crypto investments with OctoBot + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="WiC5DMw_6ZA" title="Improve your crypto investments with OctoBot cloud" /> +Improve your crypto investments using OctoBot cloud automated investment +strategies. Invest in all top crypto projects or categories to profit from the +latest trends with [crypto baskets](https://www.octobot.cloud/features/crypto-basket). Increase your +profits using AI, DCA or grid trading <a href="https://www.octobot.cloud/explore" rel="nofollow">strategies</a> or automate any [TradingView +strategy](tradingview-automated-trading). + +**[Invest with OctoBot](https://www.octobot.cloud)** + +## Choose a strategy, not a tool + +OctoBot cloud enables you to use the best investment strategies for free. + +- You can use trading strategies for free, in a unlimited way (this is not a free trial) +- You funds stay on your exchange account: OctoBot sends buy & sell orders + to run your strategy + +Ideally, when you want to use a strategy, you want to: + +1. Explore and compare strategies +2. Understand potential profits and risks of the strategy of your choice +3. Apply this strategy on your exchange account + +It shouldn't be more complicated than this. Making these steps **as easy as possible** +is our goal with [OctoBot cloud](/) and its Investor and [Investor Plus](/blog/introducing-the-investor-plus-plan) plans. + +## With OctoBot cloud, you can focus on what matters + +### 1. Select the strategy of your choice + +![OctoBot cloud strategies explorer with crypto baskets and strategies](/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png) + +OctoBot cloud provides a wide range of trading strategies for you to: + +- [Explore and compare these strategies](find-your-strategy) +- [Test any strategy with paper trading](paper-trading-a-strategy) +- [Run strategies on your exchange accounts](invest-with-your-strategy) + +Or if you prefer using your own trading strategy, you can invest using real or paper trading with [your strategy directly from TradingView](tradingview-automated-trading). + +### 2. Follow your gains + +![cloud-bots](/images/guides/cloud-bots.png) + +Once you started your OctoBot: real or paper trader, follow it [directly from OctoBot cloud or the mobile app](follow-your-profits). diff --git a/docs/content/investing/invest-with-your-strategy.md b/docs/content/investing/invest-with-your-strategy.md new file mode 100644 index 0000000000..d279ade0bc --- /dev/null +++ b/docs/content/investing/invest-with-your-strategy.md @@ -0,0 +1,51 @@ +--- +title: "Start to invest" +description: "Learn how to quickly and easily start your investment on OctoBot cloud." +sidebar_position: 6 +--- + + + +# Start your investment + +![cloud strategy](/images/guides/cloud-strategy2.png) + + +Once you found the strategy or the crypto basket you want to use with your real funds, you are ready to really profit from OctoBot cloud. + +## Getting started + +1. From the strategy or the crypto basket you want to use, hit **Start trading**. +2. Select **Real trading**. +![trading account type choice real or paper trading](/images/guides/trading-account-type-choice-real-or-paper-trading.png) +3. Select or enter your exchange account [API key](what-is-an-exchange-api-key). Check out the [Binance connection guide](connect-your-binance-account-to-octobot), [Kucoin connection guide](connect-your-kucoin-account-to-octobot) or [Coinbase connection guide](connect-your-coinbase-account-to-octobot) if you have any question. +![cloud strategy select exchange](/images/guides/cloud-strategy-select-exchange.png) + _Note: OctoBot will make sure that you have enough funds on your exchange account to start the chosen strategy_ +![cloud strategy start](/images/guides/cloud-strategy-start.png) +4. Start your OctoBot to automate your investments with this strategy or basket. + +## What will happen ? + +### 1. Portfolio optimization +Your OctoBot might balance your USD-associated coins (such as USDT, USDC etc), as well as coins traded by your strategy, that available in portfolio in order to create optimal conditions to start your strategy or crypto basket. + +Example: + +Let's imagine a portfolio with 100 USDC and 100 USDT. Starting a strategy that is using USDT will make OctoBot sell your USDC for USDT in order to be able to trade it with the selected strategy. + +:::info +This process only uses the USD-associated coins as well as coins traded by your strategy from your portfolio. If you want the strategy to ignore a part of your funds, just move those funds to a coin that is not USD-associated or traded by your strategy. +::: + + +### 2. Investment execution +Your OctoBot will now automatically apply the selected strategy or basket to your exchange account by creating buy and sell orders on the traded cryptocurrencies. + +As with [paper trading OctoBots](paper-trading-a-strategy), you can [follow your trading bot](follow-your-profits) just as usual. + +Please note that your funds always stay on your exchange account. OctoBot is just creating trading orders on your account but never accesses to your funds directly. +In order to add another security layer, it is recommended to use API keys without withdrawal permission. + +:::info + Pro tip: You can keep testing other investment strategies or baskets risk free using [paper trading](paper-trading-a-strategy), even when running a strategy or a basket with real funds. +::: diff --git a/docs/content/investing/investor-faq.md b/docs/content/investing/investor-faq.md new file mode 100644 index 0000000000..86de20cfe3 --- /dev/null +++ b/docs/content/investing/investor-faq.md @@ -0,0 +1,84 @@ +--- +title: "FAQ" +description: "Any question on OctoBot cloud ? Here are the frequently asked questions and their answers." +sidebar_position: 33 +--- + + + +# OctoBot cloud frequently asked questions (FAQ) + +## How can I test strategies or crypto baskets ? + +On OctoBot cloud, we try to keep everything as simple as possible and this includes testing strategies or crypto baskets. Additionnaly to public historical performances, **every strategy and crypto basket can be tested risk free using [paper trading](paper-trading-a-strategy)**. + +This means that you can run any trading strategy or basket at anytime using virtual funds before [starting to invest on your real exchange account](invest-with-your-strategy). Paper trading allows you to test the strategies you are interested in as much as you want for free free. + +[Learn more about paper trading](paper-trading-a-strategy) + +## How are strategies profits computed ? + +Each strategy on OctoBot cloud is built, run and tested using OctoBot. This means that each strategy past performance is evaluated on a regular basis using historical data and OctoBot's [backtesting](/guides/octobot-usage/backtesting). + +At OctoBot we believe in transparency. This means that sometimes strategies can turn unprofitable as profits depends on so many different factors including market conditions. If a strategy is not making profits during a given period, you will see it before using it. + +## How to create my strategy ? + +OctoBot cloud enables you to trade using your own strategy by [automating TradingView strategies](tradingview-automated-trading). + +## Where are your funds when using OctoBot ? + +You funds always remain on the exchange, on your own exchange account. + +OctoBot is a software allowing you to apply a trading strategy or a crypto basket on your own exchange account. This means that OctoBot is just sending trading orders to your exchange account to buy and sell assets according to your selected strategy or basket. OctoBot never receives or sends funds form its users. + +## Depositing and withdrawing funds + +The OctoBot platform never holds your funds. When using OctoBot, **your funds always remain on the exchange account you selected for your OctoBot**. Your selected investment strategy will operate by sending buy and sell orders to your exchange account. + +As a result, you can deposit and withdraw from your exchange account just as you would normally do if no OctoBot was connected to it. If an OctoBot sees that funds have been added or withdrawn, it will automatically adapt and keep your select investment strategy operating as long as minimal required funds to run this startegy remain available. + +Note: If someone pretends that you need to move your funds to any platform to use OctoBot, then this person is lying and trying to steal your money. The OctoBot team will never ask for such a thing. + +## How much can you loose ? + +This depends on the strategy you selected. In all cases, you can never loose more than your investment. + +When using OctoBot, the same rules as on exchanges apply, this means that you can end up loosing funds, for example if the following events happen: + +- Selling an asset at a lower price than you bought it +- Trading fees taken by the exchange when executing orders +- Issues with the invested asset or exchange itself (ex: if the asset valuation collapses) + +:::info + You can test any strategy **risk free**, therefore without any chance to loose + funds using [paper trading](paper-trading-a-strategy). +::: + +## Is OctoBot cloud secure ? + +Yes, security is among our top priorities. When using OctoBot cloud, the following security measures apply: + +- Your exchange API keys are stored on a secure encrypted vault. This means that even in the unlikely event that exchange API keys would leak from OctoBot servers, they would not be readable. +- Your exchange API keys are configured to only be usable from the IP addresses of OctoBot cloud. This means that in the very unlikely event that your API keys would leak from OctoBot cloud or from you, they would be refused by the exchange. +- OctoBot API keys with withdrawal rights can't be used. OctoBot cloud refuses to store exchange API keys with withdrawal permissions (when technically possible). This means that your funds technically can't be taken out of your exchange account by OctoBot or the company behind it. +- OctoBot relies on automated strategies instead of human actions. This means that each strategy is reliable and predictible. You don't need to trust a human to properly execute the strategy. + +## Can I use the same exchange account on 2 OctoBots ? + +Yes, you can use the same exchange account on multiple OctoBots. Each OctoBot will operate on the budget you have defined for it, from your exchange account's portfolio. + +## Why are there minimal funds to use trading strategies and crypto baskets ? + +There are 2 reasons for minimal funds in trading strategies and crypto baskets: + +- **Exchange trading rules**: OctoBot ultimately send orders to exchange. Those exchanges have trading rules that are enforcing a minimal size for each order. On Binance, this amount <a href="https://www.binance.com/en/trade-rule" rel="nofollow">is usually $5 or $10</a>. Strategies usually trade with a portion of your portfolio for each order, this means this part need to be large enough to comply with trading rules. This is especially true for Grid-based trading strategies where your funds are split into a large amount of smaller orders. +- **The investor plan**: in order to keep the Investor Plan of OctoBot cloud completely free, we are are partnering up with exchanges to bring them trading volume. This means that we have to require a minimum amount in each portfolio to pay our bills. We try to keep this minimum as low as possible but have to set a threshold. + +## How can I connect my exchange account to OctoBot ? + +To help you connect your exchange account to OctoBot, we created detailed step by step guides: + +- [Binance connection guide](connect-your-binance-account-to-octobot) +- [Kucoin connection guide](connect-your-kucoin-account-to-octobot) +- [Coinbase connection guide](connect-your-coinbase-account-to-octobot) diff --git a/docs/content/investing/paper-trading-a-strategy.md b/docs/content/investing/paper-trading-a-strategy.md new file mode 100644 index 0000000000..7675a59b60 --- /dev/null +++ b/docs/content/investing/paper-trading-a-strategy.md @@ -0,0 +1,51 @@ +--- +title: "Test risk-free" +description: "Learn how to use paper trading to test strategies and crypto baskets live and risk free with OctoBot cloud." +sidebar_position: 5 +--- + + + +# Risk-free testing + +![paper trading cloud octobot](/images/guides/paper-trading-cloud-octobot.png) + +With OctoBot, you can use paper trading with any strategy or crypto basket. + +Paper trading is enabling you to test trading strategies in live conditions using a virtual portfolio. + +It is perfect to experiment with a trading strategy or crypto basket without taking any risk as it is only using simulated funds. + +## Getting started + +1. From the strategy or crypto basket you want to paper trade, hit **Start trading** +2. Select **Paper trading** +![trading account type choice real or paper trading](/images/guides/trading-account-type-choice-real-or-paper-trading.png) +3. Select the amount you want to use in your simulated portfolio +![cloud strategy select exchange](/images/guides/paper-trading-virtual-portfolio-configuration.png) + + +:::info + No exchange account is required to use paper trading on OctoBot cloud. +::: + +## Paper trading in OctoBot + +### Your paper trading OctoBot +Your paper trading OctoBot will now apply the selected strategy or basket as if it were on a real exchange account except that it won't actually connect to any exchange account. + +As with real trading OctoBots, you can [follow your paper trading bot](follow-your-profits) just as usual. + +### How long is your paper trading running ? +Using OctoBot cloud, you can have your paper trading OctoBot run for as long as you wish. + +The only requirement is to click on **Extend** once every two weeks when your OctoBot arrives close to its expiry time. This notifies OctoBot cloud that it should keep your paper trading OctoBot running. + +![paper trading cloud octobot expiring in 2 days](/images/guides/paper-trading-cloud-octobot-expiring-in-2-days.png) + +### Can I have bot paper trading and real trading OctoBots ? + +Yes ! +In fact, we even encourage you to use paper trading to experiment with strategies and crypto baskets on OctoBot cloud, even after you found the ones to trade with using your real funds. + +This allows you quickly test new baskets or strategies and optimize your gains by always using the ones you prefer at any time. diff --git a/docs/content/investing/pay-with-crypto.md b/docs/content/investing/pay-with-crypto.md new file mode 100644 index 0000000000..0a181955db --- /dev/null +++ b/docs/content/investing/pay-with-crypto.md @@ -0,0 +1,58 @@ +--- +title: "Pay with crypto" +description: "Step-by-step guide on paying for your OctoBot Cloud subscription with cryptocurrency" +sidebar_position: 30 +--- + + + +# Pay your subscription with cryptocurrencies + +:::info + Warning: Crypto payments are temporarily unavailable. We're actively working to restore this option soon. +::: + +## Payment options for OctoBot Cloud + +You have the option to pay for your OctoBot Cloud subscription either by credit card or with cryptocurrencies. For cryptocurrency payments, you can use [USDC](https://www.octobot.cloud/what-is-usdc) on various blockchains such as [Ethereum](https://www.octobot.cloud/what-is-ethereum), Optimism, BNB Smart Chain, Polygon, Base, and Arbitrum. + +## How to make a payment with cryptocurrencies + +After the trial period of OctoBot Cloud, or following the creation of your account, you will automatically be enrolled in the free plan, the Investor plan. +To access additional features available in the [Investor Plus](/blog/introducing-the-investor-plus-plan) and [Pro](/blog/introducing-the-pro-plan) plans, you will need to update your subscription. + +1. Open the <a href="https://www.octobot.cloud/plan" rel="nofollow">OctoBot cloud plans page</a>. +2. Select the desired plan and click on "Pay with crypto". + +<div style="text-align: center"> + +![choose pay with crypto](/images/investing/pay-with-crypto/pay-with-crypto-en.png) + +</div> + +3. Connect your <a href="https://metamask.io/" rel="nofollow">Metamask</a>, Binance, Brave, Coinbase wallet, <a href="https://walletconnect.com/" rel="nofollow">Wallet Connect</a>, etc... Most wallets are supported. + +![connect your crypto wallet](/images/investing/pay-with-crypto/connect-your-crypto-wallet.png) + +4. Choose the blockchain and token you wish to use for payment. + +![choose a blockchain and a token](/images/investing/pay-with-crypto/select-a-blockchain-and-a-token.png) + +5. Click on "Sign to continue". +6. Sign the transaction with your wallet to accept the terms of service from the payment provider <a href="https://www.loopcrypto.xyz/payments" rel="nofollow">LoopCrypto</a>. +7. Confirm the transaction authorization, which will be higher than the subscription amount, thereby enabling automatic monthly debits. +8. Wait for the transaction to be validated on the blockchain. You will receive a confirmation email once your subscription has been updated. + +## Changing payment method from credit card to cryptocurrencies + +Currently, this option is not available directly from your account. To change your payment method from credit card to cryptocurrencies, please contact customer support at [contact@octobot.cloud](mailto:contact@octobot.cloud). + +## How to stop paying in cryptocurrencies + +To stop your cryptocurrency-paid subscription, follow these steps: + +1. Go to the <a href="https://www.octobot.cloud/account" rel="nofollow">My Account</a> section. +2. Click on the "Stop" button to cancel your subscription. +3. If you also wish to disconnect your wallet, click on "Update My Payment Method" and follow the instructions to remove wallet access. + +By following these steps, you can easily manage your subscription and payment methods for OctoBot Cloud. If you have any questions or need further assistance, do not hesitate to contact customer support. diff --git a/docs/content/investing/share-your-trading-signals.md b/docs/content/investing/share-your-trading-signals.md new file mode 100644 index 0000000000..e2be61fb10 --- /dev/null +++ b/docs/content/investing/share-your-trading-signals.md @@ -0,0 +1,85 @@ +--- +title: "Share your trading signals" +description: "Learn how to share your crypto trading signals on OctoBot cloud and let others automatically copy your signals." +sidebar_position: 27 +--- + + + +# Share your trading signals + +## Sharing trading signals from Telegram + +The OctoBot Telegram bot integration allows you to share trading signals from your Telegram group. +You can choose between two signal formats: + +- OctoBot format (aligned with [TradingView custom alert format](tradingview-alerts-automation#tradingview-custom-automations)) +- Cornix format + +### Steps to configure the Telegram Bot + +1. **Open the strategy management view** + +- Go to the <a href="https://www.octobot.cloud/creator" rel="nofollow">strategy management page</a>, in the `Administration` section +- Select the strategy for which you want to share signals + +2. **Add OctoBot to Your Telegram Group** + +Add the OctoBot Telegram bot to your Telegram group as an admin. This allows the bot to read trading signals from the group. +You can find the bot by searching the bot name in Telegram and adding it to your group with admin privileges. + +3. **Retrieve the Channel ID** + +Forward a message from your Telegram group to `@getidsbot` to obtain the channel ID. The channel ID will be a negative number, such as `-1000000000000`. +Copy this channel ID for use in the next step. + +4. **Enable Telegram Integration and Enter the Channel ID** + +In the "Integrations" section of your OctoBot strategy, locate the **Telegram** tab and enable it by toggling the switch to the "on" position. +In the "Channel ID" field, paste the channel ID you retrieved (e.g., `-1000000000000`). This tells OctoBot where to read the trading signals. + +5. **Select the Signal Type** + +Choose the format for the trading signals to be shared in your Telegram group: + +- **OctoBot Format**: The default format, aligned with TradingView custom alert format, used by OctoBot for sharing signals. +- **Cornix Format**: The same format as Cornix. +- Use the "Signal Type" dropdown menu to select your preferred format. + +## Manage strategy users with HTTP Endpoint + +The HTTP endpoint allows you to manage users of your strategy by adding external IDs and setting expiration dates. This is required for private strategies. + +### Steps to manage users with HTTP Endpoint + +1. **Set Up Access Control for Your Strategy** + +In the "Access control" section, choose between "Public strategy" and "Private strategy." For managing users via HTTP, select **Private strategy** to enable member management. + +- Public strategy: Anyone can access and use the strategy without member management. +- Private strategy: Only approved members can access the strategy, requiring member management. + +2. **Copy the HTTP Endpoint** + +In the "Integrations" section, copy the **HTTP endpoint** the paste it in your code. This allows you to send trading signals or manage members via HTTP requests. + +3. **Generate an API Key** + +Click on the **Create a new API key** button to generate API keys for your HTTP requests. This keys will be used to authenticate your requests. + +**Warning**: API keys are shown only once. API keys should never be shared to anyone. + +4. **Add the secret API Key to Your HTTP Request** + +Include the **secret** API key in the header of your HTTP request as `Your-API-Key`. + +For example, to manage members with telegram ids: + +``` +curl -X POST https://services.octobot.cloud/cloud/creator/webhook/AAAAA-BBBBBBB/CCCCCCC-DDDDDDDD/members/telegram -d '{"user_id": "USER_ID", "expiration_date": "EXPIRATION_DATE"}' -H 'Content-Type: application/json' -H 'Api-Key: XXXXXXXXXXX-YYYYYYYYYYY' +``` + +Where: + +- `USER_ID`: The Telegram user ID of the member you want to add or update (not his telegram handle). +- `EXPIRATION_DATE`: The date until which the member has access to the strategy (e.g., 2025-12-31). diff --git a/docs/content/investing/stop-a-strategy.md b/docs/content/investing/stop-a-strategy.md new file mode 100644 index 0000000000..e5ecb7e30a --- /dev/null +++ b/docs/content/investing/stop-a-strategy.md @@ -0,0 +1,27 @@ +--- +title: "Stop investing" +description: "Learn how to quickly and easily stop and restart your running investment strategy on OctoBot cloud." +sidebar_position: 8 +--- + +# Stopping an investment + +## Stopping an OctoBot + +You can stop a running OctoBot at any time in order to pause or stop its investment strategy execution. + +![stopping cloud octobot cancelling orders](/images/guides/stopping-cloud-octobot-cancelling-orders.png) + +Stopping a running OctoBot will: +- Cancel all its buy and sell open orders +- Prevent it from creating new orders +- Instantly sell each coin bought by the strategy (if this option is selected) +- Free the associated exchange account, allowing it to be used by any other strategy + +## Restarting an OctoBot + +After being stopped, OctoBots can be restarted in order to resume your investment strategy. + +![octobot cloud restart octobot](/images/guides/octobot-cloud-restart-octobot.png) + +A restarted OctoBot will start again to apply its strategy while keeping its original portfolio history and profitability. diff --git a/docs/content/investing/tradingview-alerts-automation.mdx b/docs/content/investing/tradingview-alerts-automation.mdx new file mode 100644 index 0000000000..af1d4ab6ca --- /dev/null +++ b/docs/content/investing/tradingview-alerts-automation.mdx @@ -0,0 +1,214 @@ +--- +title: "TradingView alerts automation" +description: "Learn how to automate your TradingView alerts coming from price events, indicators or Pine Script strategies using paper or real trading on any exchange." +sidebar_position: 17 +--- + + + +# TradingView alerts automation + +With OctoBot cloud, you can easily turn any TradingView price alert, indicator or Pine Script strategy into trades. Trading can be on your favourite exchanges or risk free with [simulated funds](paper-trading-a-strategy). + +<div style={{textAlign: "center"}}> + ![tradingview automation illustrated by tradingview + logo](/images/guides/trading-view/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +:::info + To trade on any TradingView alert, you first need to [configure the alert webhook](tradingview-trading-tutorial#25-configure-the-webhook-url) + for your traded pair if not done already. +::: +If you are not sure about how to use TradingView alerts, have a look at our +[TradingView trading tutorial](tradingview-trading-tutorial). + +**[Start a TradingView bot](https://www.octobot.cloud)** + +## Automating trading on TradingView price alerts + +TradingView can automatically send an alert when the price of an asset crosses a given value. + +<div style={{textAlign: "center"}}> + ![creating a price alert from + tradingview](/images/guides/trading-view/creating-a-price-alert-from-tradingview.png) +</div> + +This price alert that will notify my automation of a buy order identified by `d4f18425-b3b6-4e6b-94d0-61f362aa10c7` if BTC crosses 40.000 USDT. + +## Automating trading on TradingView indicators + +TradingView can automatically send an alert when something happens on a indicator. + +<div style={{textAlign: "center"}}> + ![creating an indicator alert from + tradingview](/images/guides/trading-view/creating-a-indicator-alert-from-tradingview.png) +</div> + +This indicator alert using the <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">Relative Strength Index</a> (or RSI) that will notify my automation of a sell order identified by `2b82c8b2-7397-44dc-9141-f0ec85fc9ef1` if the RSI value crosses 80, which I consider as a sell signal. + +An indicator alert can be a simple event such as crossing an indicator value or a much more advanced condition such as Bearish Divergences or channel exiting as shown in the RSI indicator example above. + +<div style={{textAlign: "center"}}> + ![creating a indicator alert from tradingview trigger + options](/images/guides/trading-view/creating-a-indicator-alert-from-tradingview-trigger-options.png) +</div> + +:::info + Any TradingView indicator (built-in or custom, paid and free) can be used to + send alerts and automate your trades using your TradingView OctoBot + automations. +::: + +## Automating Pine Script strategies + +TradingView can automatically send an alert when your <a href="https://www.tradingview.com/pine-script-docs/en/v5/index.html#" rel="nofollow">Pine Script strategies</a> create orders. + +To send alerts from a Pine Script strategy, use the <a href="https://www.tradingview.com/pine-script-docs/en/v5/concepts/Alerts.html?highlight=alert_message#order-fill-events" rel="nofollow">`alert_message`</a> parameter from Pine Script strategy functions which can create orders. + +![creating a strategy alert from tradingview](/images/guides/trading-view/creating-a-strategy-alert-from-tradingview.png) + +To send alerts with your Pine Script strategy, create a new alert and make sure to: + +1. Select the name of your strategy as the condition +2. Replace **ALL** the message content with exactly `{{strategy.order.alert_message}}` + +In the strategy's Pine Script code, add `alert_message="yourAutomationIdentifier"` to your strategy `entry`, `exit` or `close` calls. +Example with a `d4f18425-b3b6-4e6b-94d0-61f362aa10c7` automation identifier: + +> `strategy.entry("Buy", strategy.long, comment="Buy Signal Triggered", alert_message="d4f18425-b3b6-4e6b-94d0-61f362aa10c7")` + +Learn more TradingView Pine Script strategies automation on the [TradingView strategy automation tutorial](how-to-automate-any-tradingview-strategy-on-octobot-cloud). + +<div style={{textAlign: "center"}}> + **[Generate your strategy](https://www.octobot.cloud/creator)** +</div> + +:::info + Using TradingView Pine Script strategies to automate you trading is very + powerful as you can also use the TradingView integrated strategy tester to + optimize your strategy. +::: + +## TradingView custom automations + +Additionally to the automation defined from your TradingView OctoBot user interface, it is also possible to use alerts with custom content. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="HeOi4PY1ayk" title="TradingView tutorial: automate any strategy with OctoBot custom automation" /> + +This format allows for more flexibility in the way automations are executed by specifying your automation content within the alert message. + +Example: **market BUY order of 0.001 BTC on a bot with id 123** + +```bash +SYMBOL=BTCUSDT;SIGNAL=BUY;VOLUME=0.001;BOT_ID=123 +``` +View [all examples](#custom-automation-examples). + +### Custom automation parameters + +| Parameter | Example 1 | Example 2 | Details | +| :-------------------- | :----------------------------------- | :----------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------- | +| `SYMBOL` | BTCUSDT | ETH/USDT | You traded symbol, can also be `{{ticker}}`. | +| `SIGNAL` | BUY | CANCEL | Whether to create a buy, sell or cancel an order. | +| `ORDER_TYPE` | LIMIT | MARKET | Type of the order to create (`MARKET`, `LIMIT`, `STOP`). Default value: `MARKET`. | +| `VOLUME` | 0.01 | 50q | The amount to use. Follows the [amount syntax](../guides/octobot-trading-modes/order-amount-syntax). | +| `PRICE` | 30000 | -10% | The price to use. Follows the [price syntax](../guides/octobot-trading-modes/order-price-syntax). _Required when `ORDER_TYPE=LIMIT`_. | +| `TAKE_PROFIT_PRICE` | 45000 | 10% | The price of this order's take profit. Follows the [price syntax](../guides/octobot-trading-modes/order-price-syntax). | +| `TAKE_PROFIT_PRICE_2` | 50000 | 25% | The price of this order's Nth take profit. Follows the [price syntax](../guides/octobot-trading-modes/order-price-syntax). Entry funds are evenly split among take profits unless a `TAKE_PROFIT_VOLUME_RATIO` is set for each take profit. | +| `TAKE_PROFIT_VOLUME_RATIO_2` | 30 | 70 | Ratio of the entry order volume to include in this take profit. When used, a `TAKE_PROFIT_VOLUME_RATIO_X` is required for each take profit. | +| `STOP_PRICE` | 40000 | -25% | The price of this stop loss. Follows the [price syntax](../guides/octobot-trading-modes/order-price-syntax). _Required when `ORDER_TYPE=STOP`_. | +| `TRAILING_PROFILE` | filled_take_profit | filled_take_profit | Enables trailing orders according to the given [trailing profile](#trailing-profiles). Supported profiles: `filled_take_profit`. | +| `TAG ` | entry1 | exit2 | The tag of this order, or tag of orders to cancel. | +| `REDUCE_ONLY` | false | true | Whether the created order should be reduce only or not. _Only used in futures trading_. Default is `false`. | +| `LEVERAGE` | 10 | 2 | The updated leverage value to use. _Only used in futures trading_. | +| `BOT_ID` | c403ee03-ba4c-4d9d-9d78-ad692333a291 | b403ee03-ba4c-4d9d-9d78-ad692333a292 | The ID of your OctoBot to run this signal on. | + +Parameters must be separated using a `;` character and can be included in any order. + +Note: The `BOT_ID` parameter is required. Your `BOT_ID` is the last segment of your TradingView OctoBot URL. +Example: if your OctoBot URL is `https://www.octobot.cloud/bots/0280badc-e884-4637-bb86-44444444`, then your `BOT_ID` is `0280badc-e884-4637-bb86-44444444`. +```bash +BOT_ID=0280badc-e884-4637-bb86-44444444;SYMBOL=BTCUSDT;SIGNAL=BUY;ORDER_TYPE=LIMIT;VOLUME=45q;PRICE=-3% +``` + +### Custom automation examples +> A `MARKET BUY` order using `20` units of quote asset with dynamic `ticker` with bot id `123`. +```bash +SYMBOL={{ticker}};SIGNAL=BUY;VOLUME=20q;BOT_ID=123 +``` + +> A `LIMIT BUY` order of `0.01 ETH` at `-3%` of the current price with a `strategy-1` tag. +```bash +SYMBOL=ETHUSDC;SIGNAL=BUY;ORDER_TYPE=LIMIT;VOLUME=0.01;PRICE=-3%;TAG=strategy-1;BOT_ID=123 +``` + +> A `LIMIT BUY` order of `45 USDT` at `-3%` of the current price immediately followed by a `take profit at +10% of the buying price` and a `stop loss at -20%` as soon as the the initial BUY limit order is filled. +Note: when both `TAKE_PROFIT_PRICE` and `STOP_PRICE` are provided, the created take profit and stop loss will be OCO (one cancels the other) orders. In this case, only the stop order will be pushed to the exchange. It will then be replaced by its take profit if the take profit is reached fist. +_OCO orders are in beta test, instabilities might happen._ +```bash +SYMBOL=BTCUSDT;SIGNAL=BUY;ORDER_TYPE=LIMIT;VOLUME=45q;PRICE=-3%;TAKE_PROFIT_PRICE=10%;STOP_PRICE=-20%;BOT_ID=123 +``` + +> A `MARKET BUY` order of `6 SOL` followed by `3 take profits at 5%, 10% and 20%` from the buying price. Here, each take profit will have a quantity of `2 SOL` as the bought amount is split between take profits. +```bash +SYMBOL=SOLUSDC;SIGNAL=BUY;VOLUME=6;TAKE_PROFIT_PRICE=5%;TAKE_PROFIT_PRICE_2=10%;TAKE_PROFIT_PRICE_3=20%;BOT_ID=123 +``` + +> A `MARKET BUY` order of `6 SOL` followed by `3 take profits at 5%, 10% and 20%` from the buying price. Here, take profits will respectively trade 1, 2 and 3 SOL, which corresponds to `17`, `33` and `50` % of the entry amount. +```bash +SYMBOL=SOLUSDC;SIGNAL=BUY;VOLUME=6;TAKE_PROFIT_PRICE=5%;TAKE_PROFIT_PRICE_2=10%;TAKE_PROFIT_PRICE_3=20%;TAKE_PROFIT_VOLUME_RATIO=17;TAKE_PROFIT_VOLUME_RATIO_2=33,TAKE_PROFIT_VOLUME_RATIO_3=50;BOT_ID=123 +``` + +> `CANCEL` all `SOL/USDC` orders with the `strategy-1` tag. +```bash +SIGNAL=CANCEL;SYMBOL=SOLUSDT;TAG=strategy-1;BOT_ID=123 +``` + +**For futures trading** + +> A futures trading `REDUCE_ONLY` `MARKET SELL` order of `3 SOL`. +```bash +SYMBOL=SOLUSDC;SIGNAL=SELL;VOLUME=3;REDUCE_ONLY=true;BOT_ID=123 +``` + +> A futures trading `MARKET BUY` order of `200 USDC` which also configures the `SOL/USDC` contract leverage to `3`. +```bash +SYMBOL=SOLUSDC;SIGNAL=BUY;VOLUME=200q;LEVERAGE=3;BOT_ID=123 +``` + +### Trailing profiles +_Trailing profiles are in beta test, instabilities might happen._ + +When set to a valid profile, `TRAILING_PROFILE` enables trailing orders according to the given profile. Here is how profiles work: +- `filled_take_profit`: Operates with a stop loss associated to more than 1 take profit. When a take profit is filled, the stop loss price will be updated to first the buy order entry price and then the price of the previously filled take profit. This profile is useful to be sure to alway close your trade in profit as soon as at least one take profit has been reached. + +## Automated TradingView strategies examples + +- [Death and Golden Cross Strategy](tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy): buy and sell based on Death and Golden Crosses. +- [Bull market RSI Strategy](tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video): buy and sell using RSI to increase bull market profits. +- [Custom TradingView strategy tutorial](how-to-automate-any-tradingview-strategy-on-octobot-cloud): learn how to trade using any TradingView strategy. + +**[Start a bot](https://www.octobot.cloud)** + +## Automation rate limit + +| Automation type | Hourly rate limit | Average execution time | +| :-------------- | :---------------- | :--------------------- | +| Email | 20 | 10 seconds | +| Webhook | 20 | 5 seconds | + +In order to reduce the impact of misconfigured alerts and prevent exploits of the system, there is a limit to the number of times a given automation can be triggered over 60 minutes. + +The amount of bots and automations you can have is unlimited but each individual automation can be triggered at most 20 times over 60 minutes. Moreover, up to 80 automation triggers per OctoBot account can be processed over 60 minutes. Any attempt to breach this limit without prior written consent from the OctoBot team can trigger a temporary or permanent ban of the associated account(s). +Please contact us if you need to increase this limit. + +The average execution time is the time measured between when TradingView sends the alert and when it is executed by OctoBot. This is an average, this time may vary. +This variation is small for webhooks but can, in rare cases, reach several tens of seconds for email alerts. This is due to the technical constraints associated with email transfer, which is a less optimized process than a simple webhook call. + +## TradingView alerts security + +The OctoBot cloud infrastructure is designed with the security in mind. It is the same when it comes to the TradingView alerts integration. + +Only alerts originating from the offical <a href="https://tradingview.com/" rel="nofollow">TradingView</a> website can trigger a TradingView automations. diff --git a/docs/content/investing/tradingview-automated-trading.mdx b/docs/content/investing/tradingview-automated-trading.mdx new file mode 100644 index 0000000000..bddb3b13d9 --- /dev/null +++ b/docs/content/investing/tradingview-automated-trading.mdx @@ -0,0 +1,156 @@ +--- +title: "TradingView automated trading" +description: "Easily automate your trades using TradingView with OctoBot. Enjoy unlimited alerts on price, indicator and Pine Script strategies with paper and real trading." +sidebar_position: 15 +--- + + + +# TradingView automated trading + +<a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> can be more than a tool to watch prices. You can also use +it to send alerts to OctoBot cloud and have your OctoBot to instantly buy or +sell according to your targets whenever your condition is met on TradingView. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="TNRMUP6-a_g" title="Automating TradingView" /> + +When using a TradingView OctoBot, each TradingView alert can trigger an action that we call `automation`. + +**[Start a bot](https://www.octobot.cloud)** + +## Automations to create your strategies + +An Automation is an action such as "buying" "selling", or "cancelling an order" that you can configure directly on your TradingView OctoBot. Each OctoBot has its own set of automations that you can create and use at will. + +Using automations, you can create any type of trading strategy. + +<div style={{textAlign: "center"}}> + ![octobot many tradingview + automations](/images/guides/trading-view/octobot-many-tradingview-automations.png) +</div> + +### Using automations + +Automations can be configured the way you want and you can have as many automations as necessary. + +<div style={{textAlign: "center"}}> + ![octobot create + automations](/images/guides/trading-view/octobot-create-automation.png) +</div> + +Each automation as its own identifier to set on your [TradingView alert](tradingview-trading-tutorial#26-create-a-new-alert) message: this allows your OctoBot to know what to do when a TradingView alert is received. + +<div style={{textAlign: "center"}}> + ![octobot automation + identifier](/images/guides/trading-view/octobot-automation-identifier.png) +</div> + +When your OctoBot receives an alert from TradingView, the associated automation is triggered and its result is displayed. Each OctoBot also has a complete history of its automations and their execution result. + +<div style={{textAlign: "center"}}> + ![octobot automation + history](/images/guides/trading-view/octobot-automations-history.png) +</div> + +Create your first automations using this [TradingView alerts automation tutorial](tradingview-alerts-automation). + +### Automations + +By opting for the <a href="https://www.octobot.cloud/plan" rel="nofollow">Pro plan</a>, you gain unlimited access to each automation enabling you automatically buy and sell whenever your TradingView alerts are fired. + +<div style={{textAlign: "center"}}> + ![octobot create + automation](/images/guides/trading-view/octobot-create-advanced-automation.png) +</div> + +Automations include buying and selling: + +- Any crypto +- On any supported exchange +- Using market or limit orders +- At a predefined price or a % difference from the market price of the traded coin +- With a percent of your portfolio holdings or a fixed amount denominated in the traded asset or the quote currency (in BTC or USDT for BTC/USDT). + +Automations are perfect for investors who want to carefully optimize their buy and sell orders as well as each traded amount, which is especially useful when trading multiple coins within the same strategy. + +### Custom automations + +For those who want to go further in their strategy automation, [custom automations](tradingview-alerts-automation#tradingview-custom-automations) are the way to go. + + +<YouTube id="HeOi4PY1ayk" title="TradingView tutorial: automate any strategy with OctoBot custom automation" /> + +Custom automations are the most flexible way to automate a strategy and enable to go deeper in the automation process by allowing to: + +- Use dynamic price and volume values, set from Pine Script +- Cancel open orders +- Automate take profits and stop losses +- Create advanced futures trading orders + +Learn more on OctoBot automations from the [automations guide](tradingview-alerts-automation). + +## Unlimited strategies + +When you use TradingView OctoBots, there are **no usage limits**. This is our unique approach on TradingView automations. + +Unlike other TradingView automation systems, we did not design the OctoBot TradingView bots around limits in how many alerts you can receive per month, how many bots you can create or exchanges you can connect to. With OctoBot cloud, you can: + +- Create and trigger as many automations as necessary +- Use as many bots as you want +- Connect to all the exchanges and accounts you need + +We think that allowing you to make the strategies you want is what a TradingView bot should do. We don't want you to be wondering if you need to take the paid plan A or B to automate your strategy because you are affraid that too many alerts will be sent from TradingView and your plan limit would be reached. This is an important issue because it can lead to extra charges or worse, your bot could be stuck in open position and not apply your alerts. This can't happen with OctoBot. + +This is why our pricing offers types of automations and even the cheapest plan is unlimited: + +- Plans offer advanced types of automations +- Available automations are unlimited + +## Start your TradingView OctoBot + +At OctoBot cloud, we try to keep things simple yet powerful and easy to use. +That's why, we created this [tutorial to help you get started with TradingView automation](tradingview-trading-tutorial). +In this tutorial you will learn how to: + +- Create a TradingView OctoBot +- Use an automation to buy BTC/USDT on a TradingView alert +- Setup this alert on your TradingView account +- Follow this alert execution history + +Or if you are interested in automating TradingView strategies directly, take a look at our [tutorial on how to automate any TradingView strategy](how-to-automate-any-tradingview-strategy-on-octobot-cloud) + +**[Start a TradingView OctoBot](https://www.octobot.cloud)** + +## Going further + +TradingView is a wonderful website enabling you to create alerts on many things. Of course each of them can be used to automate your trades with OctoBot cloud. + +Check out our [TradingView alerts automation guide](tradingview-alerts-automation) to see how to automate your trades based on: + +- Price events +- Any TradingView indicator +- [Pine Script TradingView strategies](how-to-automate-any-tradingview-strategy-on-octobot-cloud) +- Your own <a href="https://www.octobot.cloud/creator" rel="nofollow">AI generated strategies</a> + +## Automated strategies examples + +Here are a few ideas of TradingView strategies automated using OctoBot without the need for a single line of code. + +### Optimize your Bull Market using RSI + +Discover how to use the RSI to buy and sell at the best time in a Bull Market, as soon as the trend starts to switch. + + +<YouTube id="aa4vr1n2Iwo" title="Optimize your Crypto Bull Market using RSI" /> + +View the [RSI Bull Market Strategy guide](tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video). + +### Death and Golden Cross Strategy + +Discover how to automate buy and sell orders based on a Death and Golden Cross Strategy created on TradingView. + +![tradingview ema strategy illustration with 2 buy and 2 sell](/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +View the [Death and Golden Cross Strategy tutorial](tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy). diff --git a/docs/content/investing/tradingview-strategies-tutorials/_category_.json b/docs/content/investing/tradingview-strategies-tutorials/_category_.json new file mode 100644 index 0000000000..0642db16e7 --- /dev/null +++ b/docs/content/investing/tradingview-strategies-tutorials/_category_.json @@ -0,0 +1 @@ +{"label": "TradingView Strategy Examples"} diff --git a/docs/content/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy.md b/docs/content/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy.md new file mode 100644 index 0000000000..4920cae2a1 --- /dev/null +++ b/docs/content/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy.md @@ -0,0 +1,194 @@ +--- +title: "Golden Cross Strategy" +description: "Learn to automate a Bitcoin Death and Golden Cross Strategy using TradingView alerts and OctoBot with paper or real trading on any exchange." +sidebar_position: 2 +--- + + + +# Automating a TradingView Death and Golden Cross Strategy + +With this tutorial, you will learn how to trade with Death and Golden Crosses using two <a href="https://www.investopedia.com/terms/e/ema.asp" rel="nofollow">Exponential Moving Averages</a> (or EMA). +The concept is to: + +- Buy when the short term EMA crosses up the long term EMA. This is a <a href="https://www.investopedia.com/terms/g/goldencross.asp" rel="nofollow">Golden Cross</a> and is usually a bullish sign. +- Sell when the short term EMA crosses down the long term EMA. This is a <a href="https://www.investopedia.com/terms/d/deathcross.asp" rel="nofollow">Death Cross</a> and is usually a bearish sign. + +## 1. Automatically identifying Death and Golden Crosses + +### 1.1 Select your traded market + +First, we want to visually see our Death and Golden Crosses. Let's go to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> and select the trading pair, exchange and time frame we want to trade. + +<div style="text-align: center"> + +![tradingview select btcusdt market](/images/guides/trading-view/tradingview-select-btcusdt-market.png) + +</div> + +For this tutorial, we will trade BTC/USDT on Binance using the 5 minutes time frame. Of course, any other value would also work. +Note: Trading on Death and Golden Crosses is usually more profitable on longer time frames. In this tutorial the 5 minutes time frame is only meant as an example. + +### 1.2 Add the EMA indicators + +Then we will add the Exponential Moving Average indicator twice: + +1. Once for the long term EMA +2. Once for the short term EMA + +<div style="text-align: center"> + +![tradingview adding ema indicator](/images/guides/trading-view/tradingview-adding-ema-indicator.png) + +</div> + +### 1.3 Configure the EMA indicators + +Click on the `Settings` of both of your EMA indicators and set the `Length` value according to how you wish to configure your Death and Golden Crosses. + +<div style="text-align: center"> + +![tradingview configuring ema indicator](/images/guides/trading-view/tradingview-configuring-ema-indicator.png) + +</div> + +In this example, we will use the following values: + +1. `21` for the Length of the long term EMA +2. `9` for the Length of the short term EMA + +Note: you can also configure the `Style` of those EMA to make them easier to visualize on the chart + +### 1.4 Visualize the Strategy + +Death and Golden crosses happen when the long and short term EMA are crossing. We can now easily see what it would look like. + +<div style="text-align: center"> + +![tradingview ema indicator visualization with golden and death crosses](/images/guides/trading-view/tradingview-ema-indicator-visualization-with-golden-and-death-crosses.png) + +</div> + +Our strategy is ready, the only remaining step is to create an OctoBot to trade when those crosses happen. + +## 2. Creating OctoBot automations to buy and sell + +### 2.1 Create a TradingView OctoBot + +Let's open a new tab and go to <a href="https://www.octobot.cloud/dashboard" rel="nofollow">OctoBot cloud</a> to start a new TradingView OctoBot + +<div style="text-align: center"> + +![start new tradingview octobot from explorer](/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png) + +</div> + +**[Start a bot](https://www.octobot.cloud)** + +For this tutorial, we will start a bot on Binance. If you are unsure about how to start a TradingView OctoBot, check out the `Create your TradingView OctoBot` section of the [TradingView trading tutorial](../tradingview-trading-tutorial#1-create-your-tradingview-octobot). + +### 2.2 Create your BUY automation + +When a Golden Cross happens, we want our OctoBot to buy. For this tutorial, we will buy using 50% of our portfolio's USDT holdings. + +<div style="text-align: center"> + +![octobot automation create buy btc](/images/guides/trading-view/octobot-automation-create-buy-btc.png) + +</div> + +### 2.3 Create your SELL automation + +When a Death Cross occurs, we want our OctoBot to sell. For this tutorial, we will sell all of our portfolio's BTC holdings. + +<div style="text-align: center"> + +![octobot automation create sell btc](/images/guides/trading-view/octobot-automation-create-sell-btc.png) + +</div> + +Note: in this tutorial, we are keeping things simple by using market orders, selling everything at once and having only one type of BUY and SELL automation. +Since there is no limit the the automations you can create, you customize this strategy as much as you want by creating other BUY and SELL automations. + +## 3. Binding automations to trigger on Crosses + +Note: the following steps are assuming that you already configured your TradingView Alerts webhook URL. If it is not the case, please follow the [Configure the webhook URL guide](../tradingview-trading-tutorial#25-configure-the-webhook-url). + +### 3.1 Buying on Golden Crosses + +Open the connection panel of your BUY automation and copy its automation identifier. + +<div style="text-align: center"> + +![octobot open automation connection panel](/images/guides/trading-view/octobot-open-automation-connection-panel.png) + +</div> + +<div style="text-align: center"> + +![octobot automation identifier](/images/guides/trading-view/octobot-automation-identifier.png) + +</div> + +Going back to your TradingView tab, create a new alert + +<div style="text-align: center"> + +![creating an alert from tradingview](/images/guides/trading-view/creating-an-alert-from-tradingview.png) + +</div> + +<div style="text-align: center"> + +![tradingview create golden cross alert](/images/guides/trading-view/tradingview-create-golden-cross-alert.png) + +</div> + +In this alert, pay attention to: + +- Select `Crossing Up` as wall as EMA 9 and 21 as Condition: this is our Golden Cross. +- Select `Once Per Bar Close` as Trigger to check for Golden Crosses on each candle close. +- Give a meaningful name to your alert to easily identify it later on. +- Replace the full Message value by your BUY automation identifier from the OctoBot tab. + +Great ! Your TradingView strategy will not send an alert triggering your OctoBot BUY automation when a Golden Cross is identified according to your EMA settings. + +### 3.2 Selling on Death Crosses + +Similarly to the Golden Cross configuration: + +1. On your OctoBot tab, open your SELL automation connection panel. +2. On the TradingView tab, create a seconds alert to identify Death Crosses and configure it to trigger your SELL automation. + +<div style="text-align: center"> + +![tradingview create death cross alert](/images/guides/trading-view/tradingview-create-death-cross-alert.png) + +</div> + +In this alert, remember to: + +- Select `Crossing Down` as wall as EMA 9 and 21 as Condition: this is our Death Cross. +- Select `Once Per Bar Close` as Trigger to check for Death Crosses on each candle close. +- Give a meaningful name to your alert to easily identify it later on. +- Replace the full Message value by your SELL automation identifier from the OctoBot tab. + +## The strategy is ready + +And that's it ! +We just created an EMA Death and Golden Cross strategy on TradingView and automated its trading using OctoBot. Everytime a Death or Golden cross happen on TradingView, our OctoBot will buy or sell BTC accordingly. + +![tradingview ema strategy illustration with 2 buy and 2 sell](/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +![octobot tradingview trading side of ema strategy illustration with 2 buy and 2 sell](/images/guides/trading-view/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +Of course, you can use this configuration to trade any pair(s) on any exchange using your real funds or risk free with [simulated funds](../paper-trading-a-strategy). + +**[Start your TradingView OctoBot](https://www.octobot.cloud)** + +We hope this tutorial was clear enough. Please let is know if there is something we should improve. + +:::info + Warning: The strategy presented in this tutorial is only meant for educational + purposes and is not financial advice. +::: diff --git a/docs/content/investing/tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video.mdx b/docs/content/investing/tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video.mdx new file mode 100644 index 0000000000..7bb7473ddf --- /dev/null +++ b/docs/content/investing/tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video.mdx @@ -0,0 +1,120 @@ +--- +title: "Bull Market RSI Strategy" +description: "Optimize your Bull Market gains: profit from altcoin moonshots and trade before market trend switches using RSI. Discover this strategy with our video tutorial" +sidebar_position: 1 +--- + + + +# Bull Market Strategy from TradingView using RSI with video + +How to best take advantage of the Bull Market when it's here? In this strategy, we explore how to use RSI to buy and sell in advance, as soon as the trend starts to change! + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="aa4vr1n2Iwo" title="Optimize your Crypto Bull Market using RSI" /> + +## Goal of the Bull Market RSI Strategy + +In a <a href="https://www.investopedia.com/terms/b/bullmarket.asp" rel="nofollow">Bull Market</a>, the price of cryptocurrencies is rising on a regular basis. To optimize your gains, it is important to be able to buy coins that will increase in value before their prices rise too much. It would be best to also be able to sell quickly when the maximum is reached. Selling allows you to then to take advantage of buying at a lower price or unlock funds to profit from another promising crypto. + +This strategy specializes in **taking advantage of temporary price increases**. + +## Concept of the strategy + +Using the <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">Relative Strength Index</a> in order to use overbought and oversold zones to make profit by: + +1. Buying before the price really starts going up +2. Selling only when the local maximum is reached + +<div style={{textAlign: "center"}}> + ![RSI bull market strategy buying and selling + solana](/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-solana.png) +</div> + +## Strategy configuration + +The target of the strategy is to trade when a maximum is reached, identified by changes in RSI trend. Here is the RSI configuration and alerts used in this video. + +This strategy uses TradingView to automate the RSI analysis of any type of cryptocurrency. For more information on how to use TradingView and OctoBot, check out the [trading with TradingView tutorial](../tradingview-trading-tutorial). + +### TradingView configuration + +#### RSI to identify trend switches + +<div style={{textAlign: "center"}}> + ![RSI configuration with rolling moving + averages](/images/guides/tradingview-tutos/RSI-configuration-with-rolling-moving-averages.png) +</div> +- Length of 14, source: close (standard configuration) - MA Type: SMMA, which is +a Rolling Moving Average. This allows to give more weight to the latest values +and therefore get value that is more reactive that the default moving average. - +MA Length: 9 - BB StdDev: 1 + +#### Buy orders + +1. Buy upon strong oversell to profit from heavy price dips + <div style={{textAlign: "center"}}> + ![extreme buy solana on rsi ema threshhold tradingview alert + configuration](/images/guides/tradingview-tutos/extreme-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png) + </div> + +- When the RSI moving average (RSI-based MA) crosses up the 39.57 threshold +- Once Per Bar Close +- References a buy automation on the Octobot side + +2. Buy upon oversell to profit from regular bull market price dips + <div style={{textAlign: "center"}}> + ![regular buy solana on rsi ema threshhold tradingview alert + configuration](/images/guides/tradingview-tutos/regular-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png) + </div> + +- When the RSI moving average (RSI-based MA) crosses up the 48.62 threshold +- Once Per Bar Close +- References a buy automation on the Octobot side + +#### Sell orders + +1. Sell upon strong overbuy to profit from bull market heavy price increases + <div style={{textAlign: "center"}}> + ![extreme sell solana on rsi ema threshhold tradingview alert + configuration](/images/guides/tradingview-tutos/extreme-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png) + </div> + +- When the RSI moving average (RSI-based MA) crosses down the 67.45 threshold +- Once Per Bar Close +- References a sell automation on the Octobot side + +2. Sell upon regular overbuy to profit from bull market smaller price increases + <div style={{textAlign: "center"}}> + ![regular sell solana on rsi ema threshhold tradingview alert + configuration](/images/guides/tradingview-tutos/regular-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png) + </div> + +- When the RSI moving average (RSI-based MA) crosses down the 53.63 threshold +- Once Per Bar Close +- References a sell automation on the Octobot side + +### OctoBot configuration + +A buy and sell automation for the crypto you wish to trade, with the amount you wish to trade. + +<div style={{textAlign: "center"}}> + ![RSI bull market strategy buying and selling octobot + automations](/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-octobot-automations.png) +</div> + +For more information on OctoBot automations allowing buying and selling with TradingView in the "Automations to create your strategies" section of the [TradingView trading guide](../tradingview-automated-trading#automations-to-create-your-strategies). + +## Further optimisations + +In this video, we use a single-crypto version of the strategy. To optimize gains made, several improvements can be implemented: + +- **Trading against Bitcoin** instead of USDT to also take advantage of Bitcoin's price increase +- Use the strategy on **multiple cryptos at once**: if their upward phases are not simultaneous, this allows you to benefit from rising ones while others remain stable +- Use **multiple buy and sell orders** to optimize order prices based on additional RSI thresholds or event from other technical indicators + +:::info + Warning: The strategy presented in this tutorial is only meant for educational + purposes and is not financial advice. +::: diff --git a/docs/content/investing/tradingview-trading-tutorial.mdx b/docs/content/investing/tradingview-trading-tutorial.mdx new file mode 100644 index 0000000000..7696637f89 --- /dev/null +++ b/docs/content/investing/tradingview-trading-tutorial.mdx @@ -0,0 +1,261 @@ +--- +title: "TradingView trading tutorial" +description: "Simple step by step tutorial to create your TradingView OctoBot and trade on TradingView alerts using paper or real trading on any cryptocurrency exchange." +sidebar_position: 16 +--- + + + +# TradingView trading tutorial + +With OctoBot cloud, it is very easy to automate your trades using your own TradingView-based strategies. + +Here is a step by step tutorial on how to get started and make your first trades with OctoBot using paper trading and TradingView. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="TNRMUP6-a_g" title="Automating TradingView" /> + +In this tutorial, you will learn how to: + +1. Create a TradingView OctoBot and make it buy BTC whenever a TradingView alert is received +2. Configure TradingView to notify your OctoBot when the BTC price crosses 40.000 USDT +3. Follow your TradingView OctoBot activity + +## 1. Create your TradingView OctoBot + +### 1.1 Start a new TradingView OctoBot + +- If you are creating your OctoBot account, scroll down to `Automate your own strategy with TradingView` in the introduction, after choosing your exchange. + + <div style={{textAlign: "center"}}> + ![octobot create tradingview bot from + intro](/images/guides/trading-view/octobot-create-tradingview-bot-from-intro.png) + </div> + +- Or go to the `Custom` tab of the <a href="https://www.octobot.cloud/explore?category=tv" rel="nofollow">strategy explorer</a>. + <div style={{textAlign: "center"}}> + ![start new tradingview octobot from + explorer](/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png) + </div> + +**[Start a bot](https://www.octobot.cloud)** + +### 1.2 Choose the exchange you want to trade on + +Just like regular bots, TradingView OctoBots are running on a single exchange at a time. + +<div style={{textAlign: "center"}}> + ![octobot create tradingview bot select + exchange](/images/guides/trading-view/octobot-create-tradingview-bot-select-exchange.png) +</div> + +Select the exchange you want this OctoBot to trade on. + +### 1.3 Select paper or real trading + +<div style={{textAlign: "center"}}> + ![octobot create tradingview bot select paper or real + trading](/images/guides/trading-view/octobot-create-tradingview-bot-select-paper-or-real-trading.png) +</div> + +Also like other bots, you can use your TradingView OctoBot on a [real exchange account](invest-with-your-strategy) or risk free with [paper trading and simulated funds](paper-trading-a-strategy). +We suggest to start with paper trading until you are confortable automating your TradingView strategies with your real funds. + +### 1.4 Start your OctoBot + +Once your account is selected and initialized, you can start your TradingView OctoBot. + +<div style={{textAlign: "center"}}> + ![octobot create tradingview bot start + bot](/images/guides/trading-view/octobot-create-tradingview-bot-start-bot.png) +</div> + +When starting, TradingView bots won't create orders, they will be waiting for TradingView alerts to trade. + +### 1.5 Meet your TradingView bot + +Your TradingView OctoBot is now ready to trade on the account you selected everytime it will be notified from your TradingView alerts. + +<div style={{textAlign: "center"}}> + ![octobot create tradingview bot initial + bot](/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot.png) +</div> + +New TradingView bots are using automations to trade. Therefore at least one automation is required for your bot to start trading. We will now create your bot's first automation. + +### 1.6 Introducing automations + +Automations are the building blocks of every TradingView strategy on OctoBot. + +Automations are actions that are automatically performed when receiving the associated TradingView alert. An automation can be creating or cancelling orders according to your configuration. + +Each bot can have as many automation as necessary, there are no limit in the amount of automations and traded pairs a TradingView OctoBot can handle. + +Learn more about automations on the [tradingView automated trading guide](tradingview-automated-trading) + +<div style={{textAlign: "center"}}> + ![octobot automation connection empty + panel](/images/guides/trading-view/octobot-automation-connection-empty-panel.png) +</div> + +At OctoBot cloud, we try to keep things simple. Here is an example of how to create an automation and as you can see, it's pretty straight forward. + +Let's create an automation to buy BTC on your account when receiving a TradingView alert. + +### 1.7 Create an automation + +Lets create a new automation the `+` button + +<div style={{textAlign: "center"}}> + ![octobot create tradingview bot initial bot highlighted create + button](/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot-highlighted-create-button.png) +</div> + +And enter the following information on the `Create automation` panel: + +<div style={{textAlign: "center"}}> + ![octobot automation connection panel + highlights](/images/guides/trading-view/octobot-automation-connection-panel-highlights.png) +</div> + +- Name: the name of your automation, you can use the name you want to easily find your automation later on +- Order symbol: the traded symbol to create the order on. Please note that it has to follow the `/` format. For example BTCUSDT on TradingView would be BTC/USDT on your automation + +Press `Create` once you are satisfied with your configuration. + +:::info + If you need to use more advanced automations such as using dynamic values from Pine Script, cancelling orders, creating automated take profits or stop losses, check out [custom automations](tradingview-alerts-automation#tradingview-custom-automations). +::: + + +## 2. Configure TradingView to send alerts to your bot + +### 2.1 Open the connection panel of your automation + +<div style={{textAlign: "center"}}> + ![octobot open automation connection + panel](/images/guides/trading-view/octobot-open-automation-connection-panel.png) +</div> + +In the Automation section of your TradingView OctoBot, open the connection panel of the `Market buy BTC - 10% of USDT` automation. + +<div style={{textAlign: "center"}}> + ![octobot automation connection + panel](/images/guides/trading-view/octobot-automation-connection-panel.png) +</div> + +This panel show you all the necessary information to trigger the `Market buy BTC - 10% of USDT` automation from TradingView. + +### 2.2 Log in to TradingView + +Using another tab (to keep the connection panel open), go to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> and login or <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">create an account</a>. + +Binding TradingView to OctoBot uses alerts which can be sent to OctoBot using **Webhooks**. They Requires a "Essential" or higher plan. You can create a TradingView account and start a 30 days free trial using <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">this link</a>. + +### 2.3 Select your trading pair + +In our current automation, we are trading BTC/USDT. Let's select a BTCUSDT market on TradingView, for example the Binance one. + +<div style={{textAlign: "center"}}> + ![tradingview select btcusdt + market](/images/guides/trading-view/tradingview-select-btcusdt-market.png) +</div> + +### 2.4 Go to the Alerts tab + +![creating an alert from tradingview](/images/guides/trading-view/creating-an-alert-from-tradingview.png) + +From the alerts tab, click **Create alert**. + +### 2.5 Configure the webhook URL + +In the "Notfications" tab of the "Create Alert on BTCUSDT" menu on TradingView, enter the webhook URL of your OctoBot account. + +1. Select `Webhook URL` + + <div style={{textAlign: "center"}}> + ![creating an alert from tradingview webhook + url](/images/guides/trading-view/creating-an-alert-from-tradingview-webhook-url.png) + </div> + +2. Paste your webhook URL. You will find your webhook URL in the `3. Automate with webhook` section of the [connection panel](#21-open-the-connection-panel-of-your-automation) of your automation, on your OctoBot tab. + +<div style={{textAlign: "center"}}> + ![octobot automation webhook + url](/images/guides/trading-view/octobot-automation-webhook-url.png) +</div> + +You might need to enable two factor authentication on TradingView to enter a webhook URL. + +Your webhook notifications are now ready! + +:::info + The Webhook URL configuration only needs to be done once. Other alerts + you will create will automatically use this notifications configuration. +::: + +### 2.6 Create a new alert + +Going back to the "Settings" tab of the "Create Alert on BTCUSDT" menu on TradingView, fill in your alert details. + +<div style={{textAlign: "center"}}> + ![creating a price alert from + tradingview](/images/guides/trading-view/creating-a-price-alert-from-tradingview.png) +</div> + +In this tutorial, we will create a simple alert based on the BTC/USDT price that will trigger our `Market buy BTC - 10% of USDT` automation when the BTC price crosses 40.000 USDT on TradingView. + +1. Select a condition to trigger your alert on. +2. Name your alert. This name has no impact and is only for you to remember the alert +3. Fill the "Message" field with the **identifier of your automation** that you will find on the `2. Create an Alert` section of the [connection panel](#21-open-the-connection-panel-of-your-automation) of your automation, on your OctoBot tab. + +<div style={{textAlign: "center"}}> + ![octobot automation + identifier](/images/guides/trading-view/octobot-automation-identifier.png) +</div> +4. Press "Create" to save your TradingView alert. + +## 3. You are all set + +Congratulation! You now have a TradingView alert that will automatically trigger your `Market buy BTC - 10% of USDT` automation on your OctoBot account as soon as the alert will fire on TradingView. + +**[Start a TradingView OctoBot](https://www.octobot.cloud)** + +### 3.1 Following alerts + +Every time a new alert is received, your OctoBot will save it and you will be able to see its execution result. + +<div style={{textAlign: "center"}}> + ![octobot automations view with executed tradingview + alerts](/images/guides/trading-view/octobot-automations-view-with-executed-tradingview-alerts.png) +</div> + +You can also view your bot's full alert and automation history using the `See all` button + +<div style={{textAlign: "center"}}> + ![octobot automation + history](/images/guides/trading-view/octobot-automations-history.png) +</div> + +### 3.2 Creating other automations + +For each of your TradingView OctoBot, you can create as many automations as you want and trade on as many symbols as you wish. + +<div style={{textAlign: "center"}}> + ![octobot many tradingview + automations](/images/guides/trading-view/octobot-many-tradingview-automations.png) +</div> + +In this tutorial, we created a BTC/USDT automation triggered by a price event but there are many other ways to trigger TradingView alerts including: + +- Price actions, like the one we created in the tutorial +- Indicators, to trade using simple or sophisticated technical indicators +- Pine Script strategies, to trade from full TradingView strategies written in Pine Script + +<div style={{textAlign: "center"}}> + ![tradingview many btcusdt + alerts](/images/guides/trading-view/tradingview-many-btcusdt-alerts.png) +</div> + +Lean more on the different types of alerts on the [TradingView alerts automation guide](tradingview-alerts-automation). diff --git a/docs/content/investing/what-is-an-exchange-api-key.md b/docs/content/investing/what-is-an-exchange-api-key.md new file mode 100644 index 0000000000..c14df3d9b3 --- /dev/null +++ b/docs/content/investing/what-is-an-exchange-api-key.md @@ -0,0 +1,27 @@ +--- +title: "What are API Keys ?" +description: "Wondering what an exchange API Key is and why you should use it with a trading software ? Here is the simple explanation." +sidebar_position: 34 +--- + +# What is an exchange API Key + +In cryptocurrencies trading, API Keys are the go-to solution to allow trading software to create and cancel orders on your exchange account in a secure manner. It also presents the advantage of not requiring to disclose your exchange email or password. + +## API Keys on OctoBot +On OctoBot, your API Keys are used to execute a strategy, which means to: +- fetch your current portfolio balance +- fetch, create and cancel trading orders on your account + +## Permissions +API Keys can be configured with permissions. This is an additional security layer that is preventing any unwanted behavior. For example, if you have not activated withdrawals on an API Key, the exchange will never let any software proceed to withdrawal when using this API Key. + +For this reason, only **reading and trading permissions are required** for OctoBot to be able to execute a strategy. No other permission is required. + +**We strongly recommend that you do not add any other permission to any API Key given to any trading software, whether it is OctoBot or not.** + +## How to create your exchange account API Keys +To help you connect your exchange account to OctoBot using API Keys, we created detailed step by step guides: +- [Binance connection guide](connect-your-binance-account-to-octobot) +- [Kucoin connection guide](connect-your-kucoin-account-to-octobot) +- [Coinbase connection guide](connect-your-coinbase-account-to-octobot) diff --git a/docs/content/octobot-script/_category_.json b/docs/content/octobot-script/_category_.json new file mode 100644 index 0000000000..4c2e59345d --- /dev/null +++ b/docs/content/octobot-script/_category_.json @@ -0,0 +1 @@ +{"label": "OctoBot Script"} diff --git a/docs/content/octobot-script/creating-trading-orders.md b/docs/content/octobot-script/creating-trading-orders.md new file mode 100644 index 0000000000..5342a25ae9 --- /dev/null +++ b/docs/content/octobot-script/creating-trading-orders.md @@ -0,0 +1,86 @@ +--- +title: "Creating orders" +description: "Learn how to create market, limit, stop loss and trailing orders with python using OctoBot script." +sidebar_position: 6 +--- + +# Creating trading orders + +Orders can be created using the following keywords: +- `market` +- `limit` +- `stop_loss` +- `trailing_market` + +## Amount +Each order accept the following optional arguments: +- `amount`: for spot and futures trading +- `target_position`: futures trading only: create the associated order to update to position size to the given value. Uses the same format as the order amount. + +To specify the amount per order, use the following syntax: +- `0.1` to trade 0.1 BTC on BTC/USD +- `2%` to trade 2% of the total portfolio value +- `12%a` to trade 12% of the available holdings + +``` python +# create a buy market order using 10% of the total portfolio +await obs.market(ctx, "buy", amount="10%") +``` + +## Price +Orders set their price using the `offset` argument. + +To specify the order price, use the following syntax: +- `10` to set the price 10 USD above the current BTC/USD market price +- `2%` to set the price 2% USD above the current BTC/USD market price +- `@15555` to set the price at exactly 15555 USD regardless of the current BTC/USD market price + +``` python +# create a buy limit order of 0.2 units (BTC when trading BTC/USD) +# with a price at 1% below the current price +await obs.limit(ctx, "buy", amount="0.2", offset="-1%") +``` + +Note: market orders do not accept the `offset` argument. + +## Automated take profit and stop losses +When creating orders, it is possible to automate the associated +stop loss and / or take profits. When doing to, the associated take profit/stop loss will have +the same amount as the initial order. + +Their price can be set according to the same rules as the initial order price +(the `offset` argument) using the following optional argument: +- `stop_loss_offset`: automate a stop loss creation when the initial order is filled and set the stop loss price +- `take_profit_offset`: automate a take profit creation when the initial order is filled and set the take profit price + +``` python +# create a buy limit order of 0.2 units (BTC when trading BTC/USD) with: +# - price at 1% below the current price +# - stop loss at 10% loss +# - take profit at 15% profit +await obs.limit(ctx, "buy", amount="0.2", offset="-1%", stop_loss_offset="-10%", take_profit_offset="15%") +``` + +> When using both `stop_loss_offset` and `take_profit_offset`, two orders will be created after the initial order fill. +Those two orders will be grouped together, meaning that if one is cancelled or filled, the other will be cancelled. + +## Futures trading + +### Opening a position +Use regular orders to open a position. When the order is filled, the associated position will be created, updated or closed. + +A sell order will open a short position if your balance becomes negative after filling this order. + +### Closing a position +Set the position size to 0 to close it. You can do it either by: +- Filling an order with the same amount as the position size and an opposite side +- Or using `target_position=0` as order parameters + + +### Updating leverage + +Use `set_leverage` to update the current leverage value when trading futures. + +``` python +await obs.set_leverage(ctx, 5) +``` diff --git a/docs/content/octobot-script/fetching-history.md b/docs/content/octobot-script/fetching-history.md new file mode 100644 index 0000000000..c5e3677e7a --- /dev/null +++ b/docs/content/octobot-script/fetching-history.md @@ -0,0 +1,54 @@ +--- +title: "Historical data" +description: "Learn how to fetch and reuse exchange historical market trading data with python using OctoBot script." +sidebar_position: 14 +--- + + + +# Fetching trading data + +In order to run a backtest, OctoBot script requires historical +trading data, which is at least candles history. + +## Fetching new data + +When using OctoBot script, historical data can be fetched using: +`await obs.get_data(symbol, time frame)` + +Where: + +- symbol: the trading symbol to fetch data from. It can also be a list of symbols +- time frame: the time frame to fetch (1h, 4h, 1d, etc). It can also be a list of time frames + +Optional arguments: + +- start_timestamp: the unix timestamp to start fetching data from. Use <a href="https://www.epochconverter.com/" rel="nofollow">this converter</a> if you are unsure what you should use. +- exchange: the exchange to fetch data from. Default is "binance" +- exchange_type: the exchange trading type to fetch data from. Default is "spot", "future" is also possible on supported exchanges + +```python +data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) +``` + +## Re-using fetched data + +Calling `data = await obs.get_data` will save the downloaded data into the `backtesting/data` local folder. +If you want to speedup subsequent calls, you can provide the `data_file` optional argument to read +data from this file instead of downloading historical data. This also makes it possible to run a +script while being offline. + +You can get the name of the downloaded backtesting file by accessing +`data.data_files[0]` + +```python +data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) +# print the name of the downloaded data file +print(data.data_files[0]) +``` + +```python +datafile = "ExchangeHistoryDataCollector_1671754854.5234916.data" +# will not download historical data as a local data_file is provided +data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400, data_file=datafile) +``` diff --git a/docs/content/octobot-script/getting-started.md b/docs/content/octobot-script/getting-started.md new file mode 100644 index 0000000000..495a2c56c0 --- /dev/null +++ b/docs/content/octobot-script/getting-started.md @@ -0,0 +1,103 @@ +--- +title: "Start scripting" +description: "Harness the power of the OctoBot framework within your own python scripted trading strategies while keeping it as simple as a TradingView Pine Script." +sidebar_position: 17 +--- + + + +# OctoBot script + +:::info +For users of <a href="https://github.com/Drakkar-Software/OctoBot-script" rel="nofollow">OctoBot script</a>. +::: + +## The script-based trading framework using OctoBot + +> OctoBot script is in early alpha version + +OctoBot script allows you to harness the power of the OctoBot framework while keeping it as simple as a TradingView Pine Script. + +With OctoBot script, automate your trading strategies using your own highly optimized scripts + +- Whether it is from your scripted strategy ideas, like on <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> Pine Script +- Or using an advanced AI based strategy + +## Install OctoBot script from pip + +> OctoBot script requires **Python 3.10** + +```{.sourceCode .bash} +python3 -m pip install OctoBot wheel appdirs==1.4.4 +python3 -m pip install octobot-script +``` + +## Script example: RSI strategy + +In this example, OctoBot script allows to quickly create a <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">RSI</a> +based trading strategy including: + +- a take profit at 25% profits +- a stop loss at 15% loss + +```python + + +async def rsi_test(): + async def strategy(ctx): + # Will be called at each candle. + if run_data["entries"] is None: + # Compute entries only once per backtest. + closes = await obs.Close(ctx, max_history=True) + times = await obs.Time(ctx, max_history=True, use_close_time=True) + rsi_v = tulipy.rsi(closes, period=ctx.tentacle.trading_config["period"]) + delta = len(closes) - len(rsi_v) + # Populate entries with timestamps of candles where RSI is + # below the "rsi_value_buy_threshold" configuration. + run_data["entries"] = { + times[index + delta] + for index, rsi_val in enumerate(rsi_v) + if rsi_val < ctx.tentacle.trading_config["rsi_value_buy_threshold"] + } + await obs.plot_indicator(ctx, "RSI", times[delta:], rsi_v, run_data["entries"]) + if obs.current_live_time(ctx) in run_data["entries"]: + # Uses pre-computed entries times to enter positions when relevant. + # Also, instantly set take profits and stop losses. + # Position exists could also be set separately. + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") + + # Configuration that will be passed to each run. + # It will be accessible under "ctx.tentacle.trading_config". + config = { + "period": 10, + "rsi_value_buy_threshold": 28, + } + + # Read and cache candle data to make subsequent backtesting runs faster. + data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) + run_data = { + "entries": None, + } + # Run a backtest using the above data, strategy and configuration. + res = await obs.run(data, strategy, config) + print(res.describe()) + # Generate and open report including indicators plots + await res.plot(show=True) + # Stop data to release local databases. + await data.stop() + + +# Call the execution of the script inside "asyncio.run" as +# OctoBot script runs using the python asyncio framework. +asyncio.run(rsi_test()) +``` + +## Generated report + +![octobot pro report btc usdt with chart trades portfolio value and rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +## Join the community + +We recently created a telegram channel dedicated to OctoBot script. + +<a href="https://t.me/+366CLLZ2NC0xMjFk" rel="nofollow">Telegram News</a> diff --git a/docs/content/octobot-script/plotting-anything.md b/docs/content/octobot-script/plotting-anything.md new file mode 100644 index 0000000000..4a7dc5c99b --- /dev/null +++ b/docs/content/octobot-script/plotting-anything.md @@ -0,0 +1,39 @@ +--- +title: "Plotting anything" +description: "Learn how to plot any type of data on your strategy run report with python using OctoBot script." +sidebar_position: 11 +--- + + + +# Plotting anything + +Anything can be plotted on your strategy run report using the `plot(ctx, name, ...)` keyword. +The plot arguments are converted into <a href="https://plotly.com/javascript/" rel="nofollow">plotly</a> charts parameters. + +Where: + +- `name`: name of the indicator on the chart + +Optional arguments: + +- `x`: values to use for the x axis +- `y`: values to use for the y axis +- `z`: values to use for the z axis +- `text`: point labels +- `mode`: plotly mode ("lines", "markers", "lines+markers", "lines+markers+text", "none") +- `chart`: "main-chart" or "sub-chart" (default is "sub-chart") +- `own_yaxis`: when True, uses an independent y axis for this plot (default is False) +- `color`: color the of plot +- `open`: open values for a candlestick chart +- `high`: high values for a candlestick chart +- `low`: low values for a candlestick chart +- `close`: close values for a candlestick chart +- `volume`: volume values for a candlestick chart +- `low`: low values for a candlestick chart + +Example: + +```python +await obs.plot(ctx, "RSI", x=time_values, y=indicator_values, mode="markers") +``` diff --git a/docs/content/octobot-script/plotting-indicators.md b/docs/content/octobot-script/plotting-indicators.md new file mode 100644 index 0000000000..bad802c146 --- /dev/null +++ b/docs/content/octobot-script/plotting-indicators.md @@ -0,0 +1,21 @@ +--- +title: "Plotting indicators" +description: "Learn how to plot technical indicators such as RSI or EMA on your strategy run report with python using OctoBot script." +sidebar_position: 10 +--- + +# Plotting indicators +Indicators and associated signals can be easily plotted using the +`plot_indicator(ctx, name, x, y, signals)` keyword. + +Where: +- `name`: name of the indicator on the chart +- `x`: values to use for the x axis +- `y`: values to use for the y axis +- `signal`: (optional) x values for which a signal is fired + +Example where the goal is to plot the value of the rsi indicator from +the [example script](/guides/octobot-script#script-example-rsi-strategy). +``` python +await obs.plot_indicator(ctx, "RSI", time_values, indicator_values, signal_times) +``` diff --git a/docs/content/octobot-script/run-report.md b/docs/content/octobot-script/run-report.md new file mode 100644 index 0000000000..b5c1ed3fb5 --- /dev/null +++ b/docs/content/octobot-script/run-report.md @@ -0,0 +1,26 @@ +--- +title: "Run report" +description: "Learn how to create, display and find your strategy run report at the end of each OctoBot script strategy execution." +sidebar_position: 9 +--- + +# Run report + +Each full execution of your strategy can generate a complete report. + +To generate a report at the end of a strategy run, add the following instruction + +```python +await res.plot(show=True) +``` + +> Tip: Use the `show` parameter to automatically open the report on your web browser + +![octobot pro report btc usdt with chart trades portfolio value and rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +By default, each run report is stored in its run directory, in +`user/data/BacktesterTradingMode/default_campaign/backtesting/backtesting_X/report.html`. +Where X is the identifier of your backtesting run. + +> This report can be customized to include any information that would be useful to you. +Do customize your report, checkout the following articles. diff --git a/docs/content/octobot-script/strategies.md b/docs/content/octobot-script/strategies.md new file mode 100644 index 0000000000..dcffb687f2 --- /dev/null +++ b/docs/content/octobot-script/strategies.md @@ -0,0 +1,104 @@ +--- +title: "Strategies" +description: "Learn how to create, run and backtest your automated trading strategies using a simple TradingView Pine Script like language with OctoBot script." +sidebar_position: 3 +--- + +# OctoBot script strategies + +On OctoBot script, similarly to TradingView Pine Script, a trading strategy is a python async function that will be called at new price data. +``` python +async def strategy(ctx): + # your strategy content +``` + +In most cases, a strategy will: +1. Read price data +2. Use technical evaluators or statistics +3. Decide to take (or not take) action depending on its configuration +4. Create / cancel or edit orders (see [Creating orders](creating-trading-orders)) + +As OctoBot script strategies are meant for backtesting, it is possible to create a strategy in 2 ways: +## Pre-computed strategies +Pre-computed are only possible in backtesting: since the data is already known, when dealing with technical +evaluator based strategies, it is possible to compute the values of the evaluators for the whole backtest at once. +This approach is faster than iterative strategies as evaluators call only called once. + +Warning: when writing a pre-computed strategy, always make sure to associate the evaluator values to the +right time otherwise you might be reading data from the past of the future when running the strategy. + +``` python +config = { + "period": 10, + "rsi_value_buy_threshold": 28, +} +run_data = { + "entries": None, +} +async def strategy(ctx): + if run_data["entries"] is None: + # 1. Read price data + closes = await obs.Close(ctx, max_history=True) + times = await obs.Time(ctx, max_history=True, use_close_time=True) + # 2. Use technical evaluators or statistics + rsi_v = tulipy.rsi(closes, period=ctx.tentacle.trading_config["period"]) + delta = len(closes) - len(rsi_v) + # 3. Decide to take (or not take) action depending on its configuration + run_data["entries"] = { + times[index + delta] + for index, rsi_val in enumerate(rsi_v) + if rsi_val < ctx.tentacle.trading_config["rsi_value_buy_threshold"] + } + await obs.plot_indicator(ctx, "RSI", times[delta:], rsi_v, run_data["entries"]) + if obs.current_live_time(ctx) in run_data["entries"]: + # 4. Create / cancel or edit orders + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") +``` +This pre-computed strategy computes entries using the RSI: times of favorable entries are stored into +`run_data["entries"]` which is defined outside on the `strategy` function in order to keep its values +throughout iterations. + +Please note the `max_history=True` in `obs.Close` and `obs.Time` keywords. This is allowing to select +data using the whole run available data and only call `tulipy.rsi` once and populate `run_data["entries"]` +only once. + +In each subsequent call, `run_data["entries"] is None` will be `True` and only the last 2 lines of +the strategy will be executed. + +## Iterative strategies +``` python +config = { + "period": 10, + "rsi_value_buy_threshold": 28, +} +async def strategy(ctx): + # 1. Read price data + close = await obs.Close(ctx) + if len(close) <= ctx.tentacle.trading_config["period"]: + # not enough data to compute RSI + return + # 2. Use technical evaluators or statistics + rsi_v = tulipy.rsi(close, period=ctx.tentacle.trading_config["period"]) + # 3. Decide to take (or not take) action depending on its configuration + if rsi_v[-1] < ctx.tentacle.trading_config["rsi_value_buy_threshold"]: + # 4. Create / cancel or edit orders + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") +``` +This iterative strategy is similar to the above pre-computed strategy except that it is evaluating the RSI +at each candle to know if an entry should be created. + +This type of strategy is simpler to create than a pre-computed strategy and can be used in +OctoBot live trading. + +## Running a strategy + +When running a backtest, a strategy should be referenced alongside: +- The [data it should be run on](fetching-history) using `obs.run`: +- Its configuration (a dict in above examples, it could be anything) + +``` python +res = await obs.run(data, strategy, config) +``` + +Have a look [at the demo script](/guides/octobot-script#script-example-rsi-strategy) for a full example of +how to run a strategy within a python script. diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts new file mode 100644 index 0000000000..41742d05e0 --- /dev/null +++ b/docs/docusaurus.config.ts @@ -0,0 +1,274 @@ +import {themes as prismThemes} from 'prism-react-renderer'; +import type {Config} from '@docusaurus/types'; +import type * as Preset from '@docusaurus/preset-classic'; + +const config: Config = { + title: 'OctoBot Documentation', + tagline: 'Open-source cryptocurrency trading bot', + favicon: 'img/favicon.ico', + + future: { + v4: true, + }, + + url: 'https://docs.octobot.cloud', + baseUrl: '/', + + organizationName: 'Drakkar-Software', + projectName: 'OctoBot', + trailingSlash: false, + + onBrokenLinks: 'warn', + onBrokenAnchors: 'warn', + + i18n: { + defaultLocale: 'en', + locales: ['en', 'fr'], + localeConfigs: { + en: {label: 'English', direction: 'ltr'}, + fr: {label: 'Français', direction: 'ltr'}, + }, + }, + + headTags: [ + { + tagName: 'script', + attributes: {type: 'application/ld+json'}, + innerHTML: JSON.stringify({ + '@context': 'https://schema.org', + '@type': 'SoftwareApplication', + name: 'OctoBot', + applicationCategory: 'FinanceApplication', + operatingSystem: 'Linux, macOS, Windows, Docker', + url: 'https://www.octobot.cloud', + author: { + '@type': 'Organization', + name: 'Drakkar-Software', + url: 'https://github.com/Drakkar-Software', + }, + }), + }, + ], + + markdown: { + format: 'detect', + hooks: { + onBrokenMarkdownLinks: 'throw', + }, + }, + + plugins: [ + [require.resolve('docusaurus-lunr-search'), { + languages: ['en', 'fr'], + }], + [ + '@docusaurus/plugin-client-redirects', + { + redirects: [ + {from: '/guides/developers', to: '/developers/getting-started'}, + {from: '/guides/octobot-developers-environment/setup-your-environment', to: '/developers/environment/setup-your-environment'}, + {from: '/guides/octobot-developers-environment/architecture', to: '/developers/architecture/design-philosophy'}, + {from: '/guides/octobot-developers-environment/environment-variables', to: '/developers/environment/environment-variables'}, + {from: '/guides/octobot-developers-environment/github-repositories', to: '/developers/environment/github-repositories'}, + {from: '/guides/octobot-developers-environment/running-tests', to: '/developers/environment/running-tests'}, + {from: '/guides/octobot-developers-environment/tips', to: '/developers/environment/tips'}, + {from: '/guides/octobot-tentacles-development/create-a-tentacle', to: '/developers/tentacles-dev/create-a-tentacle'}, + {from: '/guides/octobot-tentacles-development/create-a-tentacle-package', to: '/developers/tentacles-dev/create-a-tentacle-package'}, + {from: '/guides/octobot-tentacles-development/customize-your-octobot', to: '/developers/tentacles-dev/customize-your-octobot'}, + {from: '/guides/octobot-script', to: '/octobot-script/getting-started'}, + {from: '/guides/octobot-script-docs/creating-trading-orders', to: '/octobot-script/creating-trading-orders'}, + {from: '/guides/octobot-script-docs/fetching-history', to: '/octobot-script/fetching-history'}, + {from: '/guides/octobot-script-docs/plotting-anything', to: '/octobot-script/plotting-anything'}, + {from: '/guides/octobot-script-docs/plotting-indicators', to: '/octobot-script/plotting-indicators'}, + {from: '/guides/octobot-script-docs/run-report', to: '/octobot-script/run-report'}, + {from: '/guides/octobot-script-docs/strategies', to: '/octobot-script/strategies'}, + ], + createRedirects(existingPath) { + // Redirect /guides/octobot-partner-exchanges/<slug> → /guides/exchanges/<slug> + if (existingPath.startsWith('/guides/exchanges/')) { + const rest = existingPath.replace('/guides/exchanges/', ''); + return [ + `/guides/octobot-partner-exchanges/${rest}`, + `/guides/octobot-supported-exchanges/${rest}`, + ]; + } + return undefined; + }, + }, + ], + ], + + presets: [ + [ + 'classic', + { + docs: { + path: 'content', + routeBasePath: '/', + sidebarPath: './sidebars.ts', + editUrl: + 'https://github.com/Drakkar-Software/OctoBot/tree/dev/docs/', + showLastUpdateTime: true, + showLastUpdateAuthor: true, + }, + blog: { + path: 'blog', + routeBasePath: 'blog', + showReadingTime: true, + blogTitle: 'OctoBot Blog', + blogDescription: 'News, updates, and guides from the OctoBot team', + blogSidebarTitle: 'Recent posts', + blogSidebarCount: 0, + postsPerPage: 9, + onUntruncatedBlogPosts: 'warn', + feedOptions: { + type: ['rss', 'atom'], + title: 'OctoBot Blog', + description: 'News, updates, and guides from the OctoBot team', + copyright: `Copyright ${new Date().getFullYear()} Drakkar-Software`, + }, + }, + theme: { + customCss: './src/css/custom.css', + }, + sitemap: { + priority: 0.5, + filename: 'sitemap.xml', + }, + } satisfies Preset.Options, + ], + ], + + themeConfig: { + metadata: [ + {name: 'robots', content: 'noindex'}, + {name: 'twitter:card', content: 'summary_large_image'}, + {name: 'twitter:site', content: '@OctoBotTrading'}, + {property: 'og:type', content: 'website'}, + {property: 'og:site_name', content: 'OctoBot Documentation'}, + {name: 'keywords', content: 'octobot, crypto, trading bot, open source, automated trading'}, + ], + colorMode: { + defaultMode: 'light', + respectPrefersColorScheme: true, + }, + navbar: { + title: 'OctoBot', + logo: { + alt: 'OctoBot Logo', + src: 'img/logo-light-512.png', + srcDark: 'img/logo-dark-512.png', + }, + items: [ + { + type: 'docSidebar', + sidebarId: 'guides', + position: 'left', + label: 'Guides', + }, + { + type: 'docSidebar', + sidebarId: 'investing', + position: 'left', + label: 'OctoBot Cloud', + }, + { + type: 'docSidebar', + sidebarId: 'octobot-script', + position: 'left', + label: 'OctoBot Script', + }, + { + type: 'docSidebar', + sidebarId: 'developers', + position: 'left', + label: 'Developers', + }, + { + type: 'localeDropdown', + position: 'right', + }, + { + href: 'https://www.octobot.cloud', + label: 'OctoBot Cloud', + position: 'right', + }, + { + href: 'https://github.com/Drakkar-Software/OctoBot', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Documentation', + items: [ + {label: 'Guides', to: '/guides/octobot'}, + {label: 'OctoBot Cloud', to: '/investing/introduction'}, + {label: 'Blog', to: '/blog'}, + {label: 'Developers', to: '/developers/getting-started'}, + ], + }, + { + title: 'Community', + items: [ + { + label: 'Discord', + href: 'https://discord.gg/vHkcb8W', + }, + { + label: 'Telegram', + href: 'https://t.me/OctoBot_Project', + }, + { + label: 'X / Twitter', + href: 'https://x.com/OctoBotTrading', + }, + ], + }, + { + title: 'More', + items: [ + { + label: 'OctoBot Cloud', + href: 'https://www.octobot.cloud', + }, + { + label: 'GitHub', + href: 'https://github.com/Drakkar-Software/OctoBot', + }, + { + label: 'Terms of Use', + to: '/terms', + }, + { + label: 'Privacy Policy', + to: '/terms/privacy', + }, + ], + }, + ], + copyright: `Copyright \u00a9 ${new Date().getFullYear()} Drakkar-Software. Built with Docusaurus.`, + }, + docs: { + sidebar: { + autoCollapseCategories: true, + hideable: true, + }, + }, + tableOfContents: { + minHeadingLevel: 2, + maxHeadingLevel: 3, + }, + prism: { + theme: prismThemes.github, + darkTheme: prismThemes.dracula, + additionalLanguages: ['python', 'bash', 'json', 'yaml', 'toml'], + }, + } satisfies Preset.ThemeConfig, +}; + +export default config; diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2022-06-18-trading-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-06-18-trading-strategy.md new file mode 100644 index 0000000000..a47340721c --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-06-18-trading-strategy.md @@ -0,0 +1,41 @@ +--- +title: "Qu'est-ce qu'une stratégie de Trading" +description: "Discover what is a trading strategy and why you should automate it." +slug: "trading-strategy" +date: "2022-06-18" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Strategy", "Automation", "Educational"] +image: "/images/blog/trading-strategy-automation/cover.png" +--- + + + +# Les Stratégies de Trading + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/trading-strategy-automation/cover.png) + +A trading strategy is a set of rules or guidelines that traders use to determine when to buy and sell assets in the financial markets. While there are many different strategies that traders can employ, some common elements include the use of technical analysis, risk management, and market psychology. In this article, we will take a look at some of the most popular trading strategies and how they can be used to improve your own trading. + +## Qu'est-ce qu'une stratégie de Trading ? + +A trading strategy is a plan that outlines how you will trade cryptocurrencies, stocks, options and more. It includes when you will buy and sell, what you will buy and sell, and how much you are willing to risk. A trading strategy should be based on your investment goals and risk tolerance. + +## Pourquoi utiliser une stratégie de trading ? + +If you are new to trading, or even if you have some experience, you may be wondering why using a trading strategy is so important. There are many different reasons why having a good trading strategy can be beneficial. + +First of all, having a trading strategy can help to keep you disciplined. It can be very easy to get caught up in the excitement of trading and make decisions based on emotions rather than logic. A trading strategy can help to take the emotion out of decision making by providing clear rules to follow. This can help to prevent impulsive decisions that might lead to losses. + +Another reason why using a trading strategy is important is that it can help you to stay focused. There are so many different things that you need to keep track of when trading that it can be easy to get distracted. Having a clear strategy can help you to stay focused on what is important and ignore everything else. This can lead to better decision making and improved results. + +Finally, having a good trading strategy can help you to manage your risk. Risk management is an essential part of successful trading, and a good strategy will allow you to control your risk while still giving you the opportunity to make profits. Without proper risk management, it becomes difficult to protect your capital and can lead to significant losses. + +## Automatiser votre propre stratégie de trading avec OctoBot + +In the blog section, we will discuss how you can use OctoBot to automate your own trading strategy. OctoBot is a powerful tool that can help you take your trading to the next level. By automating your trading strategy, you can free up your time to focus on other important aspects of your life. OctoBot can help you stay disciplined with your trading and ensure that you are always following your predetermined rules. In addition, OctoBot can also help you manage your risk by automatically adjusting your position size according to your risk profile. + +[Let's start automating your strategy](/fr) diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2022-07-01-hollaex-partnership.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-07-01-hollaex-partnership.md new file mode 100644 index 0000000000..23d37d33ff --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-07-01-hollaex-partnership.md @@ -0,0 +1,30 @@ +--- +title: "OctoBot est maintenant compatible avec Hollaex" +description: "OctoBot s'associe à Hollaex, la première plateforme d'échange crypto décentralisée." +slug: "hollaex-partnership" +date: "2022-07-01" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "Hollaex"] +image: "/images/blog/hollaex-partnership/cover.jpg" +--- + + + +# OctoBot est maintenant compatible avec Hollaex + +![cover](/images/blog/hollaex-partnership/cover.jpg) + +OctoBot est ravi d'annoncer qu'il a ajouté Hollaex, la première plateforme d'échange de cryptomonnaies décentralisée au monde, dans sa liste de plateformes de trading partenaires ! + +## Qu'est-ce que Hollaex ? + +<a href="https://www.hollaex.com?utm_source=octobot" rel="nofollow">HollaEx</a> est bien plus qu'un simple logiciel crypto en marque +blanche, c'est votre boîte à outils pour connecter votre entreprise au monde de +la blockchain. HollaEx vous permet de lancer un échange, avec vos propres +marchés et actifs sur votre domaine. Hollaex est un kit logiciel d'échange open +source qui permet à quiconque de démarrer une entreprise crypto. OctoBot est +maintenant le premier bot de trading crypto à intégrer le support de Hollaex ! + +## Comment trader sur Hollaex avec OctoBot ? + +OctoBot prend en charge le trading Hollaex via un module dédié. Un guide de configuration simple est disponible sur [la configuration du compte HollaEx](/guides/exchanges/hollaex/account-setup). diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2022-09-30-profile-sharing-in-octobot-cloud.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-09-30-profile-sharing-in-octobot-cloud.md new file mode 100644 index 0000000000..21def49962 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-09-30-profile-sharing-in-octobot-cloud.md @@ -0,0 +1,71 @@ +--- +title: "Partager un profile sur OctoBot cloud" +description: "You can now share your OctoBot profiles with the community" +slug: "profile-sharing-in-octobot-cloud" +date: "2022-09-30" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Exchange", "OctoBot cloud"] +image: "/images/blog/profile-sharing-in-octobot-cloud/cover.jpg" +--- + + + +# Partager un profile sur OctoBot cloud + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/profile-sharing-in-octobot-cloud/cover.jpg) + +## Pourquoi partager un profile OctoBot ? + +In OctoBot, your configuration is stored as a profile. A profile contains: + +- Your activated trading mode and evaluators alongside their configuration +- Your traded pairs +- The exchanges you are currently using (without authentication credentials) + +Sharing your profile means sharing your current trading strategy configuration. Anyone using your profile will be able to trade in the exact same way as you do with your OctoBot. + +## Comment partager un profile ? + +1. Select and download it from your OctoBot + ![Profile-sharing-from-octobot](/images/blog/profile-sharing-in-octobot-cloud/bot-share.jpg) +2. Login on [OctoBot cloud](/fr), go to `Editor` and `Publish a new strategy` + ![Profile-sharing-octobot-cloud-editor](/images/blog/profile-sharing-in-octobot-cloud/editor.jpg) +3. Enter your profile name, description and logo + ![Profile-sharing-octobot-cloud-publish](/images/blog/profile-sharing-in-octobot-cloud/publish.jpg) +4. Upload the profile as downloaded from your OctoBot + ![Profile-sharing-octobot-cloud-publish-profile](/images/blog/profile-sharing-in-octobot-cloud/publish-profile.jpg) +5. Submit your profile to make it available to everyone on OctoBot cloud + +Note: For now, we are manually checking profiles, therefore there will be a short delay before your profile will be available to everyone on OctoBot cloud. + +## Comment utiliser un profile d'OctoBot cloud ? + +1. Go yo the profile you want to use and click `Subscribe` + ![Profile-sharing-octobot-cloud-subscribe](/images/blog/profile-sharing-in-octobot-cloud/sub.jpg) +2. Now that you are subscribing to the profile, click `Copy download url` + ![Profile-sharing-octobot-cloud-copy](/images/blog/profile-sharing-in-octobot-cloud/copy.jpg) +3. From your OctoBot, click `Import a profile` + ![Profile-sharing-from-octobot-import](/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg) +4. Paste the download url (that was copied from step 2) and click `Import` + ![Profile-sharing-from-octobot-import-url](/images/blog/profile-sharing-in-octobot-cloud/bot-import-link.jpg) +5. The new profile is now available in your OctoBot + ![Profile-sharing-from-octobot-importe](/images/blog/profile-sharing-in-octobot-cloud/bot-imported.jpg) + +## Prochaines étapes + +Sharing profiles is the first step towards [OctoBot cloud](/fr) as a platform where OctoBot users can share whole trading strategies. Profiles are merely strategy configurations and we will soon add the possibility to share whole strategies. + +With OctoBot cloud, you will be able to: + +- Use and share any strategy with our without sharing its code and configuration +- Checkout past performances of available strategies +- Create and use paid or free strategies that are made by the community: creators are incentivized to create strategies with the best results to earn money from paid subscribers that use the strategies + +## Rejoindre la bêta + +Sharing profiles will first be available on the [beta OctoBot cloud](https://beta.octobot.cloud/). +To join the OctoBot beta program, [have a look our beta program](/guides/octobot-advanced-usage/beta-program) diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2022-11-following-strategies-in-octobot-cloud.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-11-following-strategies-in-octobot-cloud.md new file mode 100644 index 0000000000..2d3c3a22f5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-11-following-strategies-in-octobot-cloud.md @@ -0,0 +1,66 @@ +--- +title: "Suivre des stratégies avec OctoBot cloud" +description: "You can now follow trading strategies of the community" +slug: "following-strategies-in-octobot-cloud" +date: "2022-11" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Strategy", "Exchange", "OctoBot cloud"] +image: "/images/blog/following-strategies-in-octobot-cloud/cover.png" +--- + + + +# Suivre les meilleurs stratégies + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/following-strategies-in-octobot-cloud/cover.png) + +On OctoBot cloud, you can subscribe to trading strategies. Subscribing to a strategy allows you to easily trade using a strategy made by someone else from the OctoBot community. + +When subscribed to a strategy, you can use the strategy profile directly from your OctoBot. When you do so, your OctoBot will follow the strategy by coping any trade made by this strategy. Order amounts will be adapted to your current portfolio. + +## Comment utiliser une stratégie ? + +1. Login on [OctoBot cloud](/fr) and go to the desired strategy page +2. Click `Subscribe` + ![Following-strategies-pre-sub](/images/blog/following-strategies-in-octobot-cloud/pre-sub.png) +3. Now that you are subscribing to the strategy, click `Copy download url` +4. From your OctoBot, login to your OctoBot cloud account + ![Following-strategies-community](/images/blog/following-strategies-in-octobot-cloud/community.png) +5. Go to the `Profile` tab and click on the name of the current profile, click `Import a profile` + ![Following-strategies-import](/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg) +6. Paste the download url (that was copied from step 3) and click `Import` + ![Following-strategies-imported](/images/blog/following-strategies-in-octobot-cloud/imported.png) +7. Use the imported profile and restart your OctoBot + +## Comment ça fonctionne ? + +When following a strategy, a user gets access to the trading signals of the strategy. Trading signals are emitted at each order created or cancelled by the followed strategy. This way followers of a strategy benefit from trades of the desired strategy in real time directly from their OctoBot. Strategies can be applied to any exchange as long as the strategy trading pairs are supported. You can follow a strategy with real or simulated trading. + +Trading through strategy signals is achieved by using the RemoteTradingSignalsTradingMode configured to follow the strategy you selected. When importing a strategy profile, you are importing an already configured profile that enables this trading mode with the right strategy identifier and the strategy traded pairs and default exchange. + +![Following-strategies-mode-config](/images/blog/following-strategies-in-octobot-cloud/mode-config.png) + +As following a strategy is only possible through OctoBot cloud, you need to login to your OctoBot cloud account from your OctoBot to be able to follow a strategy. + +## Comment publier une stratégie sur OctoBot cloud ? + +Trading strategies are published on [OctoBot cloud](/fr) by the OctoBot community. +When a user wants to share a trading strategy, the only thing to do is to: + +1. Create a strategy on [OctoBot cloud](/fr) +2. Setup the desired OctoBot trading mode to emit trading signals to this strategy + ![Following-strategies-config](/images/blog/following-strategies-in-octobot-cloud/config.png) + +Note: the identifier of the strategy to emit signal on can be found on the strategy page, by clicking on this button +![Following-strategies-id-button](/images/blog/following-strategies-in-octobot-cloud/id-button.png) + +Please note that configuration and content of a published strategy is not uploaded to OctoBot cloud and followers can't access the code or configuration of the strategy. They will only get trading signals when the OctoBot that is actually running the strategy will create or cancel orders. + +## Rejoindre la bêta + +Following strategies will first be available on the [beta OctoBot cloud](https://beta.octobot.cloud/). +To join the OctoBot beta program, [have a look at our beta program](/guides/octobot-advanced-usage/beta-program) diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2022-12-11-octobots-in-octobot-cloud.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-12-11-octobots-in-octobot-cloud.md new file mode 100644 index 0000000000..cfe79b452e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2022-12-11-octobots-in-octobot-cloud.md @@ -0,0 +1,52 @@ +--- +title: "Facilement déployer votre OctoBot dans OctoBot cloud" +description: "You can now easily deploy your OctoBot directly in OctoBot cloud" +slug: "octobots-in-octobot-cloud" +date: "2022-12-11" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Strategy", "OctoBot cloud"] +image: "/images/blog/octobots-in-octobot-cloud/cover.png" +--- + + + +# Facilement déployer votre OctoBot sur OctoBot cloud + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/octobots-in-octobot-cloud/cover.png) + +## Votre OctoBot, toujours en ligne et accessible + +The main benefit of hosting OctoBot in the cloud are that it will be remain online, no need to install OctoBot or have it running your computer anymore. +Moreover, your OctoBots can be accessed from anywhere, as long as you have an internet connection. + +This can be especially useful for traders who need to monitor their OctoBot from different locations or devices. + +## Comment déployer votre OctoBot sur OctoBot cloud ? + +1. Login on [OctoBot cloud](/fr) and go to `My bots`. + ![my-bots-button](/images/blog/octobots-in-octobot-cloud/my-bots-button.jpg) +2. Click on `Deploy now` from the `Discover` card. + ![deploy-now](/images/blog/octobots-in-octobot-cloud/deploy-now.jpg) +3. Wait until your OctoBot is available. This may take a few minutes. + ![deploying](/images/blog/octobots-in-octobot-cloud/deploying.png) +4. Access your personal OctoBot using the `Open Interface` button. + ![open-interface](/images/blog/octobots-in-octobot-cloud/open-interface.png) +5. Unlock your OctoBot with your OctoBot cloud account password. + ![login](/images/blog/octobots-in-octobot-cloud/login.png) +6. Enjoy your OctoBot from anywhere. + +Note: running a cloud OctoBot requires OctoBot cloud credits. We will soon add ways to get those credits. Fow now please ask the OctoBot team if you need beta credits. + +## Rejoindre la bêta + +If you're interested in participating, be sure to follow any guidelines and instructions provided by the OctoBot team, and take the time to thoroughly test the application and provide useful feedback. + +Cloud OctoBots will first be available on the [beta OctoBot cloud](https://beta.octobot.cloud/). + +To participate in the beta testing of OctoBot's cloud hosting, you'll need to sign up to be a beta tester. + +Once you've been accepted into the program, please follow [these instructions](/guides/octobot-advanced-usage/beta-program) to access the beta version of the application and any other necessary information. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-05-17-trading-using-tradingview.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-05-17-trading-using-tradingview.md new file mode 100644 index 0000000000..6e7892f87b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-05-17-trading-using-tradingview.md @@ -0,0 +1,53 @@ +--- +title: "Trader automatiquement avec TradingView" +description: "Automate your trades using any TradingView indicator" +slug: "trading-using-tradingview" +date: "2023-05-17" +authors: ["paul"] +tags: ["Tradingview", "Pine Script", "Webhook", "Strategy", "OctoBot cloud", "Educational"] +image: "/images/blog/trading-using-tradingview/cover.png" +--- + + + +# Trader automatiquement avec TradingView + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/trading-using-tradingview/cover.png) + +## Trader en utilisant vos stratégies TradingView favorites + +You love using TradingView indicators and strategies ? With OctoBot, you can take it to the next level and trade using TradingView strategies and indicators directly the exchange you want. + +This means that you can use all the OctoBot features according to your TradingView tools, this includes: + +- Trading on your favorite exchange(s) using your TradingView strategy +- Test your TradingView strategy in real time with simulated funds +- Get real time notifications when your TradingView strategy sends a buy or sell signal + +## Vos stratégies TradingView directement dans votre OctoBot + +When following a TradingView strategy, your OctoBot will listen for TradingView signals and when signals are received, it will react instantly by creating the associated alert and order(s), which can be simulated or real, on any supported exchange. + +![plan-display](/images/blog/trading-using-tradingview/telegram.png) + +You can send details on the order to create directly from the TradingView signal such as the type of order, the take profit and stop loss prices and much more. View the full details of orders signals on [the TradingView signals guide](/guides/octobot-interfaces/tradingview/#alert-format). + +## Comment lier votre compte TradingView à votre OctoBot ? + +### Utiliser OctoBot cloud + +When using [OctoBot trading bots](https://www.octobot.cloud/trading-bot), all you need to do is to [create TradingView alerts](/guides/octobot-interfaces/tradingview#create-an-alert) on any event, directly from Pine Script or from a custom alert. + +Cloud OctoBots' webhook configuration is done automatically and does not require any work. + +### Utiliser OctoBot en self hosted + +When using a self hosted OctoBot, you will have to configure a way to make your OctoBot reachable from a webhook. This is required for TradingView to send signals to your OctoBot and might require an external paid software. + +Please have a look at the [webhook manual configuration](/guides/octobot-interfaces/tradingview/using-a-webhook). + +Once your webhook setup, you can [create TradingView alerts](/guides/octobot-interfaces/tradingview#create-an-alert) on any event, directly from Pine Script or from a custom alert. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-09-19-introducing-the-new-octobot-cloud.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-09-19-introducing-the-new-octobot-cloud.mdx new file mode 100644 index 0000000000..a21d442833 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-09-19-introducing-the-new-octobot-cloud.mdx @@ -0,0 +1,105 @@ +--- +title: "Présentation du nouvel OctoBot cloud" +description: "OctoBot cloud, a new way to profit from trading strategies" +slug: "introducing-the-new-octobot-cloud" +date: "2023-09-19" +authors: ["guillaume"] +tags: ["Free", "Cryptocurrency", "Trading", "Strategy", "Exchange", "OctoBot cloud"] +image: "/images/blog/introducing-the-new-octobot-cloud/cover.png" +--- + + + +# Présentation du nouvel OctoBot cloud + +:::info + La traduction française de cette page est en cours. +::: + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="0rbUDySkIyg" title="Introducing OctoBot cloud" /> + +The [new Investor plans](new-octobot-cloud-plans-and-trading-bots) allow you to enjoy trading strategies in a very easy yet powerful way. These plans allow you to benefit from OctoBot based strategies without the technicalities of OctoBot. [OctoBot cloud trading bots](https://www.octobot.cloud/trading-bot) also benefit from an improvement and are now designer for users you want to automate their own trading strategies. + +## Pourquoi un nouvel OctoBot cloud ? + +At OctoBot, we realized that the current state of OctoBot is suitable for users with a technical background but is too complex for the majority of people. + +The goal of OctoBot has always been to bring automated trading strategies to any crypto investor. This includes tech savvy users as well as all the others. Up until now, OctoBot failed to be usable by any crypto investor and [the new OctoBot cloud](/fr) is built for them. + +**While the full OctoBot is designed to create and customize strategies, the new OctoBot cloud makes it very easy to use those strategies** + +We are splitting OctoBot plans into 2 different kinds: + +1. Strategy based plans +2. Full OctoBot plans + +## Choisir une stratégie, pas un robot: les plans basés sur les stratégies + +OctoBot can be complicated to use and setup, finding your suitable strategy can be even more difficult. That's why we make strategy based plans as simple and clear as possible + +### OctoBot, mais simple + +Ideally, when you want to use a strategy and not create one, you want to: + +1. Explore and compare available strategies +2. Understand potential profits and risks of the strategy of your choice +3. Apply this strategy on your exchange account + +It shouldn't be more complicated than this. Making these steps as easy as possible is our goal with the Investor and Pro plans. + +Therefore, using those plans, you don't need to care about your OctoBot, we are doing it for you. You just need to: + +1. Select the strategy of your choice + ![strategies](/images/blog/introducing-the-new-octobot-cloud/strategies.png) + +2. follow your gains directly from [OctoBot cloud](/fr) + ![bot](/images/blog/introducing-the-new-octobot-cloud/bot.png) + +Besides simplicity, making strategies financially accessible is also important to us. For this reason, we designed the Investor plan to be completely free and unlimited. + +### Pas de coûts, pas de frais, ca fonctionne + +At OctoBot, we believe that making a free plan will help a lot of people accessing automated trading strategies. + +That's why the Investor plan enables everyone to simply use trading strategies for free. + +This not a free trial: when you use a strategy with the Investor plan, we are not asking for your payment information, there is **no monthly subscription fee, no % taken on gains, no hidden fees**. + +> How is this possible ? When using the Investor plan, we rely on exchange partnerships to pay for our running costs. This means that as long as you use an exchange account from an OctoBot official partner, exchanges reward us and you are free to use the Investor plan forever. + +![plans](/images/blog/introducing-the-new-octobot-cloud/plans.png) + +### La transparency est clée + +Of couse, each strategy on [OctoBot cloud](/fr) is built, run and tested using OctoBot. This means that each strategy past performance is evaluated on a regular basis using historical data and OctoBot's [backtesting engine](/guides/octobot-usage/backtesting). This ensures that displayed statistics are real as strategy based plans are also using OctoBot under the hood. + +![dca](/images/blog/introducing-the-new-octobot-cloud/dca.png) + +At OctoBot we believe in transparency. This means that sometimes strategies can turn unprofitable as profits depends on so many different factors including market conditions. **If a strategy is not making profits during a given period, you will see it before using it.** + +## Créer votre propre stratégie: les robots de trading OctoBot cloud + +![cover](/images/blog/introducing-the-new-octobot-cloud/cover.png) + +[OctoBot trading bots](https://www.octobot.cloud/trading-bot) enable you to run OctoBots on our cloud system and adds a few features such as the [strategy designer](/guides/octobot-usage/strategy-designer), [free TradingView webhooks](trading-using-tradingview) and [ChatGPT integrations](trading-using-chat-gpt). + +> Those offers will now be explicitely targeting users who want to create or customize strategies. + +**[Démarrer votre OctoBot](https://www.octobot.cloud)** + +## L'open source OctoBot + +The current OctoBot (available <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">on github</a>) will stay open source, we will keep updating it as it is the backbone of everything on [OctoBot cloud](/fr). + +Each plan uses it and will help support the development of the free open source trading robot. + +Basically nothing changed for the open source OctoBot, it will continue to grow and improve. + +## Phase de bêta + +At the time of writing, the [new OctoBot cloud](/fr) is in beta stage. While the technical part is fully fonctionnal, we are working on the last user interface improvements before completely replacing the [current octobot.cloud](/fr). + +Feel free to use, experiment and give us your feedback on the [new OctoBot cloud](/fr), we are looking forward to knowing what your think of it and our new strategy-based plans. +Join us on the [beta dedicated telegram channel here](/fr) diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-09-30-shape-the-future-with-our-roadmap.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-09-30-shape-the-future-with-our-roadmap.md new file mode 100644 index 0000000000..7faedcf74d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-09-30-shape-the-future-with-our-roadmap.md @@ -0,0 +1,81 @@ +--- +title: "Façonnez le future d'OctoBot" +description: "Vote for what matters to you and influence the future of OctoBot" +slug: "shape-the-future-with-our-roadmap" +date: "2023-09-30" +authors: ["guillaume"] +tags: ["Roadmap", "Vote", "Share", "Cryptocurrency", "OctoBot cloud"] +image: "/images/blog/shape-the-future-with-our-roadmap/banner-dark.png" +--- + + + +# Façonnez le future d'OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/shape-the-future-with-our-roadmap/banner-dark.png) + +## Vos idées en priorité + +### Contexte rapide + +Simplicity and transparency are among the most important values to us as we explaned on [our previous article](/blog/introducing-the-new-octobot-cloud) regarding the [new OctoBot cloud](/fr). + +With OctoBot cloud, we are comitted in creating the best strategy automation system possible. Of course, "best" always depends on what is used as comparison criteria. +For us it means that you, our users, can: + +1. Clearly identify the investment strategies you want to use +2. Easily start the trading strategies you choose +3. Quickly access and understand all the data to follow your investment +4. Adjust things whenever you want to, in a very simple way + +This represents many challenges as each of those 4 steps can become very complex and end up being unsable. We want to avoid that at all cost. + +That is why are building tools to make it easy for you to share your ideas on how to improve each of those steps, according to your own experience. + +### Le système actuel + +Up until the day of writing this article, the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> grew based on user feedbacks according to a mix of ideas pushed by the user community on: + +- <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a> +- <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> +- <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">Feedback website</a> + +### Ce que nous voulons accomplir + +Our goal is, and has always been, to shape the whole OctoBot ecosystem according to its whole community best ideas and needs. + +As the community is growing and we are now releasing new features at a much faster pace, we will give a greater weight to our public feedback and roadmap system and split it into two part: + +- The <a href="https://feedback.octobot.cloud/cloud" rel="nofollow">OctoBot cloud section</a> + +![octobot_cloud](/images/blog/shape-the-future-with-our-roadmap/octobot_cloud.png) + +- The <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">open source OctoBot section</a> + +![open_source_octobot](/images/blog/shape-the-future-with-our-roadmap/open_source_octobot.png) + +## Comment ça fonctionne ? + +On <a href="https://feedback.octobot.cloud/" rel="nofollow">feedback.octobot.cloud</a> you will find our public roadmap showing: + +- What we are currently working on +- What we are planning to do next +- Ideas on the following things to work on based on your and our inputs + +![dca](/images/blog/shape-the-future-with-our-roadmap/roadmap.png) + +What you can do to improve OctoBot: + +- Vote for features that you would like to see added to the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> or [OctoBot cloud](/fr) +- Share new ideas +- Spread the word about <a href="https://feedback.octobot.cloud/" rel="nofollow">feedback.octobot.cloud</a> to encourage people to share ideas and vote for what matters the most + +What happens next ? + +- We update this roadmap on a regular basis to reflect our current work +- You get notified when things change to an idea you submitted or are following +- Both the <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">open source OctoBot</a> and [OctoBot cloud](/fr) become better and better thanks you and we are really greateful for this. Thank you diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-07-trading-with-ai-introduction.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-07-trading-with-ai-introduction.md new file mode 100644 index 0000000000..7938307eb0 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-07-trading-with-ai-introduction.md @@ -0,0 +1,72 @@ +--- +title: "Automatiser le trading de crypto avec IA" +description: "Learn how to automate your crypto trading with AI in 5 steps" +slug: "trading-with-ai-introduction" +date: "2023-10-07" +authors: ["paul"] +tags: ["AI", "Deep learning", "Trading", "Cryptocurrency", "OctoBot", "Educational"] +image: "/images/blog/trading-with-ai-introduction/cover.png" +--- + + + +# Comment automatiser le trading de crypto avec IA ? + +:::info + La traduction française de cette page est en cours. +::: + +Dive into the future of cryptocurrency trading using the power of AI with OctoBot script! +We'll walk you through 5 simple steps to automate your crypto trading using artificial intelligence. +No matter your experience level, this guide is designed to provide a step-by-step process for setting up and executing your first automated cryptocurrency trade using AI. + +## IA dans le trading + +Artificial intelligence (AI) has revolutionized how we trade. It helps in analyzing massive amounts of data, predicting market trends, and executing trades at lightning speed. To trade using AI, you need to choose a reliable AI trading software, set your trading parameters, and let the system do the rest. + +![trading](/images/blog/trading-with-ai-introduction/trading.jpg) + +## Comprendre le reinforcement learning + +Reinforcement Learning is a type of machine learning (itself a type of AI) where an agent learns to make decisions by taking actions in an environment to maximize some notion of cumulative reward. An 'agent' in this context refers to the algorithm or program that is making the decisions. It operates by interacting with its environment (in this case, the trading market), taking actions (such as buying or selling stocks), and receiving rewards or penalties based on the outcome. The goal of this agent is to learn over time which actions lead to the best outcomes, in this case, the most profitable trades. +In trading, we can use reinforcement learning to understand market dynamics, make accurate predictions, and execute profitable trades. + +![brain](/images/blog/trading-with-ai-introduction/brain.jpeg) + +## OctoBot script + +[OctoBot script](/guides/octobot-script) is engineered to provide traders with a framework for crafting and testing crypto trading strategies. + +It offers a suite of keywords (Python methods) which simplifies the process of creating trades and calculating TA indicators like RSI, thus facilitating users to design their unique trading strategies. + +OctoBot script also allows users to test their strategies using past data through the [backtesting](/guides/octobot-usage/backtesting) feature. With the generation of an advanced report at the end of each backtesting, users gain valuable insights into the performance of their strategies, enabling a comprehensive understanding of their effectiveness. + +## Comment utiliser OctoBot script pour trader avec de l'IA ? + +- Install OctoBot script by following the get started guide on <a href="https://github.com/Drakkar-Software/OctoBot-Script" rel="nofollow">github</a> +- Install AI requirements with + +``` +pip install -r requirements-ai.txt +``` + +- Install the necessary dependencies to be able to run the script on your GPU by following <a href="https://gretel.ai/blog/install-tensorflow-with-cuda-cdnn-and-gpu-support-in-4-easy-steps" rel="nofollow">this tutorial</a> +- Start to train your own model (model = the "brain" of your AI) on ETH/BTC using + +``` +python3 ai-example.py -t -s ETH/BTC -e 10 +``` + +- Once done your AI model will be saved in the weights folder. Find its name and add it in the end of the following command to run a backtesting using your new AI model + +``` +python3 ai-example.py -p -s ETH/USDT -w weights/202310050722-final-dqn.h5 +``` + +_202310050722-final-dqn.h5 is an example of weight, update it with your own_ + +- Here is an example of a backtesting using an AI model built using OctoBot script AI. There is no human action behind it, all the trades have been triggered by the AI. + +![strategy-ouput](/images/blog/trading-with-ai-introduction/strategy-output.png) + +If you found this content helpful, please give us feedback in our community <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> and <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a>! Your support will encourage us to create a series of detailed guides exploring more strategies and insights into AI trading. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-18-open-source-trading-software.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-18-open-source-trading-software.md new file mode 100644 index 0000000000..15eeb203c7 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-18-open-source-trading-software.md @@ -0,0 +1,85 @@ +--- +title: "La force des logiciels de trading open source" +description: "Understand the benefits of open source trading software." +slug: "open-source-trading-software" +date: "2023-10-18" +authors: ["guillaume"] +tags: ["Open source", "Cryptocurrency", "Trading", "Software", "Educational"] +image: "/images/blog/open-source-trading-software/cover.png" +--- + + + +# La force des logiciels de trading open source + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/open-source-trading-software/cover.png) + +Welcome to your guide to open source trading software. After going through this blog post, you'll have a thorough understanding of open source trading platforms, with a specific focus on open source crypto trading bots. We'll explore the main advantages they offer and how they use community feedback to optimize the trading experience. + +## Table des matières + +- [Definition of Terms](#définition-des-termes) +- [Benefits of Open Source Software](#les-bénéfices-des-logiciels-open-source) +- [The open source community](#la-communauté-open-source) +- [OctoBot: an open source trading software](#octobot-un-logiciel-de-trading-open-source) +- [Conclusion](#conclusion) + +## Définition des termes + +To fully understand the notion of open source trading software, it's essential to know the individual terms: open source, trading, and crypto trading. + +- **Open Source**: Open source refers to something that is publicly accessible and can be modified or shared. In the context of software, it means the source code is freely available for users to inspect, modify, or enhance according to their needs. Most open source software are available on <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">github</a>. + +- **Trading**: Trading is a fundamental economic concept involving the buying and selling of goods and services, with compensation paid by a buyer to a seller, or the exchange of goods or services between parties. + +- **Crypto Trading**: This refers to the act of buying or selling a cryptocurrency via a trading exchange like <a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">Binance</a>. + +- **Trading Platform Open Source**: This is a publicly accessible software where cryptocurrencies and other forms of assets are bought and sold. Since it is open source, users can modify it to suit their specific trading needs. + +- **Open Source Crypto Trading Bot**: This is a specific type of trading platform which is open source and that uses algorithms to buy and sell cryptocurrencies on behalf of the user, based on parameters set by the user. + +![crypto](/images/blog/open-source-trading-software/crypto.png) + +## Les bénéfices des logiciels open source + +When it comes to trading software, going open source comes with several benefits for the user. + +- **Flexibility**: Open source software allows users to customize and modify the software to fit their specific needs. + +- **Cost Efficiency**: They are generally free, reducing the cost of trading operations. + +- **Community Support**: Open source software often has a supportive community that can provide assistance and share innovative ideas. + +- **Transparency**: Open source software allows users to scrutinize, audit and improve upon the code, promoting trust and security. + +## La communauté open source + +Open source software thrives on community involvement. The collective experience of the community helps to: + +- **Improve the software**: Users can spot bugs, suggest improvements, and contribute to the development of the software. + +- **Foster innovation**: Different users bring diverse perspectives, leading to novel solutions and features. For example, OctoBot encourages users to share feedback on the software with a <a href="https://feedback.octobot.cloud/" rel="nofollow">dedicated website</a>. + +- **Provide support**: The community can offer assistance and share knowledge, making it easier for new users to navigate the software. + +## OctoBot: un logiciel de trading open source + +![A man relaxing in his couch while OctoBot is making money by automating cryptocurrency strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +OctoBot is a top recommendation when it comes to open source crypto trading bots. The reasons are: + +- **Customization**: Each OctoBot strategy can be customized to create your own trading strategy. A step by step guide is available in our [guide](/guides/octobot-configuration/profile-configuration). + +- **Community**: OctoBot has a robust community that continually contributes to its development and offers support to new users. You can join it on <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> and <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a>. + +- **Transparency**: OctoBot's open-source nature ensures transparency, allowing users to verify its security and fairness. + +- **Diversity**: OctoBot supports [most major exchanges](/guides/exchanges) and each of their cryptocurrencies. + +## Conclusion + +In essence, open source trading platforms, such as OctoBot, offer a cost-effective, flexible and transparent trading experience. Stay engaged with the community, keep your software updated, and embrace the journey of successful trading ahead. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-23-cloud-octobot-plans.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-23-cloud-octobot-plans.md new file mode 100644 index 0000000000..62e011c50e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-23-cloud-octobot-plans.md @@ -0,0 +1,51 @@ +--- +title: "Les plans OctoBot cloud" +description: "Explorez les différents plans OctoBot conçus pour tous, que vous recherchiez la simplicité ou la personnalisation dans vos stratégies de trading." +slug: "cloud-octobot-plans" +date: "2023-10-23" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans", "OctoBot cloud"] +image: "/images/blog/introducing-cloud-octobot-plans/cover.png" +--- + + + +# Les plans OctoBot cloud + +![cover](/images/blog/introducing-cloud-octobot-plans/cover.png) + +## Un plan OctoBot pour chaque besoin + +Dans l'équipe OctoBot, nous voulons que tout le monde puisse utiliser OctoBot et tirer profit de stratégies de trading performantes. + +Si vous souhaitiez obtenir une stratégie préconfigurée gratuitement ou une personnalisation infinie pour créer votre propre stratégie, notre objectif est que vous puissiez le faire en utilisant OctoBot. + +C'est pourquoi nous avons créé plusieurs plans conçus pour répondre aux besoins de chaque utilisateur. + +## Les plans + +### Investisseur + +Le plan Investisseur est parfait pour les investisseurs à la recherche de simplicité. Automatisez une stratégie de trading clé en main. + +Le plan Investisseur est gratuit, il utilise la version la plus simple d'OctoBot. + +### Investisseur Plus + +Le [plan Investisseur Plus](introducing-the-investor-plus-plan) offre l'accès à toutes les stratégies d'investissement d'OctoBot cloud ainsi que la possibilité d'exécuter autant d'OctoBot que vous le souhaitez + +### Le plan Pro + +Le [plan Pro](introducing-the-pro-plan) inclu le plan Investisseur Plus et ajoute la possibilité de trader avec vos stratégies TradingView ainsi que d'adapter votre OctoBot et de directement confgurer la façon dont il trade. + +### Self hosting + +Le plan Auto-hébergement vous permet de faire fonctionner votre OctoBot directement chez vous, sur l'appareil de votre choix. Il est gratuit et repose sur vous pour installer, exécuter et maintenir votre OctoBot. + +Si vous préférez faire fonctionner OctoBot chez vous ou sur votre propre serveur, le plan Auto-hébergement est fait pour vous. + +## Comment investir avec OctoBot cloud? + +Sélectionnez une stratégie sur [OctoBot cloud](/fr) et choisissez Investisseur pour commencer à investir gratuitement. + +Vous trouverez tous les détails des plans sur la page [de pricing](https://www.octobot.cloud/pricing) diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-30-octobot-1-0-2-whats-new.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-30-octobot-1-0-2-whats-new.md new file mode 100644 index 0000000000..2c5961652d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-10-30-octobot-1-0-2-whats-new.md @@ -0,0 +1,52 @@ +--- +title: "OctoBot 1.0.2 - Les nouveautés" +description: "Discover what's new in OctoBot - Chatgpt strategy upgrade, improved TradingView integration and more" +slug: "octobot-1-0-2-whats-new" +date: "2023-10-30" +authors: ["guillaume"] +tags: ["Tradingview", "Chatgpt", "Release", "DCA", "Backtesting"] +image: "/images/blog/octobot-1-0-2-whats-new/cover.png" +--- + + + +# OctoBot 1.0.2 - Les nouveautés + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/octobot-1-0-2-whats-new/cover.png) + +## Présentation d'OctoBot 1.0.2 + +We're thrilled to announce the release of OctoBot 1.0.2, an upgraded version with many improved features, thanks to the great feedback we received from you all. + +## Refonte de la stratégie ChatGPT + +In OctoBot 1.0.2, we've revamped the ChatGPT strategy. Until now, you couldn't run a [backtesting](/guides/octobot-usage/backtesting) on a chatgpt profile due to the excessive prompt, costing around $2 for 6 months history, hence we disallowed it. +However, with the new update, you can run backtesting on some gpt settings because we've already computed the prompt against some exchanges pairs historical data which are downloaded from our servers. + +We've also shifted from Daily Trading mode to a smart DCA trading mode in the chatgpt profile. The previous mode was no longer suited to the current market, hence we updated it to DCA trading mode to develop more accurate sell orders following a chatgpt entry signal. + +Additionally, we've introduced a new prompt setting. You can now ask chatgpt with pure candle history (without any TA indicator) and include the number of candles you want. + +![chatgpt settings](/images/blog/octobot-1-0-2-whats-new/gpt-evaluator-settings.png) + +## Amélioration de la connexion à TradingView + +We've also made noteworthy improvements to the TradingView connection, thanks to some valuable feedback from our OctoBot users who use the TradingView integration. +It's now possible to send a cancel order signal to cancel all current open orders for a symbol, or only to cancel an open order on a specific side using the param SIDE. More details on this can be found at [this link](/guides/octobot-interfaces/tradingview/alert-format#canceling-orders). + +Special thanks to @KidCharlemagne, an active member of our OctoBot <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord community</a>, for helping with the complete refactor of the TradingView [configuration guide](/guides/octobot-interfaces/tradingview). It's clearer now, with ample examples. + +![TradingView guide](/images/blog/octobot-1-0-2-whats-new/tv-guides.png) + +## Correction de bugs + +We've also squashed some bugs in this release. After careful checks, we discovered an issue in the OctoBot [backtesting engine](/guides/octobot-usage/backtesting) that allowed for premature filling of open orders. + +## Conclusion + +We can't wait to hear your thoughts on this new version. +Please use this <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">feedback link</a> to share your suggestions and what you'd like to see in our next release. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-01-smart-dca-making-of.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-01-smart-dca-making-of.mdx new file mode 100644 index 0000000000..d12f20728e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-01-smart-dca-making-of.mdx @@ -0,0 +1,169 @@ +--- +title: "Making of de la stratégie de trading Smart DCA" +description: "Discover how we created the Smart DCA trading strategies in OctoBot cloud, from the basics on choosing traded coins to the technical details of order sizing and take profit targets" +slug: "smart-dca-making-of" +date: "2023-11-01" +authors: ["guillaume"] +tags: ["DCA", "Strategy designer", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Educational"] +image: "/images/blog/smart-dca-making-of/cover.png" +--- + + + +# Making of de la stratégie de trading Smart DCA + +:::info + La traduction française de cette page est en cours. +::: + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="519pwSV1uwE" title="Smart DCA with OctoBot" /> + +At OctoBot, we are always trying to find new ways to trade. After experimenting with many types of strategies, we realized that sometimes, keeping it simple just works. + +Our idea was to take the concept of Dollar Cost Averaging and adapt it to smaller scale investments. + +## Présentation du Smart DCA + +<a href="https://www.investopedia.com/terms/d/dollarcostaveraging.asp" rel="nofollow">Dollar Cost Averaging (or DCA)</a> is a very well known investment strategy where you buy on +a regular basis in order to profit from local price drops. It allows investors +to reduce their overal buying costs. + +The DCA concept can also be applied to selling an asset. Selling something over a long period of time, when the price is going up, is allowing to profit from the whole range of prices and increase your average selling price. + +Buying and selling using DCA is a way to maximise profits when investing in and out of a coin. + +> After running tons of tests with historical market data, we realized that this idea also works very well on shorter term trades. + +We also had the surprise to observe an interesting side effect of smaller term DCA: it's a great way to profit from markets that are not moving synchronously. In other words, it can allow to profit from a rising ETH in the morning, take profits at noon and profit from a rising SOL in the afternoon, as long as ETH and SOL are not moving at the same time. + +## Résumé de la vidéo + +### Les pours et les contres de la stratégie + +1. Advantages of Smart DCA: + +- It works very well in sideway markets and uptrends. +- It does not require big market moves, simple + or minus 0.5 to 1% are enough to make profits. +- It never sells at a loss. + +2. Drawbacks of Smart DCA: + +- It should not be used in downward markets as it would lock your funds in sell orders (since it's not selling at a loss). + +### Trouver les crypto à trader + +In order to multiply trades, trading assets that are moving up and down at different times is optimal. We call such assets **complementary**. + +In the end, to optimize profits, the most important part is to include smartly chosen traded assets, so that the strategy trades as much as possible while lowering risk by investing in multiple coins. + +## Quand utiliser la stratégie Smart DCA + +The Smart DCA strategy is adapted to sideways or upwards markets. In order to be able to quickly fill its sell orders, it relies on the market not being in a pure downtrend. + +![cover](/images/blog/smart-dca-making-of/cover.png) + +Using the Smart DCA strategy in a downwards market might not allow sell order to be filled and therefore lock funds in open sell orders. While this is not a selling at a loss, it is still non optimal and can prevent generating profits from other cryptocurrencies. + +## Analyse des aspects techniques de la stratégie + +Let's now explore the very technical aspects of the Smart DCA strategy. + +After the initial step of identifying complementary coins to trade, the next part is to optimize the way Smart DCA will trade those coins. + +This comes down to how entries and exits should be traded, how much to assign to each entry signal, how to configure take profits, all of this while limiting inherent risks associated with the traded assets. + +We will split this into 3 mains topics: + +1. Configuring entries and exits. +2. Taking multiple markets into account. +3. Going beyond [backtesting](/guides/octobot-usage/backtesting) results. + +### Optimiser les entrées et sorties de la stratégie + +Profits in Smart DCA come from the difference between the sell and buy prices. + +The higher the difference, the bigger the profits. However the bigger the risks of not selling the asset. + +In a ideal world, your Smart DCA configuration is so that each entry quickly finds its exit because the exit price is configured according to your traded assets typical behavior. However in reality this is not always true. + +Therefore the goal of the strategy's entry and exit configuration is to find the sweet point for your traded assets where the large majority of your exit orders end up filled within the next hours or days at maximum. This allows to quickly free up funds and jump to the next opportunity. We don't want to be waiting for a fill that might take weeks to happen and prevent you from making money with this trade funds using other traded markets. + +![profitable results with 0.8 percent take profit](/images/blog/smart-dca-making-of/profitable-results-with-0.8-percent-take-profit.png) +_Steady portfolio growth and regular trades using 0.8% take profit targets_ + +At OctoBot cloud, we realized that for the top 50 altcoins, this point is usually around 0.8% profits. This configuration allows to make profits even after exchange fees while quickly freeing funds to multiply trade opportunities and limit asset exposure. + +![profitable but risky results with 2 percent take profit](/images/blog/smart-dca-making-of/risky-results-with-2-percent-take-profit.png) +_Unoptized portfolio growth: missed trades and higher volatily using 2% take profit targets_ + +Of course this number is highly correlated to the volatility of the traded pairs. If you are trading pairs from top 100 to 200 ranks, it's possible that a 1.5% take profit target would be more profitable as those pairs are much more volatile. + +### Trader avec plusieurs paires + +A key concept to optimize your returns using Smart DCA is to trade complementary coins. This allows to multiply trades while reducing risk by spreading funds between different assets. + +**But how many coins should be traded ?** + +Overal, the more the better providing 3 conditions: + +1. All assets must remain complementary (not making the same moves at the same time), otherwise profits are not increased. +2. Assets should display a similar volatility. +3. Having enough initial funds to create orders on every market. + +#### Les actifs complémentaires + +As explained on the video, the best way we found to identify complementary assets is to select assets from different naratives. This means coins that serve different purposes and therefore won't be moving from the same market events or trends. + +Here are examples of coins naratives: + +- Value transfer/storage coins (BTC, XRP) +- Blockchain coins (ETH, ADA, SOL) +- Privacy coins (XMR, ZEC) +- Oracle coins (LINK, BAND) +- Exchange coins (BNB, UNI) +- Meme coins ([DOGE](https://www.octobot.cloud/what-is-dogecoin), SHIB) +- Supply chain coins (VET) + +There are many more naratives such as gaming, metaverse, NFTs and others. + +An alternative way to explore coin narative is to use coin explorers categories: +![coingecko top coin categories](/images/blog/smart-dca-making-of/coingecko-coin-categories.png) +_CoinGecko's <a href="https://www.coingecko.com/en/categories" rel="nofollow">top coin categories</a>_ + +#### La volatilité + +As the goal of the strategy is to quickly go in and out of each asset, it is important that each asset displays overall the same volatily. This allows to fine tune entries and exits goals in an efficient manner. + +Using markets with different volatility present the following risks: + +- Exiting the market too early and missing on profits from more volatile assets. +- Not exiting the market when an opportunity arises due to targets adapted for a higher volatility market. + +#### Les fonds initiaux + +According to our tests, the ideal way to size orders on DCA is to use a small percent of your total traded portfolio value on each order. Here the meaning of _small_ can vary depending on your context and goals but overal the idea is the following: + +- Using a `%t` order amount settings to size orders according to the total value of traded assets holdings and keep order sizes consistent. +- Sizing `%t` in a manner that complies with the exchange minimal order size rules. For example this is usually $5 or $10 (or USD equivalent) on Binance. Please note that the current version of backtesting is very permissive on this topic and it's better to use the live [trading simulator](/guides/octobot-usage/simulator) or manually check order sizes if you are unsure about minimal order sizes +- Keeping the order amount smaller as you increase the number of traded pairs to profit of each pair and reduce chances of having a large part of your portfolio being stuck in sell orders of a particular asset when your exits did not yet trigger. + +![binance trading rules min funds for each market](/images/blog/smart-dca-making-of/binance-trading-rules-min-funds.png) +_Binance's <a href="https://www.binance.com/en/trade-rule" rel="nofollow">trading rules and minimal order size for each market</a>_ + +On OctoBot cloud, strategies usually trade with between 5% and 8% of the portfolio in each order. This allows to benefit from multiple pairs while allowing for minimum initial portfolios in the range of 100 to 200 USD-equivalent. + +### Au délà des résultats de backtesting + +When creating a trading strategy, it's always important to test it with [backtesting](/guides/octobot-usage/backtesting) to make sure the strategy behaves as expected. Backtesting can also be used to optimize a strategy settings. This is what we do at OctoBot when we create a new strategy. + +However, it's important to keep in mind that backtesting is only using past data. Therefore there are a few key points to pay attention to: + +1. Never over-optimize a strategy for a single backtesting context as the future is very rarely the exact repetition of the past. Prefer finding settings that work good (but not necessarily perfect) in most relevant historical range of your traded assets. +2. Carefully identify areas with no buy trades when there should be some. This usually means that your portfolio is completely invested and probably that you are missing a few opportunities. Your settings can most likely be improved for the selected market. +3. Assets that look complementary only based on their past price chart doesn't mean they will keep doing it. That's why having clear fundamental reasons to explain their price complementary (such as the narative) is better than just relying on price charts. + +## Avertissement + +Veuillez noter que le contenu de cet article est destiné à DES FINS D'INFORMATION GÉNÉRALE et non pas à des conseils financiers. Les informations contenues ici sont uniquement à titre informatif. Rien dans ce document ne doit être interprété comme un conseil financier, juridique ou fiscal. Le contenu de cet article reflète uniquement les opinions de l'auteur et/ou de l'équipe d'OctoBot. Aucun d'entre eux n'est un conseiller financier agréé ou un conseiller en investissement. L'achat de cryptomonnaies comporte des risques considérables de perte. L'auteur et/ou l'équipe OctoBot ne garantissent aucun résultat particulier. Les performances passées ne préjugent pas des résultats futurs. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-02-trading-using-chat-gpt.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-02-trading-using-chat-gpt.mdx new file mode 100644 index 0000000000..62d4df7393 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-02-trading-using-chat-gpt.mdx @@ -0,0 +1,91 @@ +--- +title: "Trader avec ChatGPT" +description: "Utilisez ChatGPT pour améliorer vos stratégies de trading de cryptos avec des prédictions d'une IA en temps réel" +slug: "trading-using-chat-gpt" +date: "2023-11-02" +authors: ["paul"] +tags: ["ChatGPT", "AI", "Cryptocurrency", "Trading", "OctoBot cloud"] +image: "/images/blog/trading-using-chat-gpt/cover.png" +--- + + + +# Trader avec ChatGPT + +![cover](/images/blog/trading-using-chat-gpt/cover.png) + +## Demandez à ChatGPT l'avenir du marché crypto + +Ne serait-il pas formidable de savoir ce que <a href="https://chat.openai.com/" rel="nofollow">ChatGPT</a> pense d'une direction particulière du marché ? + +Bien que ChatGPT ne puisse pas répondre directement à des questions telles que "Le BTC/USD va-t-il monter demain ?", il peut répondre à de nombreuses questions connexes, y compris les prévisions de tendances. + +C'est une excellente opportunité de tirer parti de l'IA extrêmement puissante qu'est ChatGPT (dans sa version 3 ou ultérieure) et de l'intégrer directement dans vos stratégies de trading. + +## Une stratégie de trading ChatGPT + +Dans cette vidéo, nous présentons une stratégie de trading ChatGPT que nous avons créé sur [OctoBot cloud](/fr). Nous expliquons son fonctionnement et ses performances. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="BV4ZHQrIpRQ" title="Stratégie de trading crypto avec ChatGPT sur OctoBot" /> + +Si vous êtes intéressés pour en savoir plus sur cette stratégie de trading ChatGPT, nous avons créé ce [making of de la stratégie ChatGPT](chatgpt-strategy-deep-dive) dans lequel nous couvrons on détails: + +- Les achats et ventes de cette stratégie +- Ses performances en trading simulé avec les données historiques ([backtesting](/guides/octobot-usage/backtesting)) +- Les performances d'un OctoBot qui trade avec cette stratégie depuis plusieurs semaines. + +**[Investir avec ChatGPT](https://www.octobot.cloud)** + +## ChatGPT dans votre OctoBot + +Depuis OctoBot 0.4.47, nous avons ajouté un nouvel évaluateur : le `GPTEvaluator`. Cet évaluateur peut être utilisé facilement avec le profile `GPT Trading`. + +Une fois activé, il demandera automatiquement à ChatGPT son avis sur chaque paire échangée, sur chaque intervalle de temps. + +Le type de données donné à ChatGPT peut être configuré : la valeur du prix de l'actif ou différents types d'évaluateurs techniques peuvent être envoyés à ChatGPT pour qu'il donne son avis. + +Une fois activé, le `GPTEvaluator` se comporte comme tout autre évaluateur technique, ce qui signifie qu'il peut être combiné avec d'autres, utilisé pour trader, pour créer des signaux de trading ou pour vous notifier lorsque la situation change. + +Vous pouvez donc : + +- Trader automatiquement en se basant uniquement sur les prédictions de ChatGPT +- Trader en combinant les prédictions de ChatGPT avec d'autres évaluateurs +- Obtenir l'avis de ChatGPT sur le marché à tout moment depuis l'interface web et le statut du marché et être notifié de tout changement + +Plus de détails sur le fonctionnement des stratégies de trading ChatGPT avec OctoBot sur le [guide de trading ChatGPT](/guides/octobot-trading-modes/chatgpt-trading). + +## Suivez les signaux de trading ChatGPT + +Nous avons également mis en place une [page](https://www.octobot.cloud/tools/crypto-prediction) où vous pouvez suivre les derniers signaux de trading crypto de ChatGPT. +L'objectif de cette nouvelle page est simple. Il s'agit de vous montrer les derniers signaux de trading crypto de notre stratégie ChatGPT. De cette façon, vous pouvez voir comment cela fonctionne et confirmer que cela fonctionne réellement. + +<div style={{textAlign: "center"}}> + <div> + ![ChatGPT trading signals](/images/blog/trading-using-chat-gpt/gpt-free-tool.png) + _Tableau de bord des derniers signaux de trading ChatGPT_ + </div> +</div> + +Pour des informations en temps réel, nous avons également créé un <a href="https://twitter.com/OctoBotGPT" rel="nofollow">compte Twitter / X</a> et un <a href="https://t.me/octobotgpt" rel="nofollow">Telegram</a> qui publient les nouveaux signaux. En suivant ces comptes, vous pouvez immédiatement recevoir des notifications sur les derniers signaux, ce qui facilite le suivi du marché crypto. + +Pour avoir plus d'informations sur les signaux de trading ChatGPT, lisez notre article [Trading avec les signaux ChatGPT](introducing-chatgpt-trading-tool). + +<div style={{textAlign: "center"}}> + **[Voir les stratégies ChatGPT](https://www.octobot.cloud/fr/explore?category=strategies)** +</div> + +## Comment utiliser ChatGPT dans votre OctoBot + +Étant donné que les appels automatisés à ChatGPT sont une fonctionnalité payante de <a href="https://openai.com/" rel="nofollow">openai.com</a>, il existe 3 façons de l'utiliser dans OctoBot et de [trader avec ChatGPT](/guides/octobot-trading-modes/chatgpt-trading). + +### 1. Utiliser une stratégie de trading OctoBot cloud basée sur ChatGPT + +En utilisant [OctoBot cloud](https://www.octobot.cloud/), vous pouvez utiliser des stratégies de trading ChatGPT simplement et gratuitement. Utilisez les sur votre compte de plateforme d'échange favorite ou sans risque en utilisant le [trading virtuel](/investing/paper-trading-a-strategy). + +### 2. ChatGPT à partir de votre OctoBot auto-hébergé + +Pour utiliser ChatGPT à partir de votre [robot de trading OctoBot cloud](https://www.octobot.cloud/trading-bot), renseignez votre clé API OpenAI et la stocker dans la configuration de votre OctoBot. Les tarifs d'OpenAI seront alors appliqués. + +Plus de détails sur la configuration de ChatGPT et comment estimer le coût de ses demandes dans le [guide ChatGPT](/guides/octobot-interfaces/chatgpt) diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-06-introducing-chatgpt-trading-tool.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-06-introducing-chatgpt-trading-tool.mdx new file mode 100644 index 0000000000..305fd01fd1 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-06-introducing-chatgpt-trading-tool.mdx @@ -0,0 +1,143 @@ +--- +title: "Introduction aux prédictions crypto de ChatGPT" +description: "Découvrez comment utiliser ChatGPT pour prédire le prochain mouvement de prix des cryptos. Cet article présente notre outil pour suivre les dernières prédictions sur Twitter et Telegram et recevoir des notifications en temps réel." +slug: "introducing-chatgpt-trading-tool" +date: "2023-11-06" +authors: ["paul"] +tags: ["Chatgpt", "Cryptocurrency", "Trading", "Strategy", "AI", "GPT"] +image: "/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png" +--- + + + +# Trading avec les signaux ChatGPT + +Dans cet article nous présentons notre dernier outil conçu pour fournir des signaux d'ACHAT ou de VENTE simples pour différentes cryptomonnaies basés sur des prédictions IA. + +## La puissance de ChatGPT + +### Qu'est-ce que ChatGPT + +Notre nouvel outil exploite la puissance de ChatGPT, un modèle d'intelligence artificielle développé par <a href="https://openai.com/" rel="nofollow">OpenAI</a>. + +<a href="https://openai.com/chatgpt" rel="nofollow">ChatGPT</a> a été entraîné sur divers textes d'Internet, et il +utilise cette connaissance pour générer des textes pertinents en réponse aux +entrées qu'il reçoit. Dans le contexte de notre outil, il utilise sa +connaissance de l'historique des prix du marché, en particulier des modèles de +chandeliers (format d'affichage des prix aussi nommé "candlestick"), pour faire +des prédictions sur les tendances des cryptomonnaies. + +<div style={{textAlign: "center"}}> + <div> + ![chatgpt-logo](/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png) + *Logo de ChatGPT* + </div> +</div> + +### Pourquoi ChatGPT + +Les modèles de chandeliers, qui affichent les prix hauts, bas, d'ouverture et de clôture pour une période spécifique, sont fondamentaux pour réaliser des analyses techniques à fin de trading. +ChatGPT utilise <a href="https://openai.com/blog/chatgpt" rel="nofollow">sa base de connaissances étendue</a> pour identifier ces modèles dans les données historiques de prix de diverses cryptomonnaies. + +En reconnaissant ces modèles, il peut alors prédire les tendances futures potentielles en comparant les données du marché actuel avec des situations historiques similaires. +Cette analyse forme la base des signaux 'ACHETER' ou 'VENDRE' que notre outil génère. + +## Comment ça fonctionne + +Nous fournissons à ChatGPT les prix actuels de différentes cryptomonnaies et lui demandons de déterminer la probabilité que ces prix augmentent ou diminuent. +Actuellement, notre outil utilise uniquement les prix des chandeliers pour ses prédictions. Cependant, à l'avenir, nous prévoyons d'inclure des indicateurs d'analyse technique pour une prédiction plus détaillée et précise, vous offrant une perspective plus complète sur les mouvements potentiels du marché. + +![prompt](/images/blog/introducing-chatgpt-trading-tool/prompt.png) + +Voici comment ça fonctionne : + +1. Nous demandons à ChatGPT de prédire la direction potentielle du prix (soit à la hausse, soit à la baisse) en fonction des données de prix les plus récentes des cryptomonnaies, comme illustré dans la capture d'écran suivante. +2. Ensuite, nous utilisons la réponse de ChatGPT pour créer un signal de trading, qui dépend de la direction du prix prédite et de la confiance de ChatGPT dans cette prédiction. + +Plus de détails sur le fonctionnement des stratégies de trading ChatGPT avec OctoBot sur le [guide de trading ChatGPT](/guides/octobot-trading-modes/chatgpt-trading). + +**[Investir avec ChatGPT](https://www.octobot.cloud)** + +## Le nouveau tableau de bord + +Nous avons créé ce [tableau de bord crypto ChatGPT gratuit](https://www.octobot.cloud/tools/crypto-prediction) présentant les signaux de trading de cryptomonnaies les plus récents produits par ChatGPT. + +<div style={{textAlign: "center"}}> + ![shib-prediction](/images/blog/introducing-chatgpt-trading-tool/tool-screenshot.png) + *Capture d'écran de la page de présentation des signaux de trading ChatGPT* +</div> + +Cette fonctionnalité peut être utilisée par quiconque souhaite suivre ce que ChatGPT pense qu'il va arriver sur les différentes cryptomonnaies. +C'est comme avoir un aperçu rapide et à jour des prédictions de l'IA qui pourrait vous aider dans votre prise de décisions. + +## Recevoir les prédictions + +Nous avons franchi une étape supplémentaire en introduisant des comptes Twitter et Telegram dédiés qui publient un signal chaque fois qu'une nouvelle prédiction avec une grande confiance est faite par ChatGPT. +En suivant ces comptes, vous pouvez rester à jour avec les signaux de trading de cryptomonnaies les plus intéressants prédits par ChatGPT. +De cette manière, vous ne manquerez jamais une opportunité. + +### Le compte X / Twitter + +Suivez <a href="https://twitter.com/OctoBotGPT" rel="nofollow">le compte X OctoBotGPT</a> pour recevoir une notification à chaque prédiction de ChatGPT sur X / Twitter. + +<div style={{textAlign: "center"}}> + <div> + ![notification Twitter avec ChatGPT qui prédit une augmentation de SOL avec + 90% de + probabilité](/images/blog/introducing-chatgpt-trading-tool/sol-tweet.png) + *Tweet de prédiction OctoBotGPT SOL/USDT* + </div> +</div> + +### Le compte Telegram + +Rejoignez <a href="https://t.me/octobotgpt" rel="nofollow">le Telegram OctoBotGPT</a> pour recevoir une notification à chaque prédiction de ChatGPT sur Telegram. + +<div style={{textAlign: "center"}}> + <div> + ![notification Telegram avec ChatGPT qui prédit une augmentation de SOL avec + 90% de + probabilité](/images/blog/introducing-chatgpt-trading-tool/sol-telegram.png) + *Notification Telegram de prédiction OctoBotGPT SOL/USDT* + </div> +</div> + +## Performance des prédictions ChatGPT + +Voici un exemple de tweet récent publié par <a href="https://twitter.com/OctoBotGPT" rel="nofollow">le compte X OctoBotGPT</a>: + +<div style={{textAlign: "center"}}> + <div> + ![shib Shiba Inu Twitter notification ChatGPT predicts SHIB going up with + 90% change](/images/blog/introducing-chatgpt-trading-tool/shib-tweet.png) + *Tweet de prédiction OctoBotGPT SHIB/USDT à 21h* + </div> +</div> + +<div style={{textAlign: "center"}}> + ![shib Shiba Inu SHIB/USDT price going up after ChatGPT prediction on + twitter](/images/blog/introducing-chatgpt-trading-tool/shib-prediction.png) + *Prix SHIB/USDT sur Binance à 21h* +</div> +À titre d'illustration, ce tweet a été publié à 21h, prédisant une forte +probabilité d'augmentation du prix de SHIB. Cela s'est avéré être une excellente +prédiction, car juste quelques minutes après la publication du tweet, le prix de +SHIB a effectivement commencé à augmenter. + +## Les stratégies de trading avec ChatGPT + +Pour automatiser le trading selon les signaux de ChatGPT, nous avons créé des stratégies permettant de trader avec ces signaux. + +Dans cette vidéo, nous vous présentons ces stratégies. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="BV4ZHQrIpRQ" title="Stratégie de trading crypto avec ChatGPT sur OctoBot" /> + +Vous trouverez aussi plus d'informations sur les stratégies de trading utilisant ChatGPT sur notre article [Trader avec ChatGPT](/blog/trading-using-chat-gpt). + +<div style={{textAlign: "center"}}> + **[Voir les stratégies ChatGPT](https://www.octobot.cloud/fr/explore?category=strategies)** +</div> + +Vous avez une question ou une demande concernant les signaux de trading GPT ? N'hésitez pas ! Contactez la communauté OctoBot sur <a href="https://discord.com/invite/vHkcb8W" rel="nofollow">Discord</a> et <a href="https://t.me/octobot_trading" rel="nofollow">Telegram</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-10-introducing-the-new-octobot-mobile-app.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-10-introducing-the-new-octobot-mobile-app.md new file mode 100644 index 0000000000..e50f4c796a --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-10-introducing-the-new-octobot-mobile-app.md @@ -0,0 +1,51 @@ +--- +title: "Présentation de la nouvelle application mobile OctoBot" +description: "Découvrez la nouvelle application OctoBot pour Android. Suivez en temps réel les profits, actifs et solde de vos OctoBots depuis votre mobile." +slug: "introducing-the-new-octobot-mobile-app" +date: "2023-11-10" +authors: ["paul"] +tags: ["Android", "Mobile", "OctoBot cloud"] +image: "/images/blog/introducing-the-new-octobot-mobile-app/cover.png" +--- + + + +# Présentation de la nouvelle application mobile OctoBot + +<div style={{textAlign: "center"}}> + ![octobot-android-app](/images/blog/introducing-the-new-octobot-mobile-app/cover.png) +</div> + +Nous sommes ravis de vous présenter l'application OctoBot pour Android. Cette nouvelle application offre la possibilité de suivre votre OctoBot, qu'il soit auto-hébergé ou dans le cloud, directement depuis votre mobile. + +## À quoi sert l'application mobile OctoBot ? + +L'<a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=introducing-mobile-app" rel="nofollow">application Android OctoBot</a> est une application mobile conçue pour faciliter l'accès à vos bots de trading OctoBot. Elle vous permet de vous connecter en utilisant votre compte OctoBot cloud et de suivre vos bots de trading directement depuis votre appareil Android. + +## Fonctionnalités de l'application mobile OctoBot + +Pour l'instant, l'application Android OctoBot vous permet de suivre les profits du bot, les actifs et le solde. + +<div> + <div> + ![vue de + connexion](/images/blog/introducing-the-new-octobot-mobile-app/app-signin.webp) + </div> + <div> + ![vue des + bots](/images/blog/introducing-the-new-octobot-mobile-app/app-bots.webp) + </div> +</div> + +## Notre engagement sur les retours utilisateur + +Nous croyons que la meilleure façon de créer un outil qui répond véritablement aux besoins de nos utilisateurs est d'écouter ce qu'ils ont à dire. C'est pourquoi nous invitons les utilisateurs à donner leur avis sur l'application Android OctoBot. + +Nous avons mis en place un <a href="https://feedback.octobot.cloud/octobot-mobile-app" rel="nofollow">site de feedback</a> où vous pouvez partager vos pensées, suggestions et expériences avec l'application. + +## Conclusion + +Avec l'application OctoBot pour Android, vous pouvez maintenant gérer vos bots de trading de n'importe où, en quelques gestes sur votre appareil mobile. +Bien que nous soyons fiers de cette réalisation, nous savons qu'il reste encore beaucoup à faire. Votre feedback est crucial, nous vous encourageons à partager votre expérience sur notre <a href="https://feedback.octobot.cloud/octobot-mobile-app" rel="nofollow">site de feedback</a>. + +Alors, pourquoi attendre jusqu'à demain ? Téléchargez dès maintenant l'application OctoBot pour Android : cherchez "OctoBot" dans votre <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=introducing-mobile-app" rel="nofollow">Playstore</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-17-paper-trading-with-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-17-paper-trading-with-octobot.md new file mode 100644 index 0000000000..6feca80330 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-17-paper-trading-with-octobot.md @@ -0,0 +1,57 @@ +--- +title: "Trading virtuel avec OctoBot" +description: "Test trading strategies risk free with paper trading. Run OctoBot strategies for free with simulated money." +slug: "paper-trading-with-octobot" +date: "2023-11-17" +authors: ["guillaume"] +tags: ["Trading", "Strategy", "OctoBot cloud"] +image: "/images/blog/paper-trading-with-octobot/cover.jpg" +--- + + + +# Trading virtuel avec OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +<div style={{textAlign: "center"}}> + ![cover](/images/blog/paper-trading-with-octobot/cover.jpg) +</div> + +## Introduction au trading virtuel + +Paper trading is service allowing to test trading strategies in live conditions using a virtual portfolio. + +By using paper trading, you can easily: + +- **Test** the trading strategies you are interested in before using your real funds +- **Experiment** all the trading strategies you are curious to know more about + +## La position d'OctoBot's sur le trading virtuel + +At OctoBot, transparency is one of our core values. It is the reason why OctoBot is [open source](/guides/octobot) and why each strategy comes with its historical results. + +In order to bring this one step further, we decided to make [paper trading](/investing/paper-trading-a-strategy) of trading strategies **free and unlimited**. + +![trading account type choice real or paper trading](/images/guides/trading-account-type-choice-real-or-paper-trading.png) + +While most trading robot services choose to charge for paper trading or limit its duration, we decided to stay true to our ethos and include paper trading to our free services for all users. + +This means that when using OctoBot cloud, you can: + +1. **Explore**: Test the trading strategy (or strategies !) you are interested in using paper trading +2. **Invest**: Once you find a trading strategy you like, start an OctoBot with your real funds +3. **Optimize**: Keep experimenting other trading strategies risk free with your paper trading OctoBot + +## Comment lancer son bot de trading virtuel avec OctoBot ? + +As paper trading is using virtual funds, OctoBot cloud doesn't need your exchange credentials to run a paper trading OctoBot. +This means that you can very quickly experiment with any strategy on OctoBot cloud in just 3 steps: + +1. Create your [OctoBot](https://www.octobot.cloud/fr) account +2. Select the strategies your want to use +3. Choose paper trading and the simulated portfolio you want to trade with + ![paper trading virtual portfolio configuration](/images/guides/paper-trading-virtual-portfolio-configuration.png) +4. Follow your new OctoBot either from <a href="https://www.octobot.cloud/fr/bots" rel="nofollow">OctoBot cloud</a> or the <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=paper-trading" rel="nofollow">OctoBot app</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-29-octobot-pro-plan-early-access.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-29-octobot-pro-plan-early-access.md new file mode 100644 index 0000000000..87536ae8ad --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-11-29-octobot-pro-plan-early-access.md @@ -0,0 +1,92 @@ +--- +title: "Accès anticipé aux trading bots OctoBot cloud" +description: "Les trading bots OctoBot cloud sont maintenant en accès anticipé. Utilisez le Strategy Designer, les intégrations TradingView et ChatGPT" +slug: "octobot-pro-plan-early-access" +date: "2023-11-29" +authors: ["guillaume"] +tags: ["Strategy Designer", "AI", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png" +--- + + + +# Accès anticipé aux trading bots OctoBot cloud + +![annonce de l'accès anticipé aux trading bots OctoBot cloud](/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png) + +## Les trading bots OctoBot cloud + +Avec les [trading bots OctoBot cloud](https://www.octobot.cloud/trading-bot), vous accédez à la totalité des avantages d'OctoBot. + +- Votre OctoBot sera toujours en ligne et à jour: l'équipe OctoBot s'assure de tous les aspects techniques +- Profitez des extensions exclusives au trading bots OctoBot cloud telles que le [Strategy Designer](/guides/octobot-usage/strategy-designer) pour créer et optimiser vos meilleures stratégies de trading +- Obtenez un support personnalisé pour être certain d'avoir toutes les informations nécessaires afin de lancer votre OctoBot correctement et selon la stratégie de votre choix +- Utilisez et personnalisez facilement les stratégies d'OctoBot cloud +- Automatisez simplement et de façon sécurisée les trades de vos stratégies TradingView sans avoir recours à une configuration additionnelle. +- Profitez du plein potentiel des stratégies basées sur ChatGPT sans avoir à payer pour un abonnement OpenAI + +**[Démarrer votre OctoBot](https://www.octobot.cloud)** + +## Votre OctoBot amélioré + +Les trading bots OctoBot cloud vous donnent accès à une version améliorée d'<a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">OctoBot en version open-source</a>. Cependant, à la différence de la version open-source pour laquelle il est nécessaire de s'assurer que votre OctoBot reste en ligne, votre trading bots OctoBot cloud est toujours en ligne et fonctionne dans les meilleures conditions. + +Cela signifie qu'avec un cloud OctoBot, vous pouvez: + +- Créer vos propres stratégies basées sur des indicateurs techniques, [TradingView](#automatisation-facile-de-tradingview), Reddit ou même Google Trends +- Utiliser et personnaliser les stratégies existantes comme le [Dollar Cost Averaging](smart-dca-making-of), les Grilles ou [ChatGPT](#les-évaluations-chatgpt) +- Trader sur les marchés Spot et Futures sur toutes les [plateformes d'échange supportées](/guides/exchanges). Il n'y a aucune restriction sur les plateformes d'échange que vous pouvez utiliser +- Utiliser [le simulateur de trading (trading papier)](/guides/octobot-usage/simulator) ou les fonds présents sur le compte compte de votre plateforme d'échange +- Optimiser vos stratégies en utilisant le [backtesting](/guides/octobot-usage/backtesting) sur tout type de marché et de plateforme d'échange avec la fonctionnalité de [backtesting](/guides/octobot-usage/backtesting) ou le [Strategy Designer](#le-strategy-designer) +- Accéder à votre OctoBot à tout moment depuis [votre navigateur](/guides/octobot-interfaces/web), <a href="https://www.octobot.cloud/fr/bots" rel="nofollow">la page dédiée à vos bots sur OctoBot cloud</a>, [Telegram](/guides/octobot-interfaces/telegram) ou <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp" rel="nofollow">l'application mobile OctoBot</a> + +Explorons maintenant les avantages exclusifs à utiliser [les trading bots OctoBot cloud](https://www.octobot.cloud/trading-bot) + +## Le Strategy Designer + +Les OctoBots utilisant OctoBot cloud ont un accès exclusif au [Strategy Designer](/guides/octobot-usage/strategy-designer), qui est l'outil de mise au point de stratégies de trading le plus avancé que nous avons créé. + +![Aperçu du Strategy Designer avec des graphiques historiques de [Bitcoin](https://www.octobot.cloud/bitcoin-prediction), ethereum avec polygon](/images/blog/octobot-pro-plan-early-access/strategy-designer-preview.png) + +En utilisant le Strategy Designer, vous pouvez facilement personnaliser, tester et comparer les performances de toutes les stratégies. + +Obtenez des informations détaillées sur votre historique de portefeuille et _Profit and Loss_, explorez chaque trade, comparez vos résultats au fil des exécutions et bien plus. + +Plus de détails sur le [Strategy Designer](/guides/octobot-usage/strategy-designer). + +## Demandez une session personalisée avec l'équipe OctoBot + +Nous savons que démarrer une stratégie de trading peut être compliqué. C'est pourquoi l'équipe OctoBot propose de vous accompagner pendant 30 minutes dans une session en tête-à-tête personnalisée pour lancer votre OctoBot selon vos idées. + +De cette façon, vous serez certain d'être en mesure d'utiliser votre OctoBot à son plein potentiel. + +## Utiliser et personnaliser les stratégies d'OctoBot cloud + +Un des avantages majeurs à utiliser les trading bots OctoBot cloud est la possibilité de configurer les <a href="https://www.octobot.cloud/fr/explore" rel="nofollow">stratégies d'OctoBot cloud</a>. + +![cloud strategies](/images/blog/octobot-pro-plan-early-access/cloud-strategies.png) + +Cela signifie que vous pouvez: + +- Les utiliser sur votre OctoBot, sur la plateforme d'échange de votre choix +- Comprendre comment elles fonctionnent et modifier leur configuration +- Utiliser le [backtesting](/guides/octobot-usage/backtesting) et le [Strategy Designer](/guides/octobot-usage/strategy-designer) pour obtenir les meilleurs résultats sur les cryptomonnaies de votre choix + +## Automatisation facile de TradingView + +En utilisant la version open-source d'OctoBot, vous pouvez déjà [automatiser des signaux TradingView](/guides/octobot-interfaces/tradingview). Cependant cela requiert une [configuration spécifique](/guides/octobot-interfaces/tradingview/using-a-webhook) et très souvent un abonnement ngrok payant. + +En utilisant un robot de trading OctoBot cloud, tout est déjà configuré et aucun compte tiers n'est nécessaire pour recevoir vos signaux TradingView. + +## Les évaluations ChatGPT + +Interroger <a href="https://openai.com/chatgpt" rel="nofollow">ChatGPT</a> depuis un logiciel est une <a href="https://openai.com/pricing" rel="nofollow">fonctionnalité payante d'OpenAI</a>, la société qui a développé ChatGPT. Ansi pour utiliser ChatGPT avec la version open-source d'OctoBot, afin d'utiliser une stratégie employant ChatGPT, devez avoir et configurer un compte payant OpenAI. Chaque utilisation de ChatGPT vous sera facturée. + +Avec les plans [standard ou ultra](https://www.octobot.cloud/trading-bot), les coûts liés à ChatGPT sont pris en charge. Il n'est donc plus nécessaire de disposer d'un compte OpenAI, tout est déjà prêt pour l'utilisation. + +## S'inscrire pour l'accès anticipé + +Nous avons ouvert les inscriptions pour l'accès anticipé aux trading bots OctoBot cloud. Si vous êtes intéressé par les nombreux avantages des robots de trading OctoBot cloud, lancez votre trading bot sur +[trading bots OctoBot cloud](https://www.octobot.cloud/trading-bot) + +**[Démarrer votre OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-02-how-does-trading-bot-work.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-02-how-does-trading-bot-work.md new file mode 100644 index 0000000000..723bd49a81 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-02-how-does-trading-bot-work.md @@ -0,0 +1,76 @@ +--- +title: "Comment fonctionnent les bots de trading" +description: "Découvrez comment fonctionnent les bots de trading, leurs avantages et inconvénients, et apprenez à sélectionner le bot de trading qui correspond à vos besoins." +slug: "how-does-trading-bot-work" +date: "2023-12-02" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Backtesting"] +image: "/images/blog/how-does-trading-bot-work/cover.png" +--- + + + +# Comment fonctionnent les bots de trading + +Explorez les bots de trading crypto, comment ils fonctionnent, leurs avantages et inconvénients, et apprenez à sélectionner le bot de trading qui correspond à vos besoins. + +## Qu'est-ce qu'un bot de trading crypto ? +Les bots de trading crypto sont des logiciels conçus pour gérer le trading de cryptomonnaies pour le compte de leurs utilisateurs. +En utilisant divers algorithmes, ces bots peuvent exécuter des trades basés sur un ensemble de paramètres et stratégies prédéfinis, ce qui en fait un excellent moyen d'investir ou de trader des cryptomonnaies. + +<div style={{textAlign: "center"}}> + <div> + ![un bot de trading crypto assis derrière un bureau](/images/blog/how-does-trading-bot-work/cover.png) + </div> +</div> + +## Comment fonctionnent les bots de trading crypto ? + +Comprendre le fonctionnement des bots de trading crypto est essentiel pour trader des cryptomonnaies. +Cette section explique certains des éléments sur lesquels se basent les bots pour fonctionner. + +1. Indicateurs techniques : Ces bots utilisent souvent une gamme d'indicateurs techniques tels que les moyennes mobiles, l'indice de force relative (RSI) et d'autres pour informer leurs décisions de trading. +2. Prédictions des prix des cryptos : Les bots avancés peuvent intégrer des algorithmes d'apprentissage automatique, comme [OctoBot GPT](/blog/trading-using-chat-gpt), pour prédire les prix futurs sur la base d'analyses de données historiques. +3. Signaux crypto : En utilisant des signaux de plateformes comme [TradingView](/blog/trading-using-tradingview), ces bots peuvent exécuter des trades basés sur les tendances du marché et les perspectives d'experts. + +## Avantages des bots de trading crypto +Voici quelques avantages des bots de trading crypto. + +- Efficacité et rapidité : Les bots sont capables de traiter de grands ensembles de données et d'exécuter des trades à une vitesse inégalable par les traders humains. +- Éviter le trading émotionnel : Les bots sont basés sur des algorithmes, éliminant ainsi les biais émotionnels qui conduisent souvent à des pertes. +- Trading 24/7 : Contrairement aux humains, les bots peuvent fonctionner 24 heures sur 24, 7 jours sur 7, tradant chaque opportunité même lorsque leur propriétaire dort. + +## Inconvénients de l'utilisation des bots de trading crypto +Bien qu'avantageux, il est crucial de comprendre les éventuels inconvénients et limitations de l'utilisation des bots de trading crypto dans votre stratégie d'investissement. + +- Complexité d'utilisation : Comprendre et configurer un bot de trading peut être une tâche compliquée, en particulier pour les débutants dans le monde du trading. +- Risques de sécurité : L'utilisation de bots sur des plateformes peu fiables peut poser des risques pour la sécurité de vos cryptos ou de vos données personnelles. + +## Le trading automatisé est-il rentable ? +Le trading automatisé n'a pas un résultat garanti. Pour cette raison, il est nécessaire d'analyser les performances passées des stratégies et d'utiliser le [trading virtuel](/blog/paper-trading-with-octobot) pour tester les stratégies. + +- [Trading virtuel](/guides/octobot-usage/simulator) et [backtesting](/guides/octobot-usage/backtesting) : Avant d'utiliser un bot en trading réel, il est important de tester sa stratégie à l'aide de données historiques ([backtesting](/guides/octobot-usage/backtesting)) et de simuler le trading sans risque (trading virtuel). +- Analyse des performances passées : Bien qu'on ne puisse pas se baser sur les performances passées pour tirer des conclusions sur le futur, elles fournissent des informations précieuses sur la manière dont un bot peut se comporter dans certaines conditions de marché. + +## Comment choisir un bot de trading crypto + +Voici les critères essentiels pour choisir un bot de trading crypto qui s'aligne sur vos objectifs de trading et votre expérience em trading. + +- Facilité d'utilisation : Il est important de sélectionner un bot qui correspond à votre expertise technique pour assurer une utilisation fluide. +- Variété de stratégies : Le bot doit offrir une gamme de stratégies qui s'alignent sur vos objectifs et style de trading. +- Considération du coût : Évaluez le prix du bot par rapport à votre budget et aux retours potentiels qu'il peut offrir. + +![personne détendue utilisant OctoBot pour automatiser son trading de cryptos](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +### OctoBot : le bot de trading pour vous +OctoBot, créé en 2018, est reconnu pour sa transparence et ses fonctionnalités de configuration en matière de trading de cryptos. + +- Open Source avec des stratégies gratuites : En tant que [projet open-source](/blog/open-source-trading-software), il offre de multiples stratégies de trading gratuitement, offrant aux utilisateurs contrôle et transparence. +- OctoBot cloud : [La plateforme cloud d'OctoBot](/blog/introducing-the-new-octobot-cloud) simplifie la configuration d'OctoBot, la rendant accessible même aux novices, et est gratuite. +- Transparence des performances : OctoBot maintient un [historique de performances](/investing/find-your-strategy#détails-dune-stratégie) de chaque stratégie, aidant les utilisateurs à prendre des décisions éclairées. + +## Conclusion + +Les bots de trading crypto, comme OctoBot, offrent une efficacité d'investissement et un trading dénué d'émotions. +Cependant, il est essentiel de prendre en compte leurs complexités, les risques potentiels, et la nécessité de tester avant utilisation. +En choisissant le bon bot, les traders peuvent améliorer significativement leurs stratégies de trading et potentiellement augmenter leurs chances de succès. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-05-safu-meaning.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-05-safu-meaning.md new file mode 100644 index 0000000000..117c240c04 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-05-safu-meaning.md @@ -0,0 +1,37 @@ +--- +title: "Que veut dire SAFU" +description: "Explorez les origines, le but et l'impact culturel de SAFU, en particulier dans son rôle de protection des fonds des utilisateurs." +slug: "safu-meaning" +date: "2023-12-05" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/safu-meaning/cover.png" +--- + + + +# Comprendre la signification de SAFU + +Dans le monde de la cryptomonnaie, où la sécurité et la confiance sont vraiment importantes, le concept de Secure Asset Fund for Users (SAFU) représente une innovation majeure. Cet article décrit les origines, le but et l'impact culturel de SAFU, en particulier dans son rôle de protection des fonds des utilisateurs. + +## L'origine et le but de SAFU + +L'acronyme SAFU fait référence au "Secure Asset Fund for Users", un fonds d'assurance d'urgence établi par la plateforme d'échange de cryptomonnaies <a href="https://www.binance.com" rel="nofollow">Binance</a>. En juillet 2018, Binance a créé ce <a href="https://www.binance.com/en/support/announcement/binance-secure-asset-fund-for-users-safu-valued-at-1bn-9c513d91af3f497b99da2962322fb3c3" rel="nofollow">fonds</a>, en conservant 10 % de tous les frais de trading pour garantir que les fonds des utilisateurs soient protégés dans des situations extrêmes. + +## L'aspect financier de SAFU + +Le 29 janvier 2022, le Secure Asset Fund for Users était évalué à un milliard de dollars. SAFU contient principalement trois types d'actifs : BNB (Binance Coin), BTC ([Bitcoin](https://www.octobot.cloud/bitcoin-prediction)) et BSC-USD (un [stablecoin](/blog/what-are-stablecoins) de Binance), tous conservés dans des portefeuilles froids (ou "cold wallet" en anglais) séparés pour maximiser la sécurité. + +## La phrase "Funds are SAFU" + +Le terme "SAFU" a gagné en popularité à partir d'un jeu de mots sur le mot "safe", originaire d'une <a href="https://www.youtube.com/watch?v=DelF6zEHXpE" rel="nofollow">vidéo YouTube</a> par le créateur de contenu Bizonacci en 2018. Cette vidéo, intitulée "Funds are Safu", était une réponse à un <a href="https://twitter.com/cz_binance/status/1326458569974181891" rel="nofollow">tweet de Changpeng Zhao</a>, le PDG de Binance à ce moment là, qui assurait aux utilisateurs que leurs "fonds sont en sécurité" pendant une période de maintenance non planifiée. + +<div style={{textAlign: "center"}}> + <div> + ![Tweet de Changpeng Zhao "SAFU"](/images/blog/safu-meaning/safu-tweet.png) + </div> +</div> + +## La naissance du meme SAFU + +La phrase "Funds are SAFU" est née à la suite d'un incident survenu le 7 mars 2018. Les utilisateurs de [Binance](https://www.octobot.cloud/binance-trading-bot) ont rencontré une période de maintenance inattendue en raison d'un dysfonctionnement dans la paire de trading SYS/BTC. Cet événement a conduit au tweet rassurant de Changpeng Zhao. La vidéo virale qui a suivi et l'adoption du terme par Zhao ont ancré "Funds are SAFU" comme un élément de base dans l'argot crypto, souvent utilisé dans des situations similaires pour rassurer les utilisateurs. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-07-hodl-meaning.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-07-hodl-meaning.md new file mode 100644 index 0000000000..52ac5a423a --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-07-hodl-meaning.md @@ -0,0 +1,42 @@ +--- +title: "Comprendre la signification de HODL" +description: "Explorer les origines, la signification et l'impact culturel de ce célèbre mot." +slug: "hodl-meaning" +date: "2023-12-07" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/hodl-meaning/cover.png" +--- + + + +# Comprendre la signification de HODL en crypto : bien plus qu'une faute de frappe + +HODL est un mot célèbre dans le monde de la crypto. Il est l'abréviation de “hold on for dear life” (s'y accrocher pour la vie), et signifie garder sa crypto même lorsque les prix fluctuent beaucoup. + +## Qu'est-ce que HODL ? + +HODL, c'est comme dire : "Ne vendez pas votre crypto, même quand les choses se compliquent." Cela a commencé comme une façon amusante d'écrire "hold" mais maintenant, cela signifie beaucoup plus. C'est comme l'idée ancienne de garder des actions pendant longtemps, mais appliquée à la crypto. + +## HODL dans la culture crypto + +Dans les forums crypto en ligne, vous trouverez HODL et d'autres mots comme [SAFU](/blog/safu-meaning), [FUD](/blog/fud-meaning), [FOMO](/blog/fomo-meaning), Moon, et Sats. Ces mots sont devenus une grande partie du langage crypto. Même les personnes qui investissent dans d'autres choses utilisent maintenant HODL. + +## L'origine de HODL + +L'histoire de HODL a commencé avec une faute d'orthographe. Le 18 décembre 2013, lorsque le prix du [Bitcoin](https://www.octobot.cloud/bitcoin-prediction) a beaucoup chuté, un trader nommé GameKyuubi a écrit <a href="https://bitcointalk.org/index.php?topic=375643.0" rel="nofollow">"I AM HODLING"</a> dans un forum en ligne. Il voulait dire que, même s'il n'était pas bon en trading, il garderait son Bitcoin. + +<div style={{textAlign: "center"}}> + <div> + ![Une personne zen qui HODL sa crypto](/images/blog/hodl-meaning/cover.png) + *Une personne qui "HODL" sa crypto* + </div> +</div> + +## Comprendre la stratégie HODL + +HODL signifie garder sa crypto, même lorsque les prix chutent. Cela diffère du day trading, où les gens achètent et vendent souvent pour réaliser de petits profits. + +## Savoir quand HODL + +Décider de HODL ou vendre dépend de vous. Vous devriez faire des recherches et y réfléchir attentivement. Certaines personnes HODL toute leur crypto, tandis que d'autres en échangent une partie. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-09-fud-meaning.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-09-fud-meaning.md new file mode 100644 index 0000000000..ea7a4a4d04 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-09-fud-meaning.md @@ -0,0 +1,48 @@ +--- +title: "Comprendre le terme FUD" +description: "Découvrez la signification de FUD dans la crypto dans ce guide concis pour comprendre son influence sur les marchés et dans vos décisions d'investissement." +slug: "fud-meaning" +date: "2023-12-09" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/fud-meaning/cover.png" +--- + + + +# Comprendre la signification de FUD : peur, incertitude et doute dans son investissement + +Le FUD, abréviation de "fear, uncertainty, and doubt" (peur, incertitude et doute), est un terme qui résonne profondément dans le monde de la finance et de la cryptomonnaie. Il représente un état d'esprit pouvant influencer fortement les décisions d'investissement, menant souvent à un comportement irrationnel sur les marchés (actions, crypto etc...). + +## Signification de FUD en investissement + +En investissement, le FUD fait référence à un sentiment général de pessimisme ou de négativité pouvant affecter le sentiment du marché. Il va au-delà des préoccupations normales des investisseurs, touchant aux rumeurs et au battage médiatique pouvant conduire à des décisions impulsives et souvent irrationnelles. Un exemple de cela est la folie des actions "meme" (comme SHIB ou DOGE), où le FUD a joué un rôle significatif. + +## FUD vs FOMO : comprendre la différence + +Le FUD peut être considéré comme l'opposé de [FOMO](/blog/fomo-meaning). Alors que le FOMO pousse les investisseurs à rejoindre le mouvement par peur de manquer des gains, le FUD diffuse un sentiment négatif collectif, souvent exacerbé par les médias sociaux, provoquant la panique et la vente. + +## L'histoire du FUD + +L'histoire du FUD remonte aux années 1920, devenant plus largement reconnue comme un acronyme dans les années 1970. Initialement une stratégie de marketing et de vente, le FUD a évolué en un concept clé dans l'investissement, particulièrement avec l'avènement de marchés hautement volatils comme la cryptomonnaie. + +<div style={{textAlign: "center"}}> + <div> + ![Une personne regarde anxieusement un ordinateur avec un graphique de + marché crypto en baisse, illustrant le FUD dans la + cryptomonnaie.](/images/blog/fud-meaning/cover.png) *Une illustration du + sentiment de FUD.* + </div> +</div> + +## Le FUD dans la cryptomonnaie + +Dans le monde de la crypto, le FUD prend deux formes principales : propager le doute pour manipuler les prix du marché et le scepticisme général quant à la légitimité des cryptomonnaies. La nature volatile des marchés crypto les rend particulièrement sensibles au FUD. + +## Exemples de FUD dans la crypto + +Plusieurs exemples démontrent l'impact du FUD sur le marché de la crypto. Des exemples notables incluent les rumeurs de <a href="https://www.bbc.com/news/technology-58678907" rel="nofollow">la Chine interdisant le Bitcoin</a> et les craintes autour des réglementations gouvernementale. Ces exemples montrent comment des nouvelles négatives exagérées peuvent conduire à l'hystérie sur les marchés. + +## L'impact du FUD + +Le FUD peut conduire à des mouvements importants sur le marché, les investisseurs réagissant à des menaces perçues ou à des nouvelles négatives. Dans les marchés régulés, propager du FUD avec l'intention de <a href="https://www.binance.com/en-NG/feed/post/1279693" rel="nofollow">manipuler les prix est illégal</a> et considéré comme une manipulation de marché. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-10-octobot-1-0-4-whats-new.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-10-octobot-1-0-4-whats-new.md new file mode 100644 index 0000000000..4ebc357b91 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-10-octobot-1-0-4-whats-new.md @@ -0,0 +1,49 @@ +--- +title: "OctoBot 1.0.4 - Les nouveautés" +description: "Découvrez les nouveautés d'OctoBot - Téléchargez les stratégies d'OctoBot cloud sur votre OctoBot, tradez sur BingX, utilisez les trading modes Daily et DCA amélioré et bien plus encore" +slug: "octobot-1-0-4-whats-new" +date: "2023-12-10" +authors: ["guillaume"] +tags: ["OctoBot cloud", "Release", "DCA", "Exchanges"] +image: "/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png" +--- + + + +# OctoBot 1.0.4 - Les nouveautés + +![utilisez les statégies OctoBot cloud et tradez sur BingX](/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png) + +## Présentation d'OctoBot 1.0.4 + +Nous sommes fiers d'annoncer la nouvelle version d'OctoBot. 1.0.4 est une mise à jour ajoutant la possibilité de télécharger des stratégies d'OctoBot cloud directement dans votre OctoBot, l'ajout de la <a href="https://bingx.com/en-us/invite/Z4UUVX/" rel="nofollow">plateforme d'échange BingX</a> parmi les [échanges partenaires](/guides/exchanges#plateformes-déchange-partenaires---supporter-octobot) et de nombreuses améliorations. + +## Télécharger les stratégies d'OctoBot cloud + +A partir d'OctoBot 1.0.4, vous pouvez tirer profit des <a href="https://www.octobot.cloud/fr/explore" rel="nofollow">stratégies d'OctoBot cloud</a> directement depuis votre [OctoBot auto-hébergé](/guides/octobot). + +![télécharger les stratégies d'OctoBot cloud dans votre bot open source](/images/blog/octobot-1-0-4-whats-new/download-octobot-cloud-strategies-in-open-source-bot.png) + +Directement depuis votre OctoBot, téléchargez les stratégies OctoBot cloud et: + +- Utilisez les avec des fonds réels ou simulés +- Configurez les pour trader différemment, sur d'autres plateformes d'échange ou d'autres assets +- Profitez du [moteur de backtesting](/guides/octobot-usage/backtesting) OctoBot ou du [Strategy Designer](/guides/octobot-usage/strategy-designer) disponible dans les [robots de trading OctoBot](https://www.octobot.cloud/trading-bot) pour les optimiser en fonction des vos idées + +## BingX est maintenant disponible sur OctoBot + +Chez OctoBot, nous travaillons à rendre le trading aussi accessible que possible. Pour cela , rendre compatible OctoBot avec la majorité des plateformes d'échanges est une étape nécessaire. En suivant cette philosophie, nous venons d'ajouter le support de <a href="https://bingx.com/en-us/invite/Z4UUVX/" rel="nofollow">BingX</a>. Nous espérons que cet ajout aidera un maximum de nos utilisateurs. + +## Correction de bugs liés aux plateformes d'échanges + +Dans OctoBot 1.0.4, nous avons corrigé plusieurs problèmes liés à la connexion aux plateformes d'échanges. Cela concerne en particulier le trading de futures et les ordres de take profit et stop loss. Un grand merci à Nes, Grr, Gerhard et Artem de notre communauté pour nous avoir aidé à trouver ces problèmes. + +## Autres améliorations et corrections + +Dans cette version, nous avons ajouté des paramètres permettant aux trading modes DCA et Daily d'être personalisés de façon plus fine en fonction de vos idées. + +De nombreux bugs ont été corrigés, en particulier sur l'interface web, à propos du sélecteur de cryptomonnaies, plusieurs problèmes de connexion aux plateformes d'échange, de configuration de Ngrok et bien plus encore. + +## Conclusion + +Nous avons hate de savoir ce que vous pensez de cette nouvelle version. Partagez vos idées et suggestions que vous aimeriez dans la prochaine version avec ce <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">lien de feedback</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-12-fomo-meaning.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-12-fomo-meaning.md new file mode 100644 index 0000000000..04293171ae --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-12-fomo-meaning.md @@ -0,0 +1,52 @@ +--- +title: "Comprendre la signification du FOMO" +description: "Explorez la signification du FOMO dans la crypto-monnaie. Comprenez comment la peur de manquer influence les décisions de trading et apprenez des stratégies efficaces pour la combattre." +slug: "fomo-meaning" +date: "2023-12-12" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Educational"] +image: "/images/blog/fomo-meaning/cover.png" +--- + + + +# Comprendre la signification du FOMO + +Le FOMO, abréviation de "fear of missing out" (peur de manquer quelque chose), survient lorsque les traders s'inquiètent de rater une chance de gagner de l'argent, surtout sur le marché crypto en rapide évolution. + +## Le FOMO dans le contexte de la crypto + +Dans le monde de la crypto-monnaie, le FOMO est particulièrement intense. Le FOMO peut être considéré comme l'opposé de [FUD](/blog/fud-meaning). Les traders ont souvent l'impression de passer à côté de la prochaine grande chose, les amenant à prendre des décisions impulsives basées sur des informations non vérifiées ou des spéculations sur les réseaux sociaux. + +## Psychologie derrière le FOMO + +Le FOMO se produit lorsque les gens ont peur de manquer une occasion de gagner de l'argent, surtout lorsque les prix du marché augmentent. Cette peur peut rendre les gens anxieux, trop concentrés sur les nouvelles du marché et rapides à prendre des décisions en fonction des fluctuations des prix. + +<div style={{textAlign: "center"}}> + <div> + ![Une personne avec une expression excitée regarde un graphique de marché + crypto en hausse sur leur ordinateur, symbolisant le FOMO dans la + cryptomonnaie.](/images/blog/fomo-meaning/cover.png) *Une illustration du + sentiment de FOMO.* + </div> +</div> + +## Conséquences négatives du FOMO + +Le FOMO peut avoir de sérieux inconvénients. C'est l'outil principal utilisé par les schémas de pump-and-dump, où les investisseurs sont induits en erreur en faisant de mauvais choix. Un exemple notable est le <a href="https://www.wired.com/story/squid-game-coin-crypto-scam/" rel="nofollow">SQUID Coin scheme</a>, alimenté par l'intense communication sur les réseaux sociaux et le FOMO des investisseurs. + +## Dealing with FOMO in crypto + +To combat FOMO, traders should keep a detailed trading journal, adhere to a solid trading plan, and have a clear risk management strategy. It's crucial to research independently and not base decisions solely on social media buzz. + +## Exemples de FOMO dans la crypto + +Les fluctuations du prix de Dogecoin, souvent <a href="https://www.cbc.ca/news/business/dogecoin-1.6020408" rel="nofollow">influencées par les tweets d'Elon Musk</a>, illustrent le FOMO en pratique. Motivés par la peur de manquer, les traders répondent rapidement à ces opportunités de profit. + +## Identifier les causes du FOMO + +Savoir pourquoi le sentiment FOMO se existe est important. Il peut venir du désir de faire partie d'un grand changement de marché ou de ne pas vouloir perdre, d'avoir beaucoup d'informations, et d'être attiré par les histoires de personnes qui ont réussi tôt dans l'investissement. + +## Signes de FOMO + +Les signes du FOMO sont souvent liés à un besoin urgent d'investir en se basant sur la popularité récente ou les hausses de prix. Pour éviter le FOMO, une recherche approfondie et la dépendance à des sources d'information fiables sont essentielles. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-15-best-crypto-trading-bots.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-15-best-crypto-trading-bots.md new file mode 100644 index 0000000000..d9fa682dde --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-15-best-crypto-trading-bots.md @@ -0,0 +1,345 @@ +--- +title: "Les 10 meilleurs robots de trading crypto" +description: "Découvrez les meilleurs robots de trading crypto. Comparez les robots gratuits et payants les plus performants, leurs fonctionnalités, leur facilité d'utilisation et leur tarification. Idéal pour les débutants comme pour les experts en trading de cryptomonnaies." +slug: "best-crypto-trading-bots" +date: "2023-12-15" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-crypto-trading-bots/cover.png" +--- + + + +# Les 10 meilleurs robots de trading crypto + +Choisir le bon robot de trading crypto parmi les nombreuses options disponibles peut être difficile. Cet article est là pour vous aider à trouver le meilleur pour vos besoins. + +## Qu'est-ce qu'un bot de trading crypto ? + +Les bots de trading crypto sont comme vos assistants numériques pour le trading de cryptomonnaies. +Ils fonctionnent automatiquement, suivant vos stratégies établies, pour acheter et vendre de la crypto pour vous. +Cela signifie que vous n'avez pas à surveiller constamment les marchés. + +Avec de nombreux traders les utilisant aujourd'hui, ils sont un choix populaire tant pour les débutants que pour les experts. +Pour ceux qui sont nouveaux dans le trading de crypto, ces bots offrent souvent des stratégies pour vous aider à démarrer facilement. + +<div> + Maintenant que nous avons une compréhension claire de ce qu'est un bot de + trading crypto, explorons les différents types de bots de trading disponibles + sur le marché. +</div> + +## 1. OctoBot + +<div style={{textAlign: "center"}}> + <div> + ![A man relaxing in his couch while OctoBot is making money by automating + cryptocurrency + strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +[OctoBot](/fr) est un bot de trading flexible et facile à utiliser qui propose une variété de stratégies gratuites, y compris des stratégies basées sur l'[IA](https://www.octobot.cloud/features/ai-trading-bot), le [DCA](smart-dca-making-of) intelligent et GRID. +Il est [open-source](open-source-trading-software). Avec son accent sur la transparence, les utilisateurs peuvent tester des stratégies avec du [trading virtuel](/guides/octobot-usage/simulator) et suivre leurs performances. +OctoBot prend en charge la plupart des exchanges crypto et propose des plans premium pour les utilisateurs avancés, le rendant adapté tant pour les débutants que pour les investisseurs crypto expérimentés. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Facile d'utilisation avec des stratégies basées sur l'IA et diverses stratégies préétablies pour débutants et professionnels" + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Plusieurs offres gratuites avec des options pour des plans premium avancés" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Open-source, supporte les principaux exchanges, variété de stratégies de trading, backtesting et suivi de performance" + /> + </div> +</Card> + +## 2. Bitsgap + +Bitsgap est connu pour ses bots de grille qui rendent le trading accessible à davantage de traders. +Ces bots fonctionnent dans une plage définie, plaçant des ordres d'achat et de vente pour capitaliser sur les fluctuations du marché. + +Bitsgap propose également un bot de trading Futures pour gérer plusieurs petites positions quotidiennes, visant des retours fréquents et plus petits tout en minimisant les risques. +La plateforme est basée sur le cloud pour une facilité d'utilisation, et inclut du [trading virtuel](/investing/paper-trading-a-strategy) et des stratégies testées pour aider les traders à démarrer rapidement et en toute sécurité. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Plateforme basée sur le cloud avec trading de bot grille accessible" + /> + <Rating title="Prix" level={1} h="14px" tooltipText="Service payant" /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Offre un bot de trading de contrats à terme et du trading virtuel" + /> + </div> +</Card> + +## 3. 3Commas + +<div style={{textAlign: "center"}}> + <div> + ![3commas-logo](/images/blog/best-crypto-trading-bots/3commas.png) + </div> +</div> + +3Commas est un bot de trading crypto payant, offrant des bots GRID, DCA et Signal. +Connu pour son interface ergnonomique, 3Commas prend en charge de multiples stratégies de trading et indicateurs techniques. + +Il propose également une place de marché pour les signaux crypto tiers. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Interface facile à naviguer avec plusieurs stratégies de trading" + /> + <Rating + title="Prix" + level={1} + h="14px" + tooltipText="Service payant à prix élevé" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Propose des bots GRID, DCA et Signal avec une marketplace" + /> + </div> +</Card> + +## 4. Cryptohopper + +Cryptohopper est un bot de trading crypto payant, offrant un essai gratuit de 3 jours pour les nouveaux utilisateurs. +Il se distingue par son bot de market-making et la possibilité pour les utilisateurs de créer des stratégies de trading personnalisées. + +La plateforme prend également en charge le trading automatisé via un bot Telegram et propose des services supplémentaires comme des signaux crypto, des modèles de stratégie et du trading virtuel. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Difficile pour les débutants, offre un bot de market-making" + /> + <Rating + title="Prix" + level={2} + h="14px" + tooltipText="Plans gratuits et payants, adaptés à divers budgets" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Création de stratégie personnalisée, trading par bot Telegram et trading virtuel" + /> + </div> +</Card> + +## 5. CoinRule + +Coinrule est un bot de trading crypto sans code. +Il propose une configuration de règle simple "si-ceci-alors-cela", plus de 150 règles de trading prédéfinies et du trading virtuel. + +Le bot est disponible via une plateforme web, prend en charge tous les principaux tokens et propose divers plans d'abonnement, y compris une option gratuite. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Configuration de règles 'si ceci alors cela' compliquée pour les débutants" + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Plans gratuits et payants, adaptés à divers budgets" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Plus de 150 règles préétablies et du trading virtuel" + /> + </div> +</Card> + +## 6. Bot de trading Binance + +Les bots de trading Binance sont des outils automatisés conçus pour exécuter des transactions de cryptomonnaies en fonction de paramètres prédéfinis, permettant aux utilisateurs de trader 24h/24 sans surveillance constante. +Ces bots améliorent l'efficacité du trading en analysant les données de marché et en prenant des décisions instantanées, ce qui peut aider à capitaliser sur la volatilité du marché. +Binance propose une variété de bots, dont le populaire bot Spot Grid, particulièrement efficace dans les marchés latéraux en achetant bas et vendant haut au sein d'une fourchette de prix définie. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Exploration et création de bots complexes" + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Frais d'échange uniquement" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Grande variété de bots prêts à l'emploi" + /> + </div> +</Card> + +## 7. Altrady + +Altrady est une plateforme de trading crypto qui s'intègre à plus de 17 échanges, offrant des bots automatisés GRID et Signal pour un trading efficace. +Elle comprend des fonctionnalités avancées telles que la prise de bénéfices, le stop loss et des outils de gestion des risques. + +Son interface est complétée par des analyses en temps réel et des outils de gestion de portefeuille. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Modérément facile avec un ensemble de fonctionnalités avancées" + /> + <Rating title="Prix" level={2} h="14px" tooltipText="Prix équilibré" /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Quelques outils avancés comme Take Profit, Stop Loss et gestion de portefeuille" + /> + </div> +</Card> + +## 8. Pionex + +<div style={{textAlign: "center"}}> + <div> + ![logo-pionex](/images/blog/best-crypto-trading-bots/pionex.jpg) + </div> +</div> + +[Pionex](https://www.pionex.com/en/signUp?r=octobot) ([Pionex.us](https://accounts.pionex.us/en/signup?ref=octobot) pour les citoyens américains) est une plateforme de trading de pointe connue pour ses bots de trading automatisés faciles à utiliser, permettant aux traders d'exécuter des stratégies sans effort. +Elle propose une variété de bots personnalisables adaptés à différents styles de trading, ce qui la rend idéale pour les traders novices et expérimentés recherchant de la flexibilité. +Avec des outils avancés de backtesting et de suivi des performances, Pionex permet aux utilisateurs d'optimiser leurs stratégies et de gérer efficacement les risques. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Bot de copie facile à utiliser mais création de bots complexe" + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Frais d'échange faibles" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Grande variété de bots prêts à l'emploi" + /> + </div> +</Card> + +## 9. Cornix + +Cornix est une plateforme de trading crypto automatisée, réputée pour être le plus grand marché de signaux crypto. +Elle propose des bots DCA, une application mobile dédiée et une intégration avec Telegram pour automatiser facilement les trades. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Facilité modérée avec néanmoins une application mobile et une intégration Telegram" + /> + <Rating + title="Prix" + level={1} + h="14px" + tooltipText="Competitif en termes de coût avec les autres fournisseurs de signaux" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Bots DCA et intégration avec les principales plateformes de trading" + /> + </div> +</Card> + +## 10. TradeSanta + +<div style={{textAlign: "center"}}> + <div> + ![tradesanta-logo](/images/blog/best-crypto-trading-bots/tradesanta.png) + </div> +</div> + +TradeSanta est un bot basé sur le cloud qui automatise le trading avec des indicateurs techniques et des outils de gestion des risques tels que le stop loss. +Il propose également le trading démo, des notifications Telegram en temps réel et une option pour convertir rapidement les actifs lors de fluctuations du marché. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Facile à utiliser, avec des indicateurs techniques et du trading démo" + /> + <Rating + title="Prix" + level={1} + h="14px" + tooltipText="Options payantes pour des outils de trading essentiels" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Basique, inclut stop loss et notifications Telegram en temps réel" + /> + </div> +</Card> + +## Conclusion + +En résumé, que vous soyez un débutant ou un trader expérimenté, il existe un bot de trading crypto qui peut répondre à vos besoins spécifiques. +Des bots offrant une large gamme de stratégies et de paires de trading à ceux qui permettent une personnalisation et une programmation approfondies, chaque bot possède son propre ensemble de fonctionnalités uniques adaptées à différents styles et préférences de trading. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-17-best-open-source-crypto-trading-bots.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-17-best-open-source-crypto-trading-bots.md new file mode 100644 index 0000000000..7b756e2e3d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-17-best-open-source-crypto-trading-bots.md @@ -0,0 +1,200 @@ +--- +title: "Les 5 meilleurs robots de trading crypto open source" +description: "Découvrez les robots de trading crypto open source performants. Trouvez les meilleures solutions pour un trading de cryptomonnaie efficace." +slug: "best-open-source-crypto-trading-bots" +date: "2023-12-17" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-open-source-crypto-trading-bots/cover.png" +--- + + + +# Les 5 meilleurs robots de trading crypto open source + +Trouver le robot de trading crypto parfait peut être difficile avec autant de choix disponibles. +Ce guide vise à simplifier votre recherche concernant les options open source. +Dans cet article, nous vous présenterons les 5 meilleurs robots de trading crypto open source. + +## Qu'est-ce qu'un robot de trading crypto open source ? + +An [open-source crypto trading bot](open-source-trading-software) est un outil logiciel qui automatise l'achat et la vente de cryptomonnaies sur les plateformes d'échange. +Ces robots se connectent aux plateformes via des API pour collecter des données de marché et exécuter des transactions en fonction d'une [strategie de trading](/investing/find-your-strategy). + +Les robots open source sont uniques car leur code est librement accessible à toute personne souhaitant le consulter, le [modifier et l'améliorer](/guides/developers). +Cette transparence et cette personnalisation en font des favoris parmi les traders qui veulent adapter leurs stratégies de trading. +Ils conviennent aussi bien aux débutants qu'aux traders expérimentés qui souhaitent avoir plus de contrôle sur leurs stratégies de trading. +De plus, leur nature open source les rend souvent plus abordables. + +## 1. OctoBot + +![A man relaxing in his couch while OctoBot is making money by automating cryptocurrency strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +[OctoBot](/) est un robot de trading flexible et facile à utiliser qui offre une variété de stratégies gratuitement, y compris des stratégies basées sur l'[IA](https://www.octobot.cloud/features/ai-trading-bot), des stratégies [DCA](smart-dca-making-of) intelligentes et des stratégies GRID. +Il propose des options d'exécutions basées sur le cloud ou en auto-hébergement. +Son support communautaire actif en fait un choix attrayant pour les traders recherchant un équilibre entre fonctionnalité et facilité d'utilisation. +Grâce à son accent sur la transparence, les utilisateurs peuvent tester leurs stratégies et suivre les performances. +OctoBot prend en charge la plupart des principales plateformes d'échange de cryptomonnaies et propose des fonctionnalités professionnelles pour les utilisateurs avancés, ce qui en fait un outil adapté aussi bien aux débutants qu'aux investisseurs crypto expérimentés. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Une interface web conviviale qui facilite la navigation pour les débutants, ainsi qu'une version cloud pour un hébergement simplifié" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Propose une variété de stratégies de trading et de fonctionnalités, y compris le trading basé sur l'IA et l'optimisation de la stratégie" + /> + <Rating + title="Docs et communauté" + level={3} + h="14px" + tooltipText="Une documentation très détaillée et une communauté active sur Telegram et Discord" + /> + </div> +</Card> + +## 2. FreqTrade + +<a href="https://www.freqtrade.io/en/stable/" rel="nofollow">Freqtrade</a> +, écrit en Python, est réputé pour sa facilité d'utilisation et son intégration +à Telegram. Il permet de tester des stratégies de manière approfondie et +d'exécuter simultanément plusieurs robots, ce qui en fait un choix flexible pour +différents styles de trading. La communauté de développement active du robot +travaille constamment sur de nouvelles fonctionnalités et améliorations. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Connue pour son interface conviviale sur Telegram ou sur le web, ce qui la rend accessible aux débutants" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Offre une gamme solide de fonctionnalités, notamment les tests de stratégies et l'exploitation de plusieurs robots" + /> + <Rating + title="Docs et communauté" + level={3} + h="14px" + tooltipText="Une documentation très détaillée et une communauté très active et solidaire" + /> + </div> +</Card> + +## 3. HummingBot + +<a href="https://hummingbot.org/" rel="nofollow">Hummingbot</a> est renommé pour ses stratégies de trading automatisées +et sa compatibilité avec les plateformes d'échange centralisées et +décentralisées. Sa fonction de market making est idéale pour les passionnés du +trading de spreads. La plateforme prend également en charge le liquidity mining, +offrant ainsi des opportunités de gains supplémentaires. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Bien que puissant, il peut nécessiter une courbe d'apprentissage pour les nouveaux utilisateurs" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Propose une large gamme de fonctionnalités, dont le market making, le liquidity mining et le support des plateformes d'échange centralisées et décentralisées" + /> + <Rating + title="Docs et communauté" + level={3} + h="14px" + tooltipText="Dispose d'une documentation complète et d'une communauté solide" + /> + </div> +</Card> + +## 4. Jesse + +<div style={{textAlign: "center"}}> + <div> + ![jesse-logo](/images/blog/best-open-source-crypto-trading-bots/jesse.png) + </div> +</div> + +<a href="https://jesse.trade/" rel="nofollow">Jesse</a> est un robot de trading open source moins connu, mais +très efficace, conçu pour la simplicité et l'efficacité dans le développement de +stratégies. Il prend en charge différentes cryptomonnaies et offre un +environnement de [backtesting](/guides/octobot-usage/backtesting) simplifié. +Jesse est particulièrement apprécié pour sa structure de code propre, ce qui +facilite la personnalisation et l'extension de ses capacités pour les +développeurs. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Optimisé pour le développement de stratégies, mais peut nécessiter des connaissances techniques" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Bonne gamme de fonctionnalités pour le développement et le backtesting de stratégies" + /> + <Rating + title="Docs et communauté" + level={2} + h="14px" + tooltipText="Une documentation adéquate et une communauté en croissance" + /> + </div> +</Card> + +## 5. Superalgos + +<a href="https://superalgos.org/" rel="nofollow">Superalgos</a> propose une suite complète de trading avec une conception +de stratégie visuelle et des outils de données étendus, adaptés aux traders +individuels et professionnels. Sa scalabilité pour les opérations de grande +envergure et son approche décentralisée permettent le développement collaboratif +de stratégies. L'intégration de l'exploration de données et des outils +d'apprentissage automatique en fait une option puissante pour les traders +expérimentés. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Offre de nombreuses fonctionnalités qui peuvent être déconcertantes pour les débutants, mais très puissantes pour les utilisateurs expérimentés" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Suite complète d'outils, y compris la conception de stratégie visuelle, l'analyse de données et la scalabilité pour une utilisation professionnelle" + /> + <Rating + title="Docs et communauté" + level={3} + h="14px" + tooltipText="Communauté solide et documentation étendue disponible" + /> + </div> +</Card> + +## Conclusion + +En conclusion, il existe un robot de trading crypto pour chaque type de trader. +Que vous ayez besoin d'une large gamme de stratégies ou que vous préfériez des options personnalisables, chaque robot possède des fonctionnalités uniques pour s'adapter à différents styles et besoins de trading, rendant votre trading de cryptomonnaie plus efficace et adapté à vous. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-20-chatgpt-strategy-deep-dive.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-20-chatgpt-strategy-deep-dive.mdx new file mode 100644 index 0000000000..1d738c9518 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2023-12-20-chatgpt-strategy-deep-dive.mdx @@ -0,0 +1,80 @@ +--- +title: "Making of de la stratégie de trading ChatGPT" +description: "Découvrez comment nous avons créé la stratégie de trading basée sur ChatGPT d'OctoBot cloud en alliant les bénéfices de l'IA et des stratégies de Smart DCA" +slug: "chatgpt-strategy-deep-dive" +date: "2023-12-20" +authors: ["paul"] +tags: ["ChatGPT", "AI", "DCA", "Strategy designer", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Educational"] +image: "/images/blog/chatgpt-strategy-deep-dive/cover.png" +--- + + + +# Making of de la stratégie de trading ChatGPT + +Depuis quelques mois maintenant, nous expérimentons des stratégies de trading basées sur <a href="https://chat.openai.com/" rel="nofollow">ChatGPT</a>. Nous avons commencé par ajouter la [possibilité d'utiliser ChatGPT en tant qu'indicateur OctoBot](/guides/octobot-trading-modes/chatgpt-trading) et avons continué à construire des stratégies. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="P23oiE8gW4Y" title="Making of de la stratégie de trading ChatGPT" /> + +Dans cette vidéo, nous détaillons comment fonctionne la stratégie et parcourons ses performances historiques et réelles en suivant un OctoBot tradant avec cette stratégie depuis 4 semaines. + +Nous présentons aujourd'hui notre dernière stratégie ChatGPT, une stratégie qui bénéficie des avantages à la fois des optimisations de [Smart DCA (Dollar Cost Averaging)](smart-dca-making-of) et des prédictions d'intelligence artificielle. + +## Fonctionnement de la stratégie ChatGPT + +Le concept de cette stratégie est d'identifier les entrée sur les marchés tradés en utilisant les prédictions de ChatGPT. C'est à dire lorsque ChatGPT prédit que le marché va bientôt monter. + +Pour cela, nous fournissons à ChatGPT le contexte du marché en lui mettant à disposition les données de marché récente. Ensuite nous demandons à ChatGPT une projection pour savoir si la prochaine évolution de marché sera ascendante ou descendante. + +Une fois l'entrée identifiée, la partie DCA de la stratégie entre en jeu et optimise les étapes d'achat et de vente + +## Focus vidéo + +### Création des ordres + +Les ordres d'achat sont créés lorsque ChatGPT prédit avec un degré de confiance suffisant que le marché va augmenter. + +Chaque signal d'achat déclenche ensuite une stratégie rapide de DCA où plusieurs ordres d'achat à différents prix sont créés. L'utilisation de plusieurs prix d'achat permet de tirer profit des petites baisses du marché pour réduire le prix moyen d'achat. + +Une fois rempli, chaque ordre d'achat est remplacé par un ordre de vente du même montant. Cet ordre de vente a pour objectif un profit optimisé et est conçu pour être exécuté dans les prochaines heures ou jours, générant ainsi des bénéfices à court terme. + +### Backtest de la stratégie + +Pour concevoir la stratégie ChatGPT, nous avons effectué d'importantes campagnes de [backtesting](/guides/octobot-usage/backtesting) afin de trouver les meilleurs marchés et paramètres et optimiser les trades effectués. + +To design the ChatGPT strategy, we ran extensive backtesting campaigns in order to find the best markets and settings to trade on. + +![le strategy designer avec bitcoin et la stratégie ChatGPT](/images/blog/chatgpt-strategy-deep-dive/strategy-designer-bitcoin-with-chat-gpt-strategy.png) + +De manière similaire aux [actifs complémentaires Smart DCA](smart-dca-making-of#les-actifs-complémentaires), la stratégie ChatGPT bénéficie du trading simultané de plusieurs actifs complémentaires. Il est donc important d'identifier les marchés appropriés afin d'optimiser vos profits en utilisant cette stratégie. + +### Résultats live + +Nous testons actuellement la stratégie ChatGPT avec des robots de trading réels depuis quelques semaines et nous sommes très heureux de constater qu'elle se comporte comme prévu, c'est à dire que l'OctoBot associé : + +- Achète et vend rapidement. +- Réalise des profits. +- Ne reste pas bloquée dans des ordres de vente ouverts. + +Nous avons même réalisé des bénéfices ! + +## Quand utiliser la stratégie ChatGPT + +De la même façon que les stratégies de DCA, la stratégie ChatGPT requière un marché stable ou ascendant pour faire des profits. Il est alors important de toujours bien choisir le marchés tradés pour que la stratégie soit capable de vendre ses cryptomonnaies rapidement et éviter de les bloquer dans des ordres de vente. + +Utiliser la stratégie ChatGPT sur un marché descendant peut bloquer les cryptomonnaies achetées dans des ordres de vente. Bien que la stratégie ne vende pas à perte, cette situation n'est pas optimale et peut empêcher de trader d'autres cryptomonnaies. + +## Autres resources + +Si vous souhaitez en savoir plus sur la manière d'exécuter votre stratégie de trading ChatGPT sur OctoBot, consultez notre article [Trader avec ChatGPT](trading-using-chat-gpt) qui couvre les détails techniques sur la façon d'utiliser ChatGPT selon vos préférences, directement depuis votre OctoBot. + +![prédictions de prix crypto par ChatGPT](/images/blog/chatgpt-strategy-deep-dive/chatgpt-crypto-price-predictions.png) + +Nous publions gratuitement nos signaux de trading ChatGPT sur [l'outil Prédictions de prix des crypto-monnaies avec ChatGPT](https://www.octobot.cloud/tools/crypto-prediction), pour lequel vous pouvez trouver plus de détails dans notre article [Trading avec les signaux ChatGPT +](introducing-chatgpt-trading-tool). + +## Avertissement + +Veuillez noter que le contenu de cet article est destiné à DES FINS D'INFORMATION GÉNÉRALE et non pas à des conseils financiers. Les informations contenues ici sont uniquement à titre informatif. Rien dans ce document ne doit être interprété comme un conseil financier, juridique ou fiscal. Le contenu de cet article reflète uniquement les opinions de l'auteur et/ou de l'équipe d'OctoBot. Aucun d'entre eux n'est un conseiller financier agréé ou un conseiller en investissement. L'achat de cryptomonnaies comporte des risques considérables de perte. L'auteur et/ou l'équipe OctoBot ne garantissent aucun résultat particulier. Les performances passées ne préjugent pas des résultats futurs. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-first-blog-post.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-first-blog-post.md new file mode 100644 index 0000000000..94a3902a23 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-first-blog-post.md @@ -0,0 +1,54 @@ +--- +title: "first blog post" +slug: "first-blog-post" +date: "2024-01-01" +--- +--- +title: Bienvenue sur le blog d'OctoBot +description: +tags: Crypto, Trading, Python, OctoBot +image: /images/blog/welcome-to-octobot-blog/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2022-06 +searchable: false +archived: true +author: guillaume +--- + + +# Bienvenue sur le blog d'OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/welcome-to-octobot-blog/cover.png) + +Since 2018, OctoBot is an open source cryptocurrency trading bot that allow hundreds of users to trade easily with professional features. + +The new OctoBot blog + +The OctoBot blog is a great place to learn about the open source OctoBot project. You can find out about the latest news, features, and releases. You can also learn about the people behind the project and how they are working to make OctoBot the best it can be. If you're using OctoBot, then we'd like to hear from you! Tell us what you think of the project, what you would like to see changed or added, and any other comments you have. + +## Qu'est ce que le trading de cryptomonnaies ? + +Cryptocurrency trading is the process of buying and selling cryptocurrencies. Cryptocurrencies are digital or virtual tokens that use cryptography to secure their transactions and to control the creation of new units. Cryptocurrencies are decentralized, meaning they are not subject to government or financial institution control. [Bitcoin](https://www.octobot.cloud/what-is-bitcoin), the first and most well-known cryptocurrency, was created in 2009. + +Cryptocurrency trading is a relatively new practice, but it has grown in popularity as more people become interested in cryptocurrencies. In 2018, Bitcoin's value increased by over 1000%, leading to a surge in interest in other cryptocurrencies. Cryptocurrency trading can be done through online exchanges, and many people trade cryptocurrencies as a way to make money. + +If you're interested in trading cryptocurrencies, there are a few things you should know. First, you need to understand how the market works and what factors can affect cryptocurrency prices. You also need to choose an exchange and wallet that you trust, and have a good understanding of the risks involved. Finally, don't invest more than you can afford to lose, and always remember that cryptocurrency prices can fluctuate wildly. + +## Pourquoi utiliser un robot de trading ? + +There are many reasons why you might want to use a trading bot. Perhaps you want to free up your time so that you can focus on other things, or maybe you want to take advantage of the 24-hour trading cycle by having a bot that can trade for you around the clock. Whatever your reason, a trading bot can be a helpful tool in your trading arsenal. + +One of the main advantages of using a trading bot is that it can help to take emotion out of the equation. When you are manually trading, it can be easy to let emotions like greed or fear influence your decisions. This can lead to bad trades that lose you money. A trading bot, on the other hand, is not influenced by emotions and will only execute trades based on pre-determined rules that you have set. This can help you to stick to your trading plan and make better, more informed decisions. + +Another benefit of using a trading bot is that it can help you to diversify your portfolio. If you are only manually trading one or two assets, you are missing out on the opportunity to profit from the moves in other markets. By using a bot, you can trade multiple assets simultaneously and capitalize on opportunities as they arise. + +## Qu'est-ce que l'open source ? + +In the world of software development, open source is a term used to describe a project or program that is available for anyone to use or modify. Open source projects are usually developed by a community of developers who work together to create and improve the code. OctoBot is an open source project that anyone can contribute to. We welcome new contributors! + diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-introducing-the-strategy-designer.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-introducing-the-strategy-designer.md new file mode 100644 index 0000000000..e1d053b00b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-introducing-the-strategy-designer.md @@ -0,0 +1,67 @@ +--- +title: "introducing the strategy designer" +slug: "introducing-the-strategy-designer" +date: "2024-01-01" +--- +--- +title: Présentation du Stratégie Designer +description: Meet the new Strategy Designer and get the best out of your strategies +tags: Strategy designer, Backtesting, Cryptocurrency, Trading, OctoBot cloud +image: /images/blog/introducing-the-strategy-designer/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2023-03-23 +searchable: false +difficulty_level: 3 +reading_time: 5 +author: guillaume +--- + + +# Présentation du Stratégie Designer + +![cover](/images/blog/introducing-the-strategy-designer/cover.png) + +## Visualisez exactement ce qu'il c'est passé +Lors de la création d'une stratégie de trading, la partie la plus difficile est souvent de tester la stratégie afin de trouver la meilleure configuration. + +Avec le Strategy Designer, vous trouverez de nombreux outils pour expérimenter, analyser, comparer et optimiser vos stratégies et leurs configurations à leur plein potentiel. + +![l'écran du strategy designer](/images/blog/introducing-the-strategy-designer/full-page.png) + +En utilisant le [Strategy Designer](/guides/octobot-usage/strategy-designer), pour toute stratégie de trading lors de chaque exécution de [backtesting](/guides/octobot-usage/backtesting), vous pouvez visualiser : + +- Les trades +- Les positions (lors du trading de futures) +- Le PNL +- La valeur du portefeuille +- La configuration +- D'autres métriques pertinentes + +## Comparer les résultats de backtesting +Être capable de voir le comportement exact de votre stratégie au fil du temps dans chaque backtest est un bon début mais pouvoir comparer ces valeurs avec d'autres résultats de backtesting est encore mieux. + +![comparaison graphique](/images/blog/introducing-the-strategy-designer/comparison.png) + +Chaque outil d'analyse peut être utilisé pour comparer les résultats des exécutions. + +![comparaison des trades](/images/blog/introducing-the-strategy-designer/trades-comp.png) + +## Stocker les résultats + +Lors de l'exécution d'un backtesting du Strategy Designer, les résultats sont stockés. Ainsi, vous pouvez toujours revenir à une configuration précédente et la comparer facilement avec d'autres. + +![historique](/images/blog/introducing-the-strategy-designer/history.png) + +## Encore plus à venir + +Cet article présente la première version du Strategy Designer. D'autres fonctionnalités seront ajoutées au concepteur pour vous permettre d'être encore plus efficace dans la création de vos meilleures stratégies de trading. + +![aperçu de l'optimiseur](/images/blog/introducing-the-strategy-designer/preview.png) + +## Comment utiliser le Stratégie Designer ? + +Le [Strategy Designer](/guides/octobot-usage/strategy-designer) est disponible pour les [ trading bot OctoBot](https://www.octobot.cloud/trading-bot). + +**[Démarrer votre OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-strategy-designer-revamp.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-strategy-designer-revamp.md new file mode 100644 index 0000000000..b73070c012 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-strategy-designer-revamp.md @@ -0,0 +1,76 @@ +--- +title: "strategy designer revamp" +slug: "strategy-designer-revamp" +date: "2024-01-01" +--- +--- +title: Refonte du Stratégie Designer +description: Enjoy the revamped Strategy Designer and created your best trading strategies +tags: Strategy designer, Configuration, Backtesting, Cryptocurrency, OctoBot cloud +image: /images/blog/strategy-designer-revamp/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2023-04-25 +searchable: false +difficulty_level: 3 +reading_time: 5 +author: guillaume +--- + + +# Refonte du Stratégie Designer + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/strategy-designer-revamp/cover.png) + +## Un affichage clair de vos résultats de backtesting +Based on your feedback, we have been working a lot making the [Strategy Designer](/guides/octobot-usage/strategy-designer) much easier to use and understand. The strategy designer is now split into 2 parts: +1. The Strategy Viewer: where you can quickly analyze and compare trading strategies based on their performances +2. The Strategy Creator: where you can easily create a new strategy in order to test it in backtesting](/fr/guides/octobot-usage/backtesting). + +The Strategy Designer is now our most advanced interface to create, test and compare strategies, we hope you will enjoy it. + +## Le visualisateur de stratégie +Quickly view, analyze and compare how your strategies behave and are more or less profitable or risky through time. +![viewer](/images/blog/strategy-designer-revamp/viewer.png) +Clearly identify key metrics of your strategy performances +![summary](/images/blog/strategy-designer-revamp/summary.png) +View each trade details +![trades](/images/blog/strategy-designer-revamp/trades.png) + +## Le créateur de stratégies + +1. Select the markets to trade on as well as the starting portfolio of your OctoBot for this strategy. +![step1.1](/images/blog/strategy-designer-revamp/stepper1.1.png) +![step1.2](/images/blog/strategy-designer-revamp/stepper1.2.png) + +2. Select the trading mode to use for your strategy. You can also change its configuration. +![step2](/images/blog/strategy-designer-revamp/stepper2.png) + +3. Select the strategy and evaluators to use. You can also change their configuration. +![step3](/images/blog/strategy-designer-revamp/stepper3.png) + +4. Configure the final touch: the type of exchange and backtesting time window. You can also give a name to your backtesting run to quickly identify it later on. +![step4](/images/blog/strategy-designer-revamp/stepper4.png) + +5. Start your backtesting: if necessary, OctoBot will fetch the required historical data from your exchange and will then launch a backtesting using your strategy. Results of the backtesting run will be stored in your history and available for later views and comparisons. + +## De backtesing à trading en direct + +Strategies created within the Strategy Designer remain in the Strategy Designer until converted into a live profile. They are independent and not affecting your live OctoBot. This means that you can test a Daily Trading strategy on the Strategy Designer that should be trading MATIC while having a live Dip Analyzer strategy that is trading BTC and ETH. + +Once you are satisfied with your trading strategy created in the Strategy Designer, you can instantly convert it into a live OctoBot profile + +![convert to live profile](/images/blog/strategy-designer-revamp/use-as-live.png) + +This profile will then be usable as any profile, which means that you can also share and edit it just the way you want. + +## Comment obtenir le Stratégie Designer ? + +Le [Strategy Designer](/guides/octobot-usage/strategy-designer) est disponible sur les [robots de trading OctoBot](https://www.octobot.cloud/trading-bot). + +**[Démarrer votre trading bot OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-trading-orders.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-trading-orders.md new file mode 100644 index 0000000000..d1066c4dca --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-trading-orders.md @@ -0,0 +1,63 @@ +--- +title: "trading orders" +slug: "trading-orders" +date: "2024-01-01" +--- +--- +title: Les bases du trading - Les ordres +description: Read this article and get a basic understanding of the orders available. +tags: Crypto, Trading, Automation, Orders, Educational +image: /images/blog/trading-orders/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2022-08-10 +searchable: false +difficulty_level: 1 +reading_time: 3 +author: guillaume +--- + + +# Les bases du trading - Les ordres + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/trading-orders/cover.png) + +Trading cryptocurrencies can be exciting, but also take a lot of time if you don’t know how to do it. There are many different types of orders you can make to buy or sell on your exchange according to what you want. Read our introductory article and get a basic understanding of the orders available. + +## Les ordres au marché, les plus simples + +When you place a market order, you're telling your broker (the exchange) to buy or sell shares or crypto-assets at the best available price. If you're buying, your broker will try to get the lowest possible price. If you're selling, it will try to get the highest. + +That's it! You don't have to worry about timing the market perfectly or anything like that. Just remember that if you're buying, you might not get the exact price you want, and if you're selling, you might not get the highest price possible. + +There are other types of orders, but market orders are the easiest to understand and use. + +## Les orders limités, les plus optimisés + +When it comes to trading, there are different types of orders that can be placed in order to execute a trade. Among these, the limit order is often considered the most valuable due to its ability to help traders get the best possible price for their desired asset. +A limit order is a type of trade that gives you exact instructions on when and how much to buy or sell an asset at any time in future. Your trade will be executed either once your price is met. + +## Les ordres stop, sécuriser vos gains + +When you're ready to start trading, you need to know the different types of orders that you can place. One important type of order is the stop order. A stop order is an order to buy or sell an asset at a specified price. This type of order becomes active only when the asset's price reaches the specified price. At that point, the order is executed at the next available price. + +Stop orders are often used to limit losses or protect profits. They can also be used to enter or exit a trade. For example, if you wanted to buy a crypto at $50 but it is currently trading at $49, you could place a stop order at $50. If the crypto reaches $50, your order will be executed and you will buy the crypto at $50. Or, if you wanted to sell a crypto if its price reaches $40 (in order to protect your gains) while this asset is currently trading at $51, you could place a stop order at $40. If the asset reaches $40, your order will be executed and you will sell the crypto at $40. This would prevent you from holding the asset at lower prices. + +Remember, stop orders are not guaranteed to be executed at the specified price. This is because once the stop price is reached, there may not be enough buyers or sellers. Regular stop orders are executed at the best available price at the time of their trigger. Stop loss limit orders can be used to create a limit order when the stop order is trigger and avoid this risk. However since those are creating a limit order, if the price of the limit order is not met, you will end up with an unfilled open limit order. + +## Les ordres take profit, réaliser vos gains avant qu'il ne soit trop tard + +When it comes to trading, one of the most important things to remember is to take profit before it's too late. That's why a take profit order is such an important tool for traders. A take profit order is an order to buy or sell an asset at a certain price once it reaches a certain level of profit. This ensures that you lock in your profits and don't let them slip away. + +One of the biggest mistakes traders make is waiting too long to take profits. They think that they can ride the wave of a rising crypto and make even more money. But the reality is that the market can turn on a dime and all those profits can evaporate just as quickly. That's why it's important to take profits when you have them. + +Another thing to remember is that you can always cash out before it's too late. If you're ever in doubt about a trade, or if you start to see signs that the market is about to turn, don't be afraid to cash out and take your profits. It's better to be safe than sorry when it comes to trading. + +## OctoBot automatise tout type de stratégie de trading + +OctoBot is an automated crypto trading bot that can implement any trading strategy using any type of order. It is simple to use and you can be up and running in minutes. There is no need to be a expert trader to use OctoBot, it will do all the work for you. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-are-stablecoins.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-are-stablecoins.md new file mode 100644 index 0000000000..0c5cd6f663 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-are-stablecoins.md @@ -0,0 +1,103 @@ +--- +title: "what are stablecoins" +slug: "what-are-stablecoins" +date: "2024-01-01" +--- +--- +title: Que sont les stablecoins +description: Explore the stablecoins, the crypto designed for stability in a volatile market. Understand the different types, their benefits, risks, and potential future impact on global finance. +tags: Cryptocurrency, Trading, Educational +image: /images/blog/what-are-stablecoins/cover.png +sidebar: false +breadcrumb: false +date: 2023-11-05 +searchable: false +difficulty_level: 1 +reading_time: 10 +author: guillaume +--- + + +# Que sont les stablecoins ? + +:::info + La traduction française de cette page est en cours. +::: + +<div style={{textAlign: "center"}}> +![cover](/images/blog/what-are-stablecoins/cover.png) +</div> + +Stablecoins are a type of cryptocurrency designed to minimize price volatility. While traditional cryptocurrencies like [Bitcoin](https://www.octobot.cloud/bitcoin-prediction) and [Ethereum](https://www.octobot.cloud/ethereum-prediction) experience wild price swings, stablecoins aim to maintain a consistent value over time. + +Stablecoins play a critical role in the cryptocurrency ecosystem. They provide stability in a volatile market, enabling businesses and individuals to transact using crypto without worrying about price fluctuations. + +## Comprendre les Stablecoins + +### Définition des Stablecoins + +Stablecoins are digital tokens designed to maintain a stable value against a specific asset or a pool of assets. They are often pegged to traditional fiat currencies like the US Dollar, Euro, or Gold. + +### Les types de Stablecoins + +There are three main types of stablecoins: fiat-collateralized, crypto-collateralized, and non-collateralized. Each type has its own mechanism to maintain price stability. + +#### Stablecoins adossés à une monaie fiduciaire + +These are stablecoins that are directly backed by fiat currencies like the US Dollar or Euro. For every stablecoin issued, there is a corresponding unit of real-world currency stored in a bank or other regulated financial institution. This physical backing provides a 1:1 value ratio, maintaining the stablecoin's price stability. + +<div style={{textAlign: "center"}}> +![usdt-price](/images/blog/what-are-stablecoins/usdt.png) +*USDT price history* +</div> + +#### Stablecoins associés à des cryptomonnaies + +Unlike fiat-collateralized stablecoins, these are backed by other cryptocurrencies. Due to the volatile nature of cryptocurrencies, these stablecoins are typically over-collateralized, meaning that the total value of the cryptocurrency collateral is higher than the value of the stablecoins issued, providing a buffer against market fluctuations. + +#### Stablecoins non adossés à un actif particulier + +These stablecoins are not backed by any collateral, whether it's fiat or crypto. Instead, they use algorithms and smart contracts to automatically adjust the supply of the stablecoin in response to changes in demand, aiming to keep the stablecoin's price close to a specific target value. + +### Comment fonctionnent les Stablecoins ? + +Stablecoins maintain their value by holding reserves of a stable asset, using smart contracts to maintain price stability, or through an algorithmic supply mechanism. + +## L'histoire des Stablecoins + +Stablecoins were introduced to tackle the problem of high volatility in the crypto market. The first stablecoin, Tether (USDT), was launched in 2014. +Since the launch of Tether, the stablecoin market has seen significant growth and evolution, with many new stablecoins being introduced, each with unique features and mechanisms. + +<div style={{textAlign: "center"}}> +![usdt-logo](/images/blog/what-are-stablecoins/usdt-logo.png) +*USDT (Tether) logo* +</div> + +## Bénéfices des Stablecoins + +- Stablecoins offer a safe haven during market volatility. Traders can convert their volatile cryptocurrencies into stablecoins to avoid market downturns. +- Stablecoins provide a stable medium of exchange, making them ideal for transactions, payments, and remittances. +- Stablecoins can play a crucial role in financial inclusion, providing access to digital currencies to the unbanked and underbanked populations. + +## Risques et Challenges des Stablecoins + +- Stablecoins face regulatory scrutiny worldwide, as they could potentially disrupt the traditional financial system. Managing collateral reserves for fiat-collateralized stablecoins can be complex and challenging. +- There's always a possibility that a stablecoin could fail to maintain its peg, leading to a sharp drop in value. This could have wider implications for the cryptocurrency market as a whole. + +## Le Future des Stablecoins + +Beyond just providing stability in the crypto market, stablecoins could be used for a wide range of purposes, including remittances, payments, smart contracts, and more. + +Stablecoins could have a significant impact on global finance by enabling faster and cheaper cross-border transactions, improving access to financial services, and potentially reshaping the global monetary system. + +There's a lot of potential for stablecoins in the future, particularly as blockchain technology continues to evolve and mature. However, the path forward will depend on a range of factors, including regulatory developments, technological advancements, and market dynamics. + +<div style={{textAlign: "center"}}> +![future](/images/blog/what-are-stablecoins/future.png) +</div> + +## Conlusion + +In conclusion, stablecoins play a critical role in the cryptocurrency market by providing stability in a volatile environment. However, they also come with their own set of challenges and risks. + +Despite the challenges, the potential benefits of stablecoins are significant, and they could play a key role in the future of finance. As the technology continues to evolve, it will be interesting to see how the role of stablecoins develops in the coming years. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-is-dca.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-is-dca.md new file mode 100644 index 0000000000..567650e0d1 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-is-dca.md @@ -0,0 +1,64 @@ +--- +title: "what is dca" +slug: "what-is-dca" +date: "2024-01-01" +--- +--- +title: DCA - Une des stratégies les plus profitables +description: Dollar cost averaging is one of the most profitable strategies for a long term investment +tags: Cryptocurrency, Trading, Strategy, How-to, DCA, Educational +image: /images/blog/what-is-dca/cover.png +sidebar: false +breadcrumb: false +date: 2022-09-20 +searchable: false +difficulty_level: 1 +reading_time: 5 +author: guillaume +--- + + +# DCA - Une des stratégies les plus profitables + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/what-is-dca/cover.png) + + +## Pourquoi le "dollar cost averaging" est-il une des stratégies les plus profitables pour un investissement long terme ? + +When it comes to investing your money, there are a lot of different strategies that you can choose from. But if you're looking for a strategy that is reliable and profitable in the long term, dollar cost averaging is definitely one to consider. In this article, we'll explain exactly what dollar cost averaging is and why it's such a great strategy for long-term investors! + +## Qu'est-ce que la stratégie de "dollar cost averaging" ? + +Dollar cost averaging is an investing technique whereby an investor purchases a fixed dollar amount of a particular investment on a regular schedule, regardless of the cryptocurrency price. By buying the same dollar amount each time, more cryptocurrency are purchased when prices are low and fewer cryptocurrency are bought when prices are high. Over time, this technique can help reduce the effects of volatility and risk on an investment portfolio. + +One of the main advantages of dollar cost averaging is that it takes the emotion out of investing. By buying into an investment on a regular schedule, investors can avoid the temptation to "time the market" by trying to predict when prices will go up or down. This strategy can also help investors stick to their long-term goals, even when markets are going through short-term ups and downs. + +## Pourquoi le "dollar cost averaging" est encore plus intéressant dans l'univers des cryptomonnaies ? + +As we all know, cryptocurrencies are a volatile market. The prices of [Bitcoin](https://www.octobot.cloud/bitcoin-prediction), [Ethereum](https://www.octobot.cloud/ethereum-prediction), Litecoin and other altcoins can fluctuate wildly from day to day, and even from hour to hour. This makes investing in cryptocurrencies a risky proposition. + +However, there is a way to mitigate this risk somewhat, and that is by dollar cost averaging (DCA). DCA is an investing strategy where you spread your investment into multiple smaller investments over time, rather than investing all at once. + +For example, let's say you want to invest $100,000 in [Bitcoin](https://www.octobot.cloud/what-is-bitcoin). You could do this by buying 4 Bitcoins all at once at the current market price. However, if the price of Bitcoin falls tomorrow, you will have lost 10% of your investment. + +Instead, you could spread your investment out over a period of time using DCA. For example, you could invest $1,000 in Bitcoin every week for 100 weeks. This way, if the price of Bitcoin falls one week, you will only lose $1,000 instead of $100,000. + +DCA is a great way to mitigate risk in the volatile world of cryptocurrencies. It is also a great long-term strategy for building up your investment portfolio + +## Quels sont les inconvénients du "dollar cost averaging" ? + +Dollar cost averaging is a technique that can be used when investing in order to minimize risk. The idea is to spread your investment into equal increments and invest at regular intervals. This technique smooths out the effects of market volatility and can help you avoid the effects of buying high and selling low. + +However, there are few drawbacks to using dollar cost averaging as your investment strategy. One of the biggest drawbacks is that it can take a long time to see results from this type of investing. This is because you are investing a set amount of money at regular intervals, regardless of what the market is doing. This means that it may take years for your investment to grow to its full potential. + +Another drawback of dollar cost averaging is that you may end up paying more for your investment than if you had invested all at once. This is because you are buying more cryptocurrency when the price is high and fewer cryptocurrency when the price is low. Over time, this can average out to a higher overall cost for your investment. + +Finally, dollar cost averaging does not guarantee that you will make a profit on your investment. It simply helps to minimize the risk of losses in a volatile market. If the market crashes, you could still lose money. + +## Comment utiliser le DCA avec OctoBot ? + +OctoBot can help you take advantage of dollar cost averaging by automatically buying a fixed dollar amount of your chosen investment on a regular schedule. This way, you can focus on your long-term goals and let OctoBot handle the day-to-day fluctuations in the market. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-is-future-trading.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-is-future-trading.md new file mode 100644 index 0000000000..5c880fa4ee --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-01-what-is-future-trading.md @@ -0,0 +1,109 @@ +--- +title: "what is future trading" +slug: "what-is-future-trading" +date: "2024-01-01" +--- +--- +title: Qu'est-ce que le trading de futures +description: Discover the world of cryptocurrency future trading and how to automate it in few steps. +tags: Cryptocurrency, Trading, Exchange, Future, Strategy, Educational +image: /images/blog/what-is-future-trading/cover.png +domain: blog.octobot.online +sidebar: false +breadcrumb: false +date: 2022-10-25 +searchable: false +difficulty_level: 2 +reading_time: 5 +author: guillaume +--- + + +# Qu'est-ce que le trading de futures ? + +:::info + La traduction française de cette page est en cours. +::: + +![cover](/images/blog/what-is-future-trading/cover.png) + + +## Qu'est-ce que le trading de futures de cryptomonnaies + +Cryptocurrency future trading is a process of buying and selling digital assets with the aim of making a profit from the difference in prices. It is one of the most popular forms of cryptocurrency trading, as it allows traders to speculate on the future price movements of their chosen asset. In this article, we will take a look at what cryptocurrency future trading is, how it works, and whether it is right for you. + +## Qu'est-ce que le trading de futures ? + +Cryptocurrency future trading is an innovative way to trade digital assets. It allows traders to speculate on the future price of a cryptocurrency. By correctly predicting the future price, traders can make a profit. + +However, cryptocurrency future trading is also risky. If a trader makes a wrong prediction, they can lose all of their investment. For this reason, it is important to carefully research the market before trading. + +The future of cryptocurrency trading is exciting. With the right approach, it can be profitable and rewarding. + +### Qu'est-ce que la vente à découvert ? + +Short selling is a trading strategy where a trader sells an asset, hoping to buy it back at a lower price so they can profit from the difference. Short selling is sometimes also called "shorting" or "going short". + +Short selling can be used in any market, but it is particularly popular in the cryptocurrency market. This is because cryptocurrencies are often volatile, which can create opportunities for traders to make profits. + +However, short selling is risky. If the price of the asset goes up instead of down, the trader will lose money. For this reason, it is important to carefully research the market before short selling. + +## Pourquoi trades des contrats de futures ? + +When it comes to trading cryptocurrencies, there are a few different options available. One popular option is trading with future contracts. In this article, we'll take a look at what exactly cryptocurrency future trading is and why it might be a good option for you. + +Cryptocurrency future trading is essentially betting on the future price of a particular coin. For example, let's say you think the price of [Bitcoin](https://www.octobot.cloud/bitcoin-prediction) is going to increase in the next month. You could buy a Bitcoin future contract that expires in one month and allows you to buy Bitcoin at the current price. If the price of Bitcoin does indeed increase in the next month, you would make a profit on your contract. + +There are a few reasons why cryptocurrency future trading can be a good option. First, it allows you to get exposure to the price movement of a particular coin without actually having to own any of the coins. This can be helpful if you don't want to tie up your capital in a particular coin but still want to benefit from its price movement. + +Another reason why cryptocurrency future trading can be attractive is that it often provides leverage. This means that you can control a larger position than if you were just buying the coins outright. + +## Quelques stratégies de trades de futures + +When trading cryptocurrencies, it's important to have a strategy. Here are some future trading strategies to consider: + +1. Buy and hold: This strategy involves buying a currency and holding it for a long period of time, regardless of market conditions. + +2. Buy and sell: This strategy involves buying a currency and selling it when the price increases. + +3. Sell and buy back: This strategy involves selling a currency and then buying it back at a lower price. + +4. Short selling: This strategy involves selling a currency in the hopes that the price will fall so that it can be bought back at a lower price. + +5. Arbitrage: This strategy involves taking advantage of price differences between exchanges. + +6. Hedging: This strategy involves taking both long and short positions in different currencies to offset risk. + +## Automatiser le trading de futures avec OctoBot + +OctoBot is an open-source software project that automates cryptocurrency trading. It is designed to be easily extensible and adaptable, allowing it to be used with a wide range of exchanges and strategies. + +OctoBot has a number of features that make it well suited for future trading. First, it supports multiple exchanges, allowing you to trade on multiple platforms simultaneously. Second, it includes a risk management system that can automatically adjust your position size to limit your losses. Third, it features a number of built-in strategies that you can use or customize to suit your own trading style. + +Finally, and perhaps most importantly, OctoBot is constantly being updated with new features and improvements. This means that it will continue to get better over time, making it an ideal tool for long-term future trading. + +### Comment utiliser OctoBot pour trader des futures ? + +There are two ways to use OctoBot for future trading: + +1. Use the built-in strategies + +OctoBot includes a number of built-in trading strategies that you can use out-of-the-box. To access these strategies, go to the "Strategies" tab in the OctoBot interface. + +From here, you can view a list of all the available strategies, as well as their performance over time. You can also backtest each strategy to see how it would have performed in the past. + +To start using a strategy, simply click on its name and then click "Enable". OctoBot will then begin using the strategy on your behalf. + +2. Create your own strategy + +If you want more control over your future trading, you can create your own custom strategy with OctoBot's Strategy Builder. This is a powerful tool that allows you to customize every aspect of your trading strategy, from the entry and exit conditions to the position sizing and risk management rules. + +From here, you'll be able to give your strategy a name and description. You can then start adding rules and conditions to your strategy. OctoBot's Strategy Builder includes a wide range of options, so you'll be able to create a strategy that suits your own trading style. + +Once you're happy with your strategy, click "Save" and OctoBot will begin using it on your behalf. + +## Conclusion + +OctoBot is a powerful tool that can help you automate your future trading. It includes a number of built-in strategies that you can use out-of-the-box, as well as a powerful Strategy Builder that allows you to create your own custom strategies. + +If you're looking for a tool to help you automate your future trading, OctoBot is definitely worth considering. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-03-introducing-trading-modes-guides.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-03-introducing-trading-modes-guides.md new file mode 100644 index 0000000000..018a9cbf75 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-01-03-introducing-trading-modes-guides.md @@ -0,0 +1,44 @@ +--- +title: "Introduction aux guides des trading modes" +description: "Découvrez les multiples façons de trader en utilisant OctoBot grâce aux trading modes basés sur le DCA, le grid trading, l'IA et TradingView" +slug: "introducing-trading-modes-guides" +date: "2024-01-03" +authors: ["guillaume"] +tags: ["Trading", "Educational"] +image: "/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg" +--- + + + +# Introduction aux guides des trading modes + +Lorsque vous tradez avec OctoBot, vous utilisez un trading mode. Les [Trading modes](/guides/octobot-trading-modes/trading-modes) sont responsables de la création, du maintien et de l'annulation des ordres. + +Les modes de trading sont un élément clé de toute stratégie commerciale et sont compatibles avec chaque [plateforme d'échange prise en charge](/guides/exchanges). + + +<div style={{textAlign: "center"}}> + <div> + ![Une personne regardant ses écrans en utilisant plusieurs stratégies de trading](/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg) *Un trader utilisant plusieurs stratégies de trading.* + </div> +</div> + +Sur la base de vos commentaires, nous avons créé des [guides pour chaque trading mode](/guides/octobot-trading-modes/trading-modes) afin d'expliquer clairement à quoi ils servent et comment les utiliser. Nous sommes impatients d'avoir vos commentaires sur ces guides. + +## Décomposition d'une stratégie OctoBot + +Une stratégie OctoBot est généralement divisée en 2 parties : + +1. Le trading mode : il décide comment créer des ordres sur une plateforme d'échange, combien investir dans chaque ordre, quand les annuler. +2. Les évaluators : ils envoient des signaux au trading mode pour l'activer lorsque cela est nécessaire. On pourrait dire qu'ils "réveillent" le trading mode lorsqu'il se passe quelque chose. + +Remarque : Certains trading modes, tels que ceux basés sur une grille ou les automatisations TradingView n'utilisent aucun évaluateur ; ils se "réveillent" automatiquement soit lorsqu'un ordre est exécuté soit lorsqu'ils reçoivent une notification depuis TradingView. + + +## Types de trading modes + +Lorsque vous utilisez OctoBot à partir du [des trading bots OctoBot](https://www.octobot.cloud/trading-bot), vous avez accès à [plusieurs types de trading modes](/guides/octobot-trading-modes/trading-modes#built-in-trading-modes). Voici les principales catégories de trading modes: + +- **Trading modes basés sur les statistiques**: Les entrées (et éventuellement les sorties) sont calculées à l'aide des statistiques. Cela peut provenir d'évaluateurs techniques, d'IA, des réseaux sociaux, d'événements de prix ou bien plus encore. +- **Trading modes à faible risque basés sur une grille*: Les ordres d'achat et de vente sont créés de manière déterministe selon la configuration du trading mode. Il n'y a pas de probabilité dans ces algorithmes. +- **Stratégies TradingView automatisées**: Les entrées et sorties sont créées en fonction des signaux provenant de votre compte TradingView. Dans ce mode de trading, le cœur de votre stratégie repose sur TradingView et OctoBot agit en automatisation pour synchroniser votre stratégie avec n'importe quel compte de plateforme d'échange. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-04-new-octobot-cloud-plans-and-trading-bots.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-04-new-octobot-cloud-plans-and-trading-bots.md new file mode 100644 index 0000000000..0f4dccfba1 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-04-new-octobot-cloud-plans-and-trading-bots.md @@ -0,0 +1,69 @@ +--- +title: "Nouvelles offres OctoBot cloud" +description: "OctoBot cloud lance de nouvelles offres et une nouvelle gamme de robots pour automatiser vos stratégies de trading" +slug: "new-octobot-cloud-plans-and-trading-bots" +date: "2024-02-04" +authors: ["guillaume"] +tags: ["AI", "Backtesting", "Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png" +--- + + + +# Nouvelles offres OctoBot cloud + +![a man relaxing in his couch while octobot is making money by automating cryptocurrency strategies dark](/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + +2024 sera une grande année pour OctoBot ! + +En 2023, nous avons [annoncé le lancement du nouveau Octobot cloud](introducing-the-new-octobot-cloud) où nous avons présenté notre approche en matière de stratégies de trading et notre volonté de diviser OctoBot en deux mondes : + +Où il est très facile d'automatiser les stratégies d'investissement crypto. Cette offre est destinée aux utilisateurs qui ne souhaitent pas créer leurs propres stratégies commerciales ou qui recherchent une diversification dans leurs propres systèmes commerciaux. Nous élargissons ces offres pour inclure plus d'options et vous apporter plus de valeur lors de l'utilisation des stratégies OctoBot cloud. + +C'est ce qui est présenté sur [octobot.cloud](/fr) + +## Changements dans les offres pour investisseurs + +Nous avons beaucoup réfléchi à la manière d'ajouter davantage de valeur aux offres des stratégies de trading prêtes à l'emploi proposées par OctoBot cloud. Notre objectif est de les rendre aussi simples et accessibles que possible afin d'intéresser les investisseurs crypto qui ne souhaitent pas créer leur propre stratégie mais veulent toutefois tirer profit d'excellentes stratégies d'investissement. + +Bien sûr, l'offre Investisseur actuel reste gratuit et permet toujours d'utiliser plusieurs stratégies OctoBot cloud avec de [vrais fonds sur plateforme d'échange](/investing/invest-with-your-strategy) ainsi qu'avec des [fonds virtuels](/investing/paper-trading-a-strategy). + +Nous ajoutons maintenant 2 autres plans liés qui étendent la porté de l'offre Investisseur. + +### L'offre Investisseur Plus +L'offre [Investisseur Plus](introducing-the-investor-plus-plan) est idéal pour profiter pleinement du cloud OctoBot à un prix abordable. + +Il permet d'utiliser chaque stratégie OctoBot, qu'elle soit basée sur des paniers de crypto, l'IA, le DCA ou les investissements en grille. + +L'offre Investisseur Plus permet également d'utiliser jusqu'à 10 OctoBots simultanés, que ce soit pour du trading réel ou virtuel sans risque. + +**[Essayer Investisseur Plus](https://www.octobot.cloud/pricing)** + +Jetez un œil à notre [article dédié à Investisseur Plus](introducing-the-investor-plus-plan) pour en savoir plus sur les détails du plan. + +### L'offre Pro + +L' [offre Pro](introducing-the-pro-plan) est conçu pour les utilisateurs qui souhaitent affiner précisément leurs stratégies ou même automatiser leurs propres startégies. + +Il inclut l'ensemble des avantages de l'offre Investor Plus. + +L'offre Pro offre l'accès aux [OctoBots TradingView](/investing/tradingview-automated-trading). Les OctoBots TradingView facilitent la création et l'automatisation d'un grand nombre de stratégie directement depuis TradingView sur n'importe quelle plateforme d'échange, en utilisant des fonds réels ou virtuels. + +Il permet également d'affiner vos OctoBots en cours d'exécution en offrant la possibilité de : + +- Annuler ou remplacer n'importe quel ordre OctoBot +- Créer manuellement des ordres d'achat et de vente quand vous le souhaitez, sur les cryptomonnaies de votre choix + +**[Passer à Pro](https://www.octobot.cloud/pricing)** + +Découvrez tous les détails de l'offre Pro [sur notre article dédié](introducing-the-pro-plan). + +## En conclusion + +Nous avons réfléchi à de nombreuses façons d'offrir des offres payantes et nous pensons que ces nouvelles offres sont excellents car ils rendent OctoBot très accessible: +- Vous pouvez continuer à utiliser les stratégies OctoBot cloud gratuitement et de manière illimitée, comme vous le faites peut-être déjà avec l'offre Investisseur +- La [version gratuite et open source d'OctoBot](/guides/octobot-installation/install-octobot-on-your-computer) bénéficie de mise à jours régulières qui sont possibles grâce aux abonnements payants d'[OctoBot cloud](/fr). + +D'autre part, si vous recherchez des fonctionnalités supplémentaires pour simplifier et surveiller facilement vos échanges, créer des stratégies de trading avancées ou obtenir un support personnalisé de l'équipe OctoBot, tout est maintenant possible. + +Nous espérons que vous apprécierez les nouvelles offres cloud d'OctoBot. Nous sommes impatients de recevoir vos commentaires à ce sujet ! diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-05-trading-on-coinex-with-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-05-trading-on-coinex-with-octobot.md new file mode 100644 index 0000000000..689864b08f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-05-trading-on-coinex-with-octobot.md @@ -0,0 +1,39 @@ +--- +title: "Trader sur CoinEx avec OctoBot" +description: "OctoBot s'associe à CoinEx pour permettre aux traders de cryptomonnaies d'automatiser facilement n'importe quelle stratégie de trading sur CoinEx." +slug: "trading-on-coinex-with-octobot" +date: "2024-02-05" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "CoinEx"] +image: "/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png" +--- + + + +# Trader sur CoinEx avec OctoBot + +![trader sur coinex avec octobot](/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png) + +Comme présenté sur <a href="https://twitter.com/coinexcom/status/1750701867867537762" rel="nofollow">cette annonce de CoinEx</a>, l'équipe d'OctoBot est fière d'annoncer <a href="https://www.coinex.com/register?refer_code=d6muk" rel="nofollow">CoinEx</a> comme nouvelle [plateforme d'échange partenaire](/guides/exchanges#plateformes-déchange-partenaires---supporter-octobot). + +## Utilisez votre stratégie de trading sur CoinEx + +À partir d'OctoBot 1.0.7, il est possible d'automatiser n'importe quelle [stratégie de trading OctoBot](/guides/octobot-trading-modes/trading-modes) sur l'échange CoinEx when en utilisant [les robots de trading OctoBot](https://www.octobot.cloud/trading-bot). + +Vous pouvez désormais utiliser OctoBot pour trader sur CoinEx: + +- En utilisant un [compte de démo sans risque](/guides/octobot-usage/simulator) et de l'argent simulé pour tester votre stratégie en conditions réelles +- Avec le [backtesting](/guides/octobot-usage/backtesting) pour optimiser rapidement les performances de vos stratégies +- Avec vos fonds réels sur CoinEx pour vraiment tirer profit de votre stratégie + +## Le point technique + +À partir d’OctoBot 1.0.7, le trading SPOT sur CoinEx est [entièrement pris en charge](/guides/exchanges/coinex), ainsi que sa connexion via websocket qui permet la mise à jour rapide des données du marché. + +CoinEx a également été intégré aux plateformes d'échanges régulièrement testées par OctoBot. Cela signifie que l'équipe OctoBot veille à ce que la connexion CoinEx reste stable et nous (l'équipe OctoBot) ferons tout notre possible pour maintenir cet état afin de vous offrir la meilleure automatisation de trading possible sur CoinEx. + +## En conclusion + +Chez OctoBot, nous cherchons à rendre l'automatisation des stratégies de trading accessible au plus grand nombre. Dans cette optique, il est important pour nous de permettre le trading automatisé sur chaque plateforme d'échange importante aux yeux de nos utilisateurs. + +Si vous tradez sur une plateforme qui n'est pas actuellement prise en charge, veuillez créer ou voter pour le post associé à votre plateforme sur <a href="https://feedback.octobot.cloud/open-source" rel="nofollow">notre site de feedback</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-23-introducing-the-investor-plus-plan.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-23-introducing-the-investor-plus-plan.mdx new file mode 100644 index 0000000000..52303efa9d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-23-introducing-the-investor-plus-plan.mdx @@ -0,0 +1,114 @@ +--- +title: "Découvrez l'offre Investisseur Plus" +description: "Découvrez l'offre Investisseur Plus et investissez avec toutes les stratégies OctoBot et les paniers de crypto que vous souhaitez." +slug: "introducing-the-investor-plus-plan" +date: "2024-02-23" +authors: ["guillaume"] +tags: ["TradingView", "Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png" +--- + + + +# Découvrez l'offre Investisseur Plus + +![annonce de l'offre octobot investisseur plus incluant les automatisations TradingView](/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png) + +Après des mois de brainstorming, d'essais et d'erreurs avec de bonnes et mauvaises idées, notre nouvel abonnement est enfin là. + +Nous sommes très heureux d'annoncer le lancement de l'offre d'abonnement Investisseur Plus sur OctoBot cloud ! + +## Qu'est-ce qu'Investisseur Plus ? + +Investisseur Plus est le premier plan d'abonnement global à compte d'OctoBot cloud. Il améliorera l'intégralité de votre compte OctoBot cloud. + +En choisissant l'offre Investisseur Plus, vous aurez accès immédiat à: + +- Tous les paniers de crypto +- Toutes les stratégies d'IA, de DCA et de grilles +- Jusqu'à 10 OctoBots simultanés sur des comptes réels ou virtuels +- Récapitulatif personalisé des nouvelles crypto par email _(bientôt disponible)_ + +**[Essayer Investisseur Plus](https://www.octobot.cloud/pricing)** + +## Les paniers de crypto + +Les [paniers de crypto](https://www.octobot.cloud/features/crypto-basket) sont un moyen simple d'investir dans plusieurs cryptomonnaies. Il existe de nombreux paniers de crypto vous permettant: + +- D'investir de façon diversifiée sur le marché des cryptomonnaies en investissant automatiquement dans les meilleures crypto +- D'investir dans plusieurs cryptomonnaies suivant un thème que vous aimez, tel que <a href="https://www.coingecko.com/fr/categories/artificial-intelligence" rel="nofollow">l' intelligence artificielle</a>, <a href="https://www.coingecko.com/fr/categories/decentralized-finance-defi" rel="nofollow">la finance décentralisée</a>, <a href="https://www.coingecko.com/fr/categories/gaming" rel="nofollow">le gaming</a> et bien d'autres encore. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="4E7lO9U0hj4" title="Investir dans les meilleures crypto" /> + +Utiliser un [panier crypto](https://www.octobot.cloud/features/crypto-basket) vous permet de ne pas avoir à choisir individuellement chaque crypto à acheter. +Vous pouvez investir dans un thème qui vous intéresse et ainsi <a href="https://www.investopedia.com/terms/d/diversification.asp" rel="nofollow">diversifier votre investissement</a> pour réduire les risques. + +## Stratégies d'IA, DCA Intelligent et Grid + +OctoBot propose de nombreuses stratégies préconfigurées. Certaines d'entre elles sont disponibles gratuitement et d'autres nécessitent l'offre Investor Plus. Voici un aperçu des différentes stratégies disponibles sur OctoBot. + +### Stratégies d'IA + +L'intelligence artificielle est l'un des sujets sur lesquels OctoBot se concentre beaucoup, et elle propose également des stratégies d'investissement. + + +<YouTube id="BV4ZHQrIpRQ" title="Stratégies de trading crypto ChatGPT avec OctoBot" /> + +Nous avons expérimenté avec différents systèmes d'IA pour améliorer les stratégies d'investissement, et nous avons abouti à plusieurs stratégies basées sur du DCA et améliorées par l'IA. + +Ces stratégies d'IA ont l'avantage de combiner la simplicité des stratégies DCA et l'IA pour augmenter les profits. Consultez nos articles [Trader avec ChatGPT](trading-using-chat-gpt) et [Making of de la stratégie de trading ChatGPT](chatgpt-strategy-deep-dive) pour en savoir plus sur les stratégies d'IA dans OctoBot. + +### Stratégies de Smart DCA + +<a href="https://www.investopedia.com/terms/d/dollarcostaveraging.asp" rel="nofollow">Dollar Cost Averaging (or DCA)</a> est une stratégie d'investissement très connue consistant +à acheter régulièrement afin de profiter des baisses de prix locales et vendre +progressivement par la suite. + +Ce que nous appelons Smart DCA dans OctoBot est une stratégie de trading très simple mais puissante, car elle réduit le risque en répartissant les transactions sur différents prix tout en prenant des bénéfices régulièrement. + + +<YouTube id="519pwSV1uwE" title="DCA Intelligent avec OctoBot" /> + +Les stratégies DCA peuvent être utilisées pour investir dans plusieurs cryptomonnaies en même temps, réduisant ainsi le risque en diversifiant les investissements et en profitant de la volatilité des monnaies. + +En savoir plus sur les stratégies DCA dans OctoBot dans notre article de blog sur le [Smart DCA](smart-dca-making-of). + +### Stratégies en grille + +Les Grilles sont des stratégies de trading à faible risque qui génère des bénéfices modestes mais régulier tant que les conditions sont remplies. + +<div style={{textAlign: "center"}}> + <div> + ![trading en grille illustré par un homme montant des escaliers verts en + attrapant des + pièces](/images/blog/introducing-the-investor-plus-plan/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) + </div> +</div> + +Lors de l'utilisation d'une stratégie en grille, votre OctoBot divisera votre portefeuille dans les deux crypto de la grille et placera des ordres d'achat et de vente autour du prix actuel à des intervalles de prix réguliers. + +Ensuite, lorsque le prix change et qu'un ordre est exécuté, un ordre du côté opposé est créé. Une fois cet ordre exécuté, des bénéfices sont encaissés. + +Une stratégie en grille est idéale pour profiter des marchés qui sont stables dans la fenêtre de prix de la grille : plus le prix varie dans cette fenêtre, plus il y a de bénéfices. + +## Plus d'OctoBots + +Avec l'offre Investor Plus, vous pouvez utiliser jusqu'à 10 OctoBots simultanés sur votre compte. + +Ces OctoBots peuvent : + +- Trader sur n'importe quelle plateforme d'échange prise en charge (voir le [guide sur l'utilisation de plusieurs OctoBots sur le même compte](/investing/having-multiple-octobot-strategies) pour utiliser plusieurs OctoBots sur un compte d'échange) +- Connecter à votre portefeuille de compte d'échange pour investir en utilisant vos fonds avec la stratégie de votre choix +- Utiliser de [l'argent virtuel](/investing/paper-trading-a-strategy) pour tester autant de stratégies que vous le souhaitez sans risque + +## Essai gratuit + +Êtes-vous intéressé pour essayer Investisseur Plus ? Nous avons créé un essai gratuit pour que vous puissiez le tester ! + +**[Essayer Investisseur Plus](https://www.octobot.cloud/pricing)** + +## Conclusion + +Nous espérons que vous allez aimer le nouvel abonnement Investisseur Plus. Nous l'avons conçu pour ajouter plus flexibilité à OctoBot cloud et faciliter vos investissements en cryptomonnaies. Nous sommes impatients d'avoir votre retour à ce sujet ! diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-27-bingx-wheel-of-fortune-event.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-27-bingx-wheel-of-fortune-event.md new file mode 100644 index 0000000000..f95ac9f073 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-02-27-bingx-wheel-of-fortune-event.md @@ -0,0 +1,59 @@ +--- +title: "Roue de la fortune BingX" +description: "Rejoignez l'événement BingX Wheel of Fortune pour gagner jusqu'à 1000 USDT simplement en tradant avec OctoBot en utilisant OctoBot cloud ou votre propre OctoBot" +slug: "bingx-wheel-of-fortune-event" +date: "2024-02-27" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "BingX", "Event"] +image: "/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png" +--- + + + +# Roue de la fortune BingX + +![evenement Roue de la fortune bingx et octobot avec 1000 usdt à gagner](/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png) + +En décembre, nous avons [annoncé](octobot-1-0-4-whats-new) le support officiel de BingX dans OctoBot, <a href="https://bingx.com/invite/Z4UUVX/" rel="nofollow">BingX</a> est maintenant également pris en charge sur les [stratégies OctoBot cloud](https://www.octobot.cloud/fr). + +Aujourd'hui, nous sommes heureux d'annoncer que du 28 février au 18 mars, BingX organisera un événement spécial pour les utilisateurs d'OctoBot avec jusqu'à 1000 USDT à gagner ! + +## Comment la Roue de la fortune fonctionne + +Après avoir <a href="https://bingx.com/invite/Z4UUVX/" rel="nofollow">créé votre compte BingX</a> (si vous n'en avez pas déjà un) et vous être inscrit sur la <a href="https://bingx.com/fr-fr/act/turntable/8628992176/" rel="nofollow">Roue de la fortune BingX</a>, vous gagnerez des tickets en tradant simplement sur BingX en avec l'une des options suivantes : + +- Les stratégies préconfigurées de BingX sur [OctoBot cloud](https://www.octobot.cloud/fr). +- Les nouveaux [OctoBots TradingView](/investing/tradingview-automated-trading) d' [OctoBot cloud](https://www.octobot.cloud/fr) pour automatiser simplement vos trades en utilisant des événements de prix, indicateurs ou stratégies Pine Script directement depuis TradingView. +- Votre propre stratégie OctoBot en utilisant des [robots de trading OctoBot](https://www.octobot.cloud/trading-bot). + +<div style={{textAlign: "center"}}> + **[Rejoindre l'évènement](https://bingx.com/fr-fr/act/turntable/8628992176)** +</div> + +Remarque : les robots de trading utilisant de l'argent simulé ne sont pas pris en compte pour générer des tickets de Roue de la fortune. Les trades doivent s'exécuter sur votre compte BingX. + +## Chaque utilisateur d'OctoBot peut gagner + +C'est le point fort de cet événement: + +- Tout le monde qui trade avec Octobot sur BingX gagnera des tickets. +- Chaque ticket offre une chance égale de gagner instantanément jusqu'à 1000 USDT. + +Cela signifie qu'il n'est pas nécessaire d'être le trader avec le plus gros volume échangé ou le meilleur PNL pour être éligible aux récompenses. + +OctoBot est conçu pour aider un maximum de personnes à automatiser leurs stratégies d'investissement et il en va de même pour la Roue fortune BingX: elle profite à tout le monde. + +Nous avons hâte de savoir quelles récompenses vous allez remporter sur BingX grâce à cet événement, rejoignez <a href="https://t.me/octobot_trading" rel="nofollow">groupe Telegram d'OctoBot</a> pour nous dire ce que vous aurez gagné ! + +## Gagnez plus en parlant d'OctoBot + +Avez vous des amis qui aimeraient [facilement suivre des stratégies d'investissement préconfigurées](/investing/introduction), [automatiser leur trading en utilisant TradingView](/investing/tradingview-automated-trading) ou encore [créer leur propre bot de trading](https://www.octobot.cloud/trading-bot) ? + +Nous pensons que cet événement BingX est un moment idéal pour parler à vos amis d'OctoBot: ils pourraient être parmi les heureux gagnants de l'événement ! Et cela ne réduira pas vos propres chances de gagner. + +Saviez-vous également qu'OctoBot dispose d'un programme de parrainage ? Lorsque vos amis s'inscrivent en utilisant votre lien de parrainage OctoBot : + +- Ils bénéficient d'une réduction de 5 $ sur l'abonnement de leur choix. +- Vous recevez 25 % de leurs frais d'abonnement ainsi qu'une partie des frais de transaction perçus par OctoBot cloud auprès des plateformes d'échange partenaires grâce à leurs OctoBots en trading réel. + +Obtenez votre lien de parrainage sur <a href="https://www.octobot.cloud/fr/rewards" rel="nofollow">votre tableau de bord de parrainage</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-03-20-introducing-the-pro-plan.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-03-20-introducing-the-pro-plan.mdx new file mode 100644 index 0000000000..1b8d5110a6 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-03-20-introducing-the-pro-plan.mdx @@ -0,0 +1,213 @@ +--- +title: "Découvrez l'offre Pro" +description: "Découvrez la nouvelle offre Pro et ajustez vos stratégies OctoBot cloud, automatisez vos trades depuis TradingView et configurez vos paniers de crypto." +slug: "introducing-the-pro-plan" +date: "2024-03-20" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png" +--- + + + +# Découvrez l'offre Pro + +![annonce de l'offre Octobot avec automatisations TradingView et trading avancé de cryptomonnaies](/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png) + +Suite au lancement de l'offre [Investisseur Plus](introducing-the-investor-plus-plan), nous sommes très heureux d'annoncer la nouvelle offre Pro. + +## Qu'est ce qui est dans l'offre Pro ? + +L'offre Pro vous donne accès à toutes les possibilités d'OctoBot cloud. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="w3RxwrBQxp0" title="Le Plan Pro d" /> + +Il inclut naturellement tout ce que proposent les offres Investisseur et [Investisseur Plus](introducing-the-investor-plus-plan), ce qui signifie: + +- Investir sur tous les échanges supportés +- Accès à l'ensemble des stratégies pré-configurées et paniers de crypto +- Jusqu'à 10 OctoBots sur des comptes réels ou virtuels +- Récapitulatif personalisé des nouvelles crypto par email _(bientôt disponible)_ + +Avec l'offre Pro, vous pouvez aussi: + +- Ajuster vos OctoBot pour configurer leur façon d'investir +- Placer des ordres manuellement depuis vos OctoBots +- L'automatisation de vos stratégies TradingView +- Investir avec des paniers de crypto personnalisés _(bientôt disponible)_ +- Utiliser jusqu'à 20 OctoBots sur des comptes réels ou virtuels +- Bénéficier du support prioritaire + +**[Passer à Pro](https://www.octobot.cloud/pricing)** + +## Ajustez vos OctoBots + +En utilisant l'offre Pro, vous pouvez modifier la façon dont chacun de vos OctoBots [achète et vend ses cryptomonnaies](/investing/fine-tune-your-octobots) + +### Placer des ordres manuellement depuis vos OctoBots + +Faites acheter ou vendre vos OctoBots + +<div style={{textAlign: "center"}}> + ![Acheter et vendre des cryptomonnaies directement depuis votre + OctoBot](/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png) +</div> +Avec l'offre Pro, vous pouvez depuis n'importe quel OctoBot: - Facilement trader +vous-même, directement sur votre échange ou votre [compte de trading virtuel +sans risque](/fr/investing/paper-trading-a-strategy) - Créer tous types d'ordres +d'achat ou vente à tout moment - Rééquilibrer votre portefeuille - Exclure une +partie de vos fonds de vos stratégies de trading + +### Faites trader vos OctoBots à votre façon + +Vous préféreriez qu'une stratégie cloud OctoBot achète ou vende différemment ? + +<div style={{textAlign: "center"}}> + ![Annulez les ordres directement depuis votre + OctoBot](/images/blog/introducing-the-pro-plan/annuler-des-ordres-directement-depuis-octobot.png) +</div> +Avec l'offre Pro, tous vos OctoBots peuvent: - Annuler n'importe quel ordre issu +d'une stratégie OctoBot cloud ou TradingView - Remplacer les ordres existants +par les vôtres + +### Suivez l'activité de vos OctoBots + +Profitez d'un historique clair des trades, automatisations et annulations qui ont eu lieu avec votre OctoBot. + +<div style={{textAlign: "center"}}> + ![Historique de l'activité d'achat et de vente de cryptomonnaies sur + OctoBot](/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-historique-activite-octobot.png) +</div> + +Plus de détails sur l'ajustement de vos OctoBots sur le [guide Investisseur](/investing/fine-tune-your-octobots). + +## Les OctoBots d'automation TradingView + +> Quelle serait la meilleure façon, pour la plupart des investisseurs crypto, de créer leurs stratégies d'investissement ? + +Chez OctoBot, nous passons beaucoup de temps à réfléchir à la façon d'offrir des stratégies personnalisables de la manière la plus simple possible. Alors que les stratégies préconfigurées d'OctoBot cloud sont et resteront disponibles gratuitement, il est important pour nous permettre aux investisseurs crypto d'investir avec Octobot Cloud selon leurs propres idées. + +En réfléchissant aux différentes possibilités, nous avons réalisé qu'il était important pour les investisseurs, lorsqu'ils créent une stratégie d’investissement, que celle-ci soit: + +- **Claire et attrayante**: Utiliser un outil graphique pour créer la stratégie et ne pas nécessiter aucune compétence en codage. +- **Adaptée aux connaissances** de l'investisseur: Nous voulons qu’Octobot Cloud offre les meilleures stratégies possibles pour vous en tant qu’investisseur. Il est donc nécessaire qu'Octobot d'adapte au niveau de connaissances de chacun. +- **Facile à suivre et surveiller**: Il est nécessaire de toujours savoir quels trades sont en cours et quelles ont été les actions précédentes. + + +<YouTube id="bZwyQMsgYYE" title="Automatiser TradingView" /> + +Après avoir testé plusieurs possibilités, nous avons réalisé qu'intégrer <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> est de loin la meilleure option car TradingView est: + +- **Très connu**: la plupart des investisseurs crypto savent comment utiliser TradingView et l'utilisent régulièrement +- **Focalisé sur la visualisation**: TradingView permet de faire des analyses visuelles des prix ainsi que d'utiliser des indicateurs et des stratégies Pine Script +- **Extensible** : TradingView peut être connecté à d'autres services tels qu'OctoBot cloud facilement et de façon sécurisé. + +C'est pourquoi nous avons décidé d'intégrer les stratégies TradingView dans OctoBot cloud en suivant ces principes. Vous pouvez maintenant automatiser facilement vos propres stratégies d'investissement ou de trading en utilisant [TradingView et OctoBot](/investing/tradingview-automated-trading). + +### Créez votre stratégie sur TradingView + +Avec TradingView, il est facile de créer des alertes automatisées basées sur: + +- Des événements de prix, par exemple "BTC dépasse les 50 000 $" +- N'importe quel indicateur technique (gratuit ou payant), tel que "le RSI est supérieur à 80" ou "la moyenne mobile sur 9 jours vient de croiser la moyenne mobile sur 21 jours" +- Les stratégies Pine Script, qui sont des stratégies intégrées à TradingView pouvant être optimisées via backtesting et dessinées visuellement directement sur les graphiques. + +Prenons un exemple avec une stratégie basée sur une <a href="https://www.investopedia.com/terms/e/ema.asp" rel="nofollow">Moyenne Mobile Exponnentielle</a> (ou EMA). Le principe est d'acheter et vendre lorsque les EMA sur 9 et 21 bougies se croisent. + +La stratégie TradingView ressemble à ceci : elle utilise simplement deux EMA classiques ... +![illustration de la stratégie ema sur TradingView avec 2 achats et 2 ventes](/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +... avec deux alertes : un ACHAT et une VENTE. + +<div style={{textAlign: "center"}}> + ![configuration de la stratégie ema + TradingView](/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-configuration.png) +</div> + +Il s'agit de l'ensemble la configuration de cette Stratégie TradingView. Plutôt simple ! +Apprenez-en davantage sur comment automatiser les alertes TradingView depuis des événements de prix, des indicateurs ou des stratégies Pine Script dans notre [guide d'automatisation des alertes TradingView](/investing/tradingview-alerts-automation). + +La prochaine étape consiste à faire en sorte que cette stratégie crée réellement des ordres sur les plateformes d'échanges cryptos pour obtenir une stratégie d’investissement automatique fonctionnelle. + +### Tradez avec TradingView en utilisant OctoBot cloud + +Pour transformer les alertes TradingView en trades, nous avons créé un nouveau système appelé `Automatisations` qui fonctionne en duo avec les OctoBots TradingView : une automatisation est une action comme la création ou l'annulation d'ordres. Les automatisations peuvent être liées aux alertes TradingView pour automatiser facilement les aspects trading de votre stratégie TradingView sur les plateformes d'échange. + +Revenons sur la strategy EMA que nous venons juste de créer sur TradingView, voici ce qu'elle donne côté OctoBot. + +![illustration de la vue octobot de stratégie ema sur TradingView avec 2 achats et 2 ventes](/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +Vous avez peut-être remarqué les identifiants d'automatisation dans la section `Message` des alertes ACHETER et VENDRE sur TradingView, ils sont utilisés pour indiquer à OctoBot quelle automation déclencher lorsque l'alerte est reçue: `5a6e0e4d-4c8a-4212-881c-3174cd322002` correspond à `Buy BTC - 50% USDT` et `b1791518-b92c-4dc7-8dc1-4905bcaf3165` correspond à `Sell BTC - 100%`. + +<div style={{textAlign: "center"}}> + ![automatisation TradingView d'un achat de BTC au marché par + octobot](/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-market-buy-btc-automation.png) +</div> + +Le principal avantage de cette approche est qu'elle est à la fois très simple et puissante : vous créez votre stratégie TradingView puis référencez simplement les automatisations de votre OctoBot. +De plus, les automatisations sont faciles à configurer et illimitées : vous pouvez créer un OctoBot TradingView avec des automatisations d'achat et de vente très simples ou utiliser un ensemble avancé d'automatisations configurées pour investir avec plusieurs cryptomonnaies et différents si vous le souhaitez. + +Vous vous demandez comment démarrer une automatisation OctoBot TradingView ? Consultez notre [tutoriel de trading TradingView](/investing/tradingview-trading-tutorial). + +### De nombreux types d'ordres + +Utilisez les automatisations pour optimiser davantage vos prix d'entrée et de sortie en utilisant des ordres limités avec un prix fixe ou relatif. + +<div style={{textAlign: "center"}}> + ![Achetez SOL avec 50 USDT à 10 % de réduction grâce aux automatisation + TradingView + d'Octobot](/images/blog/introducing-the-pro-plan/buy-sol-with-50-usdt-at-10-percent-discount-octobot-tradingview-automation.png) +</div> + +Bénéficiez du réglage fin des ordres pour vérifier, annuler ou remplacer facilement tout ordre créé à partir de vos automatisations TradingView. + +### Des montants d'ordres avancés + +Optimisez vos stratégies TradingView utilisant une ou plusieurs cryptomonnaies et en configurant précisément les montants des ordres créés. + +<div style={{textAlign: "center"}}> + ![Vendez 1 AVAX à 120 USDT grâce à l'automatisation TradingView + d'Octobot](/images/blog/introducing-the-pro-plan/sell-1-avax-at-120-usdt-octobot-tradingview-automation.png) +</div> + +Configurez les montants des ordres d'automatisation selon: + +- Un pourcentage des fonds de votre portefeuille +- Un montant spécifique de la cryptomonnaie échangée +- Un montant défini en USDT ou toute autre devise de cotation de votre paire d'échange + +Apprenez-en plus sur les automatisations des OctoBots TradingView sur le [guide des automatisations](/investing/tradingview-automated-trading#les-automatisations-pour-créer-vos-stratégies). + +**[Passer à Pro](https://www.octobot.cloud/pricing)** + +:::info + Nous continuerons à travailler sur l'ajout de nouveaux types d'automatisations + pour offrir plus d'options et augmenter les possibilités. N'hésitez pas à nous + dire si vous avez besoin de nouveaux types d'automatisations et nous ferons de + notre mieux pour les intégrer ! +::: + +## Les paniers de crypto personnalisables + +:::info + Bientôt disponible +::: + +<div style={{textAlign: "center"}}> + <div> + ![panier de crypto + personalisé](/images/blog/introducing-the-pro-plan/crypto-basket-landing.png) + </div> +</div> + +Configurez le contenu de vos [paniers de crypto](https://www.octobot.cloud/features/crypto-basket) directement depuis votre OctoBot. + +- Ajoutez des cryptomonnaies à vos paniers. +- Supprimez des cryptomonnaies de vos paniers. +- Modifiez les proportions de chaque crypto dans vos paniers. + +## Support prioritaire + +Si vous avez des questions sur OctoBot, les stratégies d'investissement, les stratégies TradingView ou autre chose, vous aider sera notre priorité. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-15-invest-with-crypto-baskets.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-15-invest-with-crypto-baskets.mdx new file mode 100644 index 0000000000..090b0cb7ef --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-15-invest-with-crypto-baskets.mdx @@ -0,0 +1,93 @@ +--- +title: "Investir simplement avec les paniers de crypto" +description: "Découvrez une nouvelle façon d'investir et de diversifier facilement votre portefeuille crypto avec les paniers par thèmes" +slug: "invest-with-crypto-baskets" +date: "2024-04-15" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "OctoBot cloud", "Release"] +image: "/images/blog/invest-with-crypto-baskets/crypto-basket.png" +--- + + + +# Investir simplement avec les paniers de crypto + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="4E7lO9U0hj4" title="Investir dans les meilleures crypto" /> + +Dans notre objectif de rendre l'investissement en cryptomonnaies simple et accessible, nous sommes très heureux d'annoncer la sortie de la fonctionnalité des [paniers de crypto](https://www.octobot.cloud/features/crypto-basket). + +## Qu'est-ce qu'un panier de cryptomonnaies ? + +Un panier de crypto est une collection de différentes cryptomonnaies qui partagent un même thème. + +<div style={{textAlign: "center"}}> + <div> + ![annonce de la fonctionnalité d'investissement avec des paniers de + crypto](/images/blog/invest-with-crypto-baskets/crypto-basket.png) + </div> +</div> + +Par exemple, vous pouvez trouver un panier consacré aux cryptos liées à l'<a href="https://www.coingecko.com/fr/categories/artificial-intelligence" rel="nofollow">intelligence artificielle</a>, <a href="https://www.coingecko.com/fr/categories/meme-token" rel="nofollow">aux memes</a>, <a href="https://www.coingecko.com/fr/categories/non-fungible-tokens-nft" rel="nofollow">aux NFT</a>, <a href="https://www.coingecko.com/fr/categories/metaverse" rel="nofollow">au metaverse</a>, et bien d'autres encore. + +## Pourquoi utiliser un panier ? + +Utiliser un panier de crypto vous permet de ne pas avoir à choisir individuellement chaque crypto à acheter. +Vous pouvez investir dans un thème qui vous intéresse et ainsi <a href="https://www.investopedia.com/terms/d/diversification.asp" rel="nofollow">diversifier votre investissement</a> pour réduire les risques. + +Les paniers de crypto vous permettent aussi de tirer profit de toutes les variations des cryptos. +En effet, dès que le prix d'une crypto du panier augmente, sa part dans votre portefeuille augmentera et pourra ainsi dépasser le pourcentage de répartition du panier. +Dans ce cas, une opération de rééquilibrage du portefeuille sera déclenchée. Cette opération va vendre ou acheter des crypto pour rééquilibrer le portefeuille. + +Ainsi, si une crypto de votre panier a augmenté de 50%, lors du rééquilibrage vous encaisserez le profit et vous le réinvestirrez automatiquement dans les autres cryptomonnaies du panier. + +Vous avez même la possibilité d'utiliser plusieurs paniers pour investir dans différents thèmes simultanément, cela réduit encore plus le risque et vous permet de profiter encore plus des variations du marché. + +## Peut-on tester les paniers avec de l'argent virtuel ? + +Oui, sur OctoBot cloud, vous pouvez tester gratuitement n'importe quel panier de crypto avec des [fonds virtuels](/investing/paper-trading-a-strategy). +Cela vous permet d'avoir une idée du fonctionnement et des performances des paniers avant de passer à l'argent réel. + +## Qui crée ces paniers de crypto ? + +Les paniers de crypto sur OctoBot cloud sont principalement créés en utilisant <a href="https://www.coingecko.com/fr/categories" rel="nofollow">les catégories de Coingecko</a>, une plateforme qui réalise des classements des différentes cryptomonnaies et des plateformes d'échange. +Nous utilisons aussi le classement par capitalisation des cryptos fourni par <a href="https://www.coingecko.com/fr" rel="nofollow">Coingecko</a> afin de proposer d'autres types de paniers. + +<div style={{textAlign: "center"}}> + <div> + ![exemple d'un panier avec le top 3 des crypto par + capitalisation](/images/blog/invest-with-crypto-baskets/utiliser-le-panier-top-marketcap.png) + </div> +</div> + +Par exemple, nous avons des paniers comprenant les trois plus grandes capitalisations. Il vous permet d'investir dans ces trois crypto en même temps et de profiter des variations de leurs prix. + +Découvrez quelques-uns de nos paniers sur la [page de présentation](https://www.octobot.cloud/features/crypto-basket) de la fonctionnalité. + +## Comment sont-ils mis à jour ? + +Les paniers de crypto sur OctoBot cloud sont automatiquement mis à jour régulièrement pour suivre les évolutions du marché. La fréquence peut varier d'un panier à l'autre. + +Lorsque votre portefeuille de crypto diverge trop de la répartition du panier qui vous avez choisi, OctoBot réalise automatiquement une opération de rééquilibrage pour maintenir la même répartition que celle du panier. + +## Comment investir avec un panier de crypto ? + +Sur OctoBot cloud, vous pouvez investir gratuitement (avec une période d'essai de 14 jours) dans n'importe quel [panier de crypto](https://www.octobot.cloud/features/crypto-basket), en illimité, en suivant les étapes suivantes : + +1. Créez un compte ou connectez-vous à votre compte OctoBot cloud +2. Suivez l'introduction ou rendez-vous sur l'<a href="https://www.octobot.cloud/fr/explore" rel="nofollow">explorateur de stratégies</a> pour choisir le thème de votre panier de crypto +3. Sélectionnez un portefeuille d'[argent virtuel](/investing/paper-trading-a-strategy) ou réel (pour utiliser de l'argent réel vous devrez [connecter votre compte d'échange](/investing/investor-faq#comment-puis-je-connecter-mon-compte-de-plateforme-déchange-à-octobot-)). +4. Lancez votre OctoBot pour commencer à investir. + +## Peut-on créer son propre panier ? + +Oui, vous pouvez créer votre propre panier grâce au plan [Pro](introducing-the-pro-plan) d'OctoBot cloud. +Ce plan vous permet de personnaliser un panier existant ou de créer votre propre panier avec la répartition que vous souhaitez. +Attention, si vous utilisez des paniers personnalisés, vous ne pourrez plus bénéficier des mises à jour automatiques du panier que vous avez personnalisé. + +Plus besoin de passer des heures à choisir les meilleures cryptomonnaies. Avec les paniers de crypto, l'investissement devient plus simple et plus efficace. + +<div style={{textAlign: "center"}}> + **[Investir avec un panier de crypto](https://www.octobot.cloud)** +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-16-coinbase-and-binance.us-trading-bot.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-16-coinbase-and-binance.us-trading-bot.mdx new file mode 100644 index 0000000000..e9812c6263 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-16-coinbase-and-binance.us-trading-bot.mdx @@ -0,0 +1,100 @@ +--- +title: "Robot de trading Coinbase et Binance.us" +description: "Automatisez vos investissements crypto avec des paniers de crypto, du DCA, de l'IA, des Grilles ou encore TradingView sur Coinbase et Binance.us avec OctoBot." +slug: "coinbase-and-binance.us-trading-bot" +date: "2024-04-16" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Exchange", "Binance.us", "Coinbase"] +image: "/images/blog/coinbase-and-binance.us-trading-bot/binance.us-and-coinbase-support-on-octobot.png" +--- + + + +# Robot de trading Coinbase et Binance.us + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="4Ez9vMJIiRc" title="Robot de Trading Coinbase" /> + +La prise en charge de Coinbase et Binance.us sur OctoBot cloud est de loin la fonctionnalité la plus demandée, et nous sommes heureux d'annoncer que les deux échanges sont désormais intégrés à OctoBot cloud ! + +Vous pouvez maintenant automatiser facilement vos investissements en cryptomonnaie en utilisant OctoBot cloud à la fois sur <a href="https://www.coinbase.com/" rel="nofollow">Coinbase</a> et sur <a href="https://www.binance.us/" rel="nofollow">Binance.us</a> avec vos comptes d'échange ou [sans risque en trading virtuel](/investing/paper-trading-a-strategy). + +## Investir avec les paniers de crypto sur Coinbase et Binance.us + +[Les paniers de crypto](invest-with-crypto-baskets) vous permettent d'investir facilement dans l'ensemble du marché des cryptomonnaies ou dans des thèmes spécifiques dans lesquels vous croyez. + +<div style={{textAlign: "center"}}> + ![panier de crypto octobot contenant du bitcoin ethereum solana et + dogecoin](/images/blog/coinbase-and-binance.us-trading-bot/crypto-basket.png) +</div> + +Lorsque vous utilisez des paniers de cryptomonnaies, votre OctoBot répartira vos fonds entre les cryptos du panier. Chaque panier est automatiquement mis à jour selon le classement par capitalisation et les catégories de <a href="https://www.coingecko.com/" rel="nofollow">CoinGecko</a> ainsi que la disponibilité des crypto sur chaque échange. + +**[Investir avec OctoBot](https://www.octobot.cloud)** + +### Paniers de top + +Les paniers de top vous permettent d'investir dans les meilleures cryptos du marché selon leur capitalisation boursière d'après les plateformes telles que <a href="https://www.coingecko.com/" rel="nofollow">CoinGecko</a>. + +Investir dans les meilleurs paniers de crypto est un excellent moyen de diversifier vos fonds et de profiter de l'ensemble du marché crypto. Suivre un panier crypto de top maintiendra votre portefeuille d'échange équilibré parmi les cryptos du panier, assurant ainsi que vous profitiez des augmentations de prix de chacune de ces monnaies. + +### Paniers de crypto à theme + +Les paniers de crypto thématiques, quant à eux, vous permettent d'investir dans des thèmes définis par les <a href="https://www.coingecko.com/en/categories" rel="nofollow">catégories CoinGecko</a>. + +Les paniers de crypto orientés autour d'un thème sont utiles pour profiter des tendances globales et investir dans les sujets auxquels vous croyez comme par exemple l'intelligence artificielle, les NFTs, les actifs réels, les Meme coins et plus encore. + +### En apprendre plus sur les paniers de crypto + +Chez OctoBot, nous pensons qu'investir avec des paniers crypto est l'une des meilleures façons d’investir en crypto car cela permet: + +- D’investir dans toutes les cryptomonnaies principales ou encore un ensemble thématique au lieu choisir quelques crypto individuellement +- De réduire le risque en diversifiant ses investissements sur plusieurs crypto +- Utiliser une référence neutre: les classements par capitalisation et catégorisation + +Si vous souhaitez en savoir plus, consultez notre [article détaillé sur les paniers crypto](invest-with-crypto-baskets) + +## Bénéficiez des stratégies DCA, IA et Grilles sur Coinbase et Binance.us + +Avec OctoBot cloud, vous pouvez également investir en utilisant des stratégies préconfigurées de Dollar Cost Averaging (DCA), d'intelligence artificielle et de grille. + +<div style={{textAlign: "center"}}> + ![octobot collabore avec + chatgpt](/images/blog/coinbase-and-binance.us-trading-bot/octobot-collaborating-with-chatgpt-light.png) +</div> + +Ces stratégies peuvent être utilisées gratuitement sur OctoBot cloud et leurs performances historiques sont publiques. + +### Stratégies de Smart Dollar Cost Averaging + +[Les stratégies de Smart Dollar Cost Averaging](smart-dca-making-of) sont un moyen puissant de profiter de la hausse de plusieurs cryptomonnaies en tirant parti de multiples achats et ventes afin de minimiser votre prix d'achat et maximiser votre prix de vente. + +Pour en savoir plus sur les stratégies de smart DCA, consultez notre article [de Smart DCA avec OctoBot](smart-dca-making-of) + +### Stratégies avec Intelligence Artificielle + +Vous pouvez également intégrer de l'intelligence artificielle dans le processus décisionnel des achats des stratégies DCA. + +Avec OctoBot cloud, vous pouvez utiliser des stratégies Smart DCA basées sur des modèles ChatGPT pour analyser les tendances du marché et décider d'acheter lorsque ChatGPT estime que le moment est propice. + +Voici notre [article de trading ChatGPT](trading-using-chat-gpt) détaillant les stratégies IA. + +## Automatiser vos stratégies TradingView sur Coinbase et Binance.us + +Enfin, grâce à OctoBot cloud, vous pouvez automatiser facilement vos stratégies TradingView directement sur vos comptes Binance.us et Coinbase. + +<div style={{textAlign: "center"}}> + ![automatisation tradingview illustratée par le logo + tradingview](/images/blog/coinbase-and-binance.us-trading-bot/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +De cette manière, vous pouvez automatiser vos achats et ventes en vous basant sur : + +- Les changements de prix du marché +- Des indicateurs intégrés ou personnalisés +- Des stratégie Pine Script + +Consultez notre [guide présentant les OctoBots TradingView](/investing/tradingview-automated-trading) avec vidéos et exemples de stratégies pour en savoir plus. + +**[Investir avec OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-18-one-click-cloud-deployment-with-octobot-1-0-9.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-18-one-click-cloud-deployment-with-octobot-1-0-9.md new file mode 100644 index 0000000000..0ae35543b6 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-18-one-click-cloud-deployment-with-octobot-1-0-9.md @@ -0,0 +1,95 @@ +--- +title: "Déploiement cloud en un clic avec OctoBot 1.0.9" +description: "OctoBot 1.0.9 est disponible ! Déployez votre OctoBot depuis le marketplace DigitalOcean et créez votre panier de crypto personnalisé." +slug: "one-click-cloud-deployment-with-octobot-1-0-9" +date: "2024-04-18" +authors: ["paul"] +tags: ["Tradingview", "Hosting", "Release"] +image: "/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png" +--- + + + +# Déploiement cloud en un clic avec OctoBot 1.0.9 + +![octobot 1.0.9 ditigtalocean déploiement en 1 clic et paniers de crypto personalisables](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png) + +## Déploiement cloud en un clic + +Faire fonctionner votre robot de trading OctoBot dans le cloud n'a jamais été aussi **facile et économique** ! OctoBot est désormais disponible en tant que Droplet 1-Click sur la <a href="https://digitalocean.pxf.io/octobot-app" rel="nofollow">marketplace officielle de DigitalOcean</a>. + +<div style={{textAlign: "center"}}> + ![octobot on the digitalocean + marketplace](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-on-the-digitalocean-marketplace.png) +</div> +En utilisant DigitalOcean, vous pouvez désormais exécuter simplement votre +propre bot de trading OctoBot dans le cloud et l'avoir disponible pour +automatiser vos stratégies de trading 100% du temps. + +<div style={{textAlign: "center"}}> + **[Deployer votre OctoBot](/guides/octobot-installation/cloud-install-octobot-on-digitalocean)** +</div> + +Avoir votre OctoBot opérationnel sur DigitalOcean se fait en **un seul clic** et commence à seulement **6$ par mois** avec une configuration minimale. + +## OctoBot 1.0.9 + +Nous sommes heureux d'annoncer la sortie d'OctoBot 1.0.9. Cette version ajoute notamment la prise en charge des [déploiements cloud en un clic](/guides/octobot-installation/cloud-install-octobot-on-digitalocean) mentionnés ci-dessus et ajoute également des Paniers Crypto personnalisés dans OctoBot tout en améliorant les modes de trading existants et en corrigeant de nombreux problèmes. + +### Paniers de crypto + +Tout comme les [paniers de crypto d'OctoBot cloud](https://www.octobot.cloud/features/crypto-basket), vous pouvez maintenant créer votre propre panier de crypto en utilisant Octobot et le nouvel [Index Mode Trading](/guides/octobot-trading-modes/index-trading-mode). + +<div style={{textAlign: "center"}}> + <div> + ![panier de + crypto](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/crypto-basket.png) + </div> +</div> + +Lorsque vous utilisez l'Index Mode Trading, votre Octobot divisera vos fonds en marché de référence entre les différentes crypto de vos paires configurées. Vous pouvez également définir un intervalle et un seuil de rééquilibrage pour personnaliser la manière dont votre Octobot doit réagir lorsque les crypto détenues dans votre panier changent de valeur. + +Et bien sûr, vous pouvez utiliser le backtesting pour optimiser le contenu de vos paniers ! + +### Trading modes améliorés + +Les trading modes DCA et TradingView ont tous deux été améliorés dans Octobat 1.0.9. + +**DCA Trading Mode** + +Le [DCA Trading Mode](/guides/octobot-trading-modes/dca-trading-mode) supporte maintenant un paramètre supplémentaire. En définissant le `Max asset holding` dans vos stratégies DCA, vous pouvez limiter l'exposition à un actif donné. Cela est particulièrement utile pour les configurations de DCA basée sur des évaluateurs car cela empêche votre bot de DCA d'augmenter indéfiniment votre exposition à une crypto en particulier lorsque les conditions d'achat de cette même crypto se répètent. + +**TradingView Trading Mode** + +<div style={{textAlign: "center"}}> + <div> + ![tradingview logo montrant le trading mode tradingview + octobot](/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/tradingview-logo-showing-octobot-tradingview-trading-mode.png) + </div> +</div> + +Les ordres limites et stop créés par le [Trading Mode TradingView](/guides/octobot-trading-modes/tradingview-trading-mode) sont désormais beaucoup plus flexibles. + +Le Trading Mode TradingView prend maintenant en charge les [prix relatifs](/guides/octobot-trading-modes/order-price-syntax) pour les ordres de type limite et stop. Cela signifie que vous pouvez configurer vos alertes TradingView pour déclencher par exemple: + +- Un ordre d'achat BTC/USDT à -10% du prix actuel +- Un ordre de vente ETH/BTC au prix actuel + 0,01 BTC +- Un stop loss BTC/USDT au prix de 35000 USDT + +### Amélioration du support des échanges + +- **Coinbase**: OctoBot prend désormais en charge à la fois l'ancien et le nouveau format de clé d'API Coinbase +- **MEXC**: Le trading sur MEXC est désormais beaucoup plus stable +- **Tous les échanges**: Le flux d'ordres dans OctoBot a été amélioré. Cela résout de nombreux problèmes liés à la synchronisation des ordres ainsi qu'aux erreurs lors de la création des ordres. + +<div style={{textAlign: "center"}}> + **[Mettre à jour OctoBot](/guides/octobot-installation/install-octobot-on-your-computer)** +</div> + +### Liste des changements + +Retrouvez l'intégralité des modifications d'OctoBot 1.0.9 sur le <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/CHANGELOG.md" rel="nofollow">dépôt GitHub d'Octobot</a>. + +## Le mot de la fin + +Nous tenons à remercier la communauté OctoBot pour ses excellentes idées d'amélioration et son support support ainsi que pour avoir signalé bon nombre des problèmes qui ont été corrigés dans la version 1.0.9. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-19-mobile-app-revamp.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-19-mobile-app-revamp.md new file mode 100644 index 0000000000..4bd9764e76 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-04-19-mobile-app-revamp.md @@ -0,0 +1,72 @@ +--- +title: "Nouveau design de l'application mobile" +description: "Une nouvelle version de l'application mobile Android et iPhone OctoBot est disponible vous permettant de mieux suivre vos investissements" +slug: "mobile-app-revamp" +date: "2024-04-19" +authors: ["paul"] +tags: ["Mobile", "App", "OctoBot cloud", "Release"] +image: "/images/blog/mobile-app-revamp/thumb.png" +--- + + + +# Nouveau design de l'application mobile OctoBot + +Merci à tous ceux qui ont téléchargé notre application mobile en version expérimentale. Vos retours nous ont beaucoup aidés. +C'est pourquoi nous avons entièrement revu le design de l'application pour mieux répondre à vos besoins. + +<div style={{textAlign: "center"}}> + **[Télécharger la dernière version](https://www.octobot.cloud)** +</div> + +## Une nouvelle vue détaillée pour chaque OctoBot + +Nous avons également créé une nouvelle vue détaillée de votre OctoBot. Vous pourrez suivre précisément ce que font vos OctoBot. +Vous aurez accès à l'historique des performances de vos bots, la répartition de leur portefeuille, les ordres en cours et leurs dernières actions. + +Cette nouvelle vue est disponible que vous utilisiez un OctoBot depuis OctoBot cloud ou que vous l'ayez installé vous-même. + +<div style={{textAlign: "center"}}> + <div> + ![nouvelle vue détaillée des + OctoBot](/images/blog/mobile-app-revamp/bot-view-pf-fr.png) + </div> +</div> + +## Un nouveau dashboard + +Dès que vous vous connecterez à la nouvelle version, vous découvrirez un nouveau tableau de bord. Ce dashboard vous montre l'historique de votre portefeuille réel, regroupant tous vos comptes d'échanges. +Vous aurez aussi un aperçu de comment votre portefeuille est réparti actuellement. + +<div style={{textAlign: "center"}}> + <div> + ![nouveau dashboard de l'application + mobile](/images/blog/mobile-app-revamp/mobile-dashboard-fr.png) + </div> +</div> + +Ce nouveau dashboard est conçu pour que vous puissiez surveiller les performances de vos portefeuilles crypto facilement depuis votre smartphone. + +## Aussi disponible sur iPhone + +Avec cette nouvelle version de l'application nous ajoutons aussi une version web disponible sur <a href="https://mobile.octobot.cloud" rel="nofollow">mobile.octobot.cloud</a>. +Cela rend l'application mobile OctoBot installable sur iPhone directement depuis votre navigateur pour obtenir une expérience proche de l'application Android. Il s'agit de la première étape vers la version iPhone de l'application OctoBot. + +Pour installer l'app sur iPhone, suivez les 4 étapes suivantes: + +1. Avec votre mobile, aller sur <a href="https://mobile.octobot.cloud" rel="nofollow">mobile.octobot.cloud</a> +2. Depuis l'interface de votre navigateur web, cliquer que le bouton `Partager` +3. Scroller vers le bas et cliquer sur `sur l'écran d'accueil` +4. Cliquer sur `Ajouter` + +🎉 L'application OctoBot est maintenant installée sur votre iPhone ! + +## Conclusion + +Nous en sommes juste au début. D'autres améliorations et nouvelles fonctionnalités viendront enrichir l'application mobile. + +Nous espérons que vous apprécierez cette nouvelle version. Si c'est le cas, n'oubliez pas de nous laisser une évaluation ! + +<div style={{textAlign: "center"}}> + <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=blog&utm_content=mobile-app-revamp" rel="nofollow"><GoogleStoreButton /></a> +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-05-02-crypto-bubble.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-05-02-crypto-bubble.md new file mode 100644 index 0000000000..8f07422baf --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-05-02-crypto-bubble.md @@ -0,0 +1,107 @@ +--- +title: "Comprendre la bulle des cryptomonnaies" +description: "Découvrez le monde passionnant des cryptomonnaies ! Apprenez à identifier les bulles et à protéger vos investissements" +slug: "crypto-bubble" +date: "2024-05-02" +authors: ["guillaume"] +tags: ["Crypto", "Ecosystem", "Finance", "Educational"] +image: "/images/blog/fomo-meaning/cover.png" +--- + + + +# Comprendre la bulle des cryptomonnaies + +Découvrez le monde passionnant des cryptomonnaies ! Apprenez à identifier les bulles et à protéger vos investissements. + +## Qu'est-ce qu'une bulle crypto ? + +Imaginez un ballon qui grossit à mesure qu'on y souffle de l'air. +Dans le monde des cryptomonnaies, il peut se passer la même chose. +Une bulle crypto se produit lorsque les prix des cryptomonnaies, comme [Bitcoin](https://www.octobot.cloud/bitcoin-prediction), augmentent très rapidement à cause de beaucoup d'enthousiasme et d'investissement de la part des gens espérant gagner de l'argent rapidement. + +Cependant, tout comme un ballon ne peut contenir qu'une certaine quantité d'air avant d'éclater, une bulle crypto peut éclater, entraînant une baisse significative des prix. + +## Pourquoi les bulles crypto se produisent-elles ? + +1. <b>Nouveaux investisseurs</b> : Beaucoup de gens entendent parler d'autres + qui ont fait d'énormes profits avec les cryptomonnaies et décident d'investir + également, dans l'espoir de réaliser les mêmes. Cette ruée fait monter + rapidement les prix. +2. <b>[La peur de manquer quelque chose](fomo-meaning) (FOMO)</b> : Craignant de + rater des profits, les gens se précipitent souvent pour acheter des + cryptomonnaies, ce qui peut pousser les prix encore plus haut. +3. <b>Hype médiatique</b> : Lorsque les médias parlent constamment de + l'augmentation des prix des cryptos, encore plus de personnes veulent + investir, ce qui gonfle davantage la bulle. +4. <b>Effet de mode</b> : Les gens ont souvent tendance à suivre ce que font les + autres. Si tout le monde achète des cryptos, cela peut sembler une bonne idée + de faire de même, même si les prix sont très élevés. + +<div style={{textAlign: "center"}}> + <div> + ![Une personne avec une expression excitée regarde un graphique du marché + des cryptos en hausse sur son ordinateur, symbolisant le FOMO dans la + cryptomonnaie.](/images/blog/fomo-meaning/cover.png) *Une illustration du + sentiment de FOMO.* + </div> +</div> + +## Bulles historiques des cryptomonnaies + +Le Bitcoin, la première cryptomonnaie, a connu plusieurs bulles depuis sa création en 2009. +Par exemple, en 2017, le prix du Bitcoin a grimpé jusqu'à près de 20 000 $, suivi d'une baisse à environ 3 000 $ un an plus tard. + +## Le signes d'une bulle de crypto + +- Augmentation rapide des prix : Lorsque les prix des cryptomonnaies augmentent très rapidement sans raison valable, cela pourrait être une bulle. +- Haute volatilité : Les prix changent drastiquement sur une courte période. +- Volumes de trading importants : Une augmentation soudaine des achats et des ventes peut être aussi un signe. + +## Comment gérer une bulle de crypto + +- <b>Diversifiez vos investissements</b> : Ne placez pas tout votre argent dans + les mêmes cryptomonnaies, répartissez vos investissements à travers plusieurs + cryptos via [les paniers de cryptos](https://www.octobot.cloud/features/crypto-basket) pour réduire + le risque. Évitez de mettre tous vos fonds dans la cryptomonnaie. Avoir un + mélange de différents types d'investissements peut vous protéger en cas de + crash du marché crypto. +- <b>Surveillez attentivement les tendances du marché</b> : Gardez un œil sur la + performance des cryptomonnaies et sur le sentiment du marché. Des outils comme + l' + <a href="https://coinstats.app/fear-and-greed/" rel="nofollow">Indice de Peur et de Cupidité</a> peuvent vous donner une idée de si les émotions + dominent trop le marché, ce qui est souvent le cas dans une bulle. +- <b>Restez discipliné</b> : Ayez un plan clair pour votre investissement et + tenez-vous-y, quel que soit ce que font les autres. Évitez de prendre des + décisions basées sur des changements de prix soudains ou la pression sociale. + +<div style={{textAlign: "center"}}> + ![paniers de crypto investissement aidant les investisseurs à diversifier leur + portefeuille de + cryptos](/images/blog/invest-with-crypto-baskets/crypto-basket.png) *Un + portefeuille de cryptos diversifié* +</div> + +## Préparation pour le post éclatement de la bulle + +Si vous vous retrouvez dans une situation où la bulle crypto a éclaté, il est important de rester calme. + +Voici quelques conseils sur la manière de procéder : + +- Évaluez et rééquilibrez votre portefeuille : Examinez vos investissements et voyez si vous devez apporter des modifications pour réduire le risque ou profiter de nouvelles opportunités. Les [paniers de cryptos](https://www.octobot.cloud/features/crypto-basket) sont un moyen facile de diversifier et de rééquilibrer votre portefeuille de cryptomonnaies. +- Apprenez de l'expérience : Analysez ce qui s'est passé et pourquoi, et utilisez ces connaissances pour prendre de meilleures décisions d'investissement à l'avenir. +- Gardez un œil sur le marché : Après un crash, des opportunités d'acheter de bonnes cryptomonnaies à bas prix peuvent apparaître, mais rappelez-vous qu'il est important faire ses propres recherches et de ne pas agir sur un coup de tête. + +## Questions Fréquemment Posées + +### Comment puis-je savoir s'il s'agit vraiment d'une bulle ? + +Recherchez des signes comme des augmentations de prix extrêmement rapides, des volumes de trading élevés sans raisons substantielles, et un enthousiasme public extrême davantage motivé par les médias et la [FOMO](fomo-meaning). + +### Les cryptomonnaies sont-elles encore un bon investissement après l'éclatement d'une bulle ? + +Oui, les cryptomonnaies peuvent encore être de bons investissements après l'éclatement d'une bulle. Assurez-vous simplement de faire vos propres recherches et de considérer le niveau de risque avec lequel vous êtes à l'aise avant d'investir. + +### Y aura-t-il toujours des bulles crypto ? + +Étant donné les tendances actuelles et les modèles historiques, il est probable que le marché des cryptomonnaies continue de connaître des bulles. Cela fait partie intégrante d'un marché jeune et en évolution, où l'enthousiasme et l'innovation entraînent des mouvements de prix significatifs. Chaque bulle apporte également une attention et des investissements accrus, renforçant potentiellement le marché au fil du temps. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-05-07-best-ai-trading-bots.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-05-07-best-ai-trading-bots.md new file mode 100644 index 0000000000..efbd2cba85 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-05-07-best-ai-trading-bots.md @@ -0,0 +1,209 @@ +--- +title: "5 Meilleurs Bots de Trading IA" +description: "Découvrez les meilleurs bots de trading avec de l'intelligence artificielle. Comparez les meilleurs bots gratuits et payants, leurs fonctionnalités et leur facilité d'utilisation." +slug: "best-ai-trading-bots" +date: "2024-05-07" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-crypto-trading-bots/cover.png" +--- + + + +# 5 Meilleurs Bots de Trading IA + +Choisir le bon bot de trading avec intelligence artificielle parmi les nombreuses options disponibles peut être difficile. Cet article est là pour vous aider à trouver le meilleur pour vos besoins. + +## Qu'est-ce qu'un robot de trading crypto ? + +Les robots de trading crypto sont comme vos assistants numériques pour le trading de cryptomonnaies. +Ils fonctionnent automatiquement, exécutant des transactions basées sur vos stratégies pré-définies, ce qui signifie que vous n'avez pas besoin de surveiller constamment les marchés. + +## Qu'est-ce qu'un robot de trading IA crypto ? + +Un robot de trading crypto IA utilise des algorithmes d'intelligence artificielle pour automatiser le trading sur le marché des crypto-monnaies. +En analysant de vastes quantités de données et de tendances du marché, ces bots peuvent exécuter des transactions basées sur des stratégies prédéfinies. + +Avec de nombreux traders les utilisant aujourd'hui, ils sont un choix populaire tant pour les débutants que pour les experts. +Pour ceux qui sont nouveaux dans le trading de cryptomonnaies, ces bots proposent souvent des stratégies pour vous aider à démarrer facilement. + +<div> + Maintenant que nous avons une compréhension claire de ce qu'est un bot de + trading crypto, explorons les différents types de bots de trading disponibles + sur le marché. +</div> + +## 1. OctoBot + +<div style={{textAlign: "center"}}> + <div> + ![A man relaxing in his couch while OctoBot is making money by automating + cryptocurrency + strategies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +[OctoBot](/fr) est un bot de trading flexible et facile à utiliser qui propose une variété de stratégies gratuites, y compris des stratégies basées sur l'[IA](https://www.octobot.cloud/features/ai-trading-bot), le [DCA](smart-dca-making-of) intelligent et GRID. +Il est [open-source](open-source-trading-software). Avec son accent sur la transparence, les utilisateurs peuvent tester des stratégies avec du [trading virtuel](/guides/octobot-usage/simulator) et suivre leurs performances. +OctoBot prend en charge la plupart des exchanges crypto et propose des plans premium pour les utilisateurs avancés, le rendant adapté tant pour les débutants que pour les investisseurs crypto expérimentés. +Il offre également des [prédictions IA sur les crypto](https://www.octobot.cloud/tools/crypto-prediction) et des [paniers de crypto](https://www.octobot.cloud/features/crypto-basket) d'IA. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Facile d'utilisation avec des stratégies basées sur l'IA et diverses stratégies préétablies pour débutants et professionnels" + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Plusieurs offres gratuites avec des options pour des plans premium avancés" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Open-source, supporte les principaux exchanges, variété de stratégies de trading, backtesting et suivi de performance" + /> + </div> +</Card> + +## 2. CryptoHero + +CryptoHero propose un trading automatisé de cryptomonnaies pour de nombreuses cryptos et une intégration avec les principaux échanges comme [Binance](https://www.octobot.cloud/binance-trading-bot) et Kraken. +Les utilisateurs peuvent définir des paramètres, utiliser des bots optimisés avec l'IA pour des simulations et utiliser du backtesting de stratégie. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Le tableau de bord peut sembler un peu compliqué." + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Offre gratuite et tarification compétitive par rapport aux plateformes similaires" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Simulation, Backtesting et intégration avec les principales plateformes de trading" + /> + </div> +</Card> + +## 3. 3Commas + +<div style={{textAlign: "center"}}> + <div> + ![3commas-logo](/images/blog/best-crypto-trading-bots/3commas.png) + </div> +</div> + +3Commas est un bot de trading crypto payant, offrant des bots GRID, DCA et Signal. +Connu pour son interface ergnonomique, 3Commas prend en charge de multiples stratégies de trading et indicateurs techniques. + +Il propose également une place de marché pour les signaux crypto tiers. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Interface facile à naviguer avec plusieurs stratégies de trading" + /> + <Rating + title="Prix" + level={1} + h="14px" + tooltipText="Service payant à prix élevé" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Propose des bots GRID, DCA et Signal avec une marketplace" + /> + </div> +</Card> + +## 4. Cryptohopper + +Cryptohopper est un bot de trading crypto payant, offrant un essai gratuit de 3 jours pour les nouveaux utilisateurs. +Il se distingue par son bot de market-making et la possibilité pour les utilisateurs de créer des stratégies de trading personnalisées. + +La plateforme prend également en charge le trading automatisé via un bot Telegram et propose des services supplémentaires comme des signaux crypto, des modèles de stratégie et du trading virtuel. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Difficile pour les débutants, offre un bot de market-making" + /> + <Rating + title="Prix" + level={2} + h="14px" + tooltipText="Plans gratuits et payants, adaptés à divers budgets" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Création de stratégie personnalisée, trading par bot Telegram et trading virtuel" + /> + </div> +</Card> + +## 5. Pionex + +<div style={{textAlign: "center"}}> + <div> + ![logo-pionex](/images/blog/best-crypto-trading-bots/pionex.jpg) + </div> +</div> + +[Pionex](https://www.pionex.com/en/signUp?r=octobot) ([Pionex.us](https://accounts.pionex.us/en/signup?ref=octobot) pour les citoyens américains) est une plateforme de trading avancée permettant d'automatiser des stratégies d'investissement à l'aide de ses bots de trading. Créés pour être faciles à utiliser, ils permettent aux traders d'exécuter des stratégies simplement. +La plateforme propose une variété de bots personnalisables avec IA adaptés à différents types de trading, rendant la plateforme d'échange particulièrement pertinente pour les investisseurs avancés. +Pionex permet aux utilisateurs d'optimiser leurs stratégies et de gérer efficacement les risques grâce à ses outils de backtesting et de suivi des performances. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Bot de copie facile à utiliser mais création de bots complexe" + /> + <Rating + title="Prix" + level={3} + h="14px" + tooltipText="Frais d'échange faibles" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Grande variété de bots prêts à l'emploi" + /> + </div> +</Card> + +## Conclusion + +En conclusion, que vous soyez débutant ou investisseur expérimenté, il existe un bot de trading de crypto via IA pour vous. +Ces bots offrent différentes fonctionnalités et tarifications, vous permettant de trouver celui qui correspond à votre façon d'automatiser votre trading. + +Vous n'avez pas trouvé ce que vous cherchiez ? +Si vous recherchez davantage d'outils utilisant l'IA pour automatiser et améliorer vos investissements, <a href="https://www.insidr.ai/" rel="nofollow">Insidr.ai</a> est un excellent site web qui référence de nombreux d'outils très utiles. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-06-18-making-octobot-more-accessible.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-06-18-making-octobot-more-accessible.md new file mode 100644 index 0000000000..e545c39a57 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-06-18-making-octobot-more-accessible.md @@ -0,0 +1,81 @@ +--- +title: "Rendre OctoBot plus accessible" +description: "Les offres d'OctoBot ont été améliorées. Elles sont maintenant plus accessibles, contiennent plus de fonctionnalités et ajoutent de nouvelles possibilités." +slug: "making-octobot-more-accessible" +date: "2024-06-18" +authors: ["paul"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png" +--- + + + +# Rendre OctoBot plus accessible + +<div style={{textAlign: "center"}}> + ![amélioration des offres octobot](/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png) +</div> + +Chez OctoBot, notre mission est de simplifier l'investissement en cryptomonnaies. Pour cela, nous passons beaucoup de temps à essayer de comprendre ce qui est le plus compliqué lorsqu'on investit dans les cryptomonnaies. + +Une des choses que nous avons comprise est que les stratégies d'investissement automatisées (qu'il s'agisse d'investir dans l'ensemble du marché avec des [paniers de crypto](https://www.octobot.cloud/features/crypto-basket) ou avec des stratégies algorithmiques simples) sont très populaires tant qu'elles restent **facilement accessibles**. + +Le monde des cryptomonnaies est déjà compliqué à aborder, si les investisseurs doivent également payer pour utiliser leurs premières stratégies automatisées telles que les paniers de crypto, OctoBot ne rend pas vraiment les choses plus accessibles. Cela est particulièrement vrai si vous devez calculer combien de gains vous devez réaliser pour justifier un abonnement de 14 $/mois. + +**Nous avons décidé de changer cela.** + +## Des stratégies automatisées gratuites pour chaque investisseur en crypto + +Le plan gratuit Investisseur débloque désormais un ensemble de paniers de crypto et de stratégies algorithmiques. Cela permettra aux investisseurs en crypto utilisant des portefeuilles de toute taille, de profiter directement des stratégies d'investissement d'OctoBot. + + +<div style={{textAlign: "center"}}> + ![explorateur de stratégie OctoBot avec des paniers de crypto et des stratégies DCA](/images/blog/making-octobot-more-accessible/octobot-strategy-explorer-with-crypto-baskets-and-dca-strategies.png) +</div> + +En plus des stratégies existantes, nous avons également ajouté de nouvelles stratégies à risque modéré, toutes disponibles avec le plan Investor et spécifiquement créées pour les investisseurs qui souhaitent profiter d'un niveau de risque raisonnable. + +<div style={{textAlign: "center"}}> + ![stratégies de paniers de crypto à risque raisonnable OctoBot](/images/blog/making-octobot-more-accessible/octobot-low-risk-crypto-baskets-strategies.png) +</div> + +Nous avons également rendu le tableau de bord multi-comptes disponible avec le plan Investor, pour faciliter le suivi des comptes pour tout le monde. + +<div style={{textAlign: "center"}}> + ![tableau de bord multi-échanges OctoBot](/images/blog/making-octobot-more-accessible/octobot-multi-exchange-dashboard.png) +</div> + +## Les stratégies d'investissement avancées plus accessibles + +L'offre [Investisseur Plus](introducing-the-investor-plus-plan) se concentre désormais sur le déblocage de types spécifiques de [paniers de crypto](https://www.octobot.cloud/features/crypto-basket) et de stratégies d'investissement. + +Elle augmente également le nombre d'OctoBots simultanés qu'un compte peut utiliser. + +L'objectif de ce nouveau plan Investisseur Plus est d'être pertinent pour les investisseurs en crypto qui souhaitent investir en utilisant des stratégies plus avancées tout en restant très abordable. + + +Par conséquent, le plan Investisseur Plus est maintenant beaucoup moins cher et peut également être acheté comme un plan à vie. + +**[Essayer Investisseur Plus](https://www.octobot.cloud/pricing)** + +## Les stratégies d'investissement personnalisées + +Le [plan Pro](introducing-the-pro-plan) débloque toutes les fonctionnalités du plan Investisseur Plus et ajoute la possibilité de configurer vos stratégies. + +- Tradez avec vos propres stratégies en utilisant les [OctoBots TradingView](/investing/tradingview-automated-trading): automatisez vos trades basés sur les alertes de TradingView directement à partir du prix, des indicateurs ou même des stratégies Pine Script. +- Investissez avec vos propres paniers de crypto personnalisés. +- [Ajustez vos OctoBots](/investing/fine-tune-your-octobots) : avec le plan Pro, vous pouvez facilement interagir avec vos OctoBots en cours d'exécution pour créer, remplacer ou annuler des ordres directement depuis votre OctoBot. +- Profitez du support prioritaire de l'équipe OctoBot. + +Nous avons également adapté le prix de l'offre Pro pour qu'elle ait moins d'impact sur votre portefeuille : le plan Pro est maintenant à 25 $/mois. + +**[Passer à l'offre Pro](https://www.octobot.cloud/pricing)** + + +## Conclusion + +Nous pensons que cette refonte des plans OctoBot augmentera considérablement l'accessibilité des stratégies d'investissement automatisées pour tous les investisseurs en crypto. + +Avec ces nouvelles offres, nous voulons faire d'[OctoBot cloud](/fr) la meilleure plateforme pour investir facilement dans les cryptomonnaies en utilisant des stratégies d'investissement. + +Nous espérons que vous apprécierez ces changements et avons hâte de recevoir vos retours à ce sujet. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-06-18-what-are-octobot-rewards-and-how-to-get-them.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-06-18-what-are-octobot-rewards-and-how-to-get-them.md new file mode 100644 index 0000000000..8af6831fac --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-06-18-what-are-octobot-rewards-and-how-to-get-them.md @@ -0,0 +1,92 @@ +--- +title: "Que sont les récompenses OctoBot et comment les obtenir ?" +description: "Les récompenses OctoBot permettent de débloquer gratuitement les fonctionnalités payantes en étant un utilisateur actif et en invitant des amis." +slug: "what-are-octobot-rewards-and-how-to-get-them" +date: "2024-06-18" +authors: ["guillaume"] +tags: ["Rewards", "OctoBot cloud", "Release"] +image: "/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png" +--- + + + +# Que sont les récompenses OctoBot et comment les obtenir ? + +<div style={{textAlign: "center"}}> + ![Les récompenses OctoBot sont attribuées pour l'utilisation d'OctoBot et + débloquent l'utilisation de stratégies + avancées](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png) +</div> + +Nous sommes ravis d'annoncer le lancement du système de récompenses OctoBot. Les récompenses offrent une alternative pour que les comptes utilisant l'abonnement gratuit d'Investisseur puissent profiter des stratégies d'investissement payantes ainsi que d'autres avantages sans avoir à dépenser un centime. + +## Débloquez gratuitement les stratégies payantes d'OctoBot + +Lors de la création d'un compte OctoBot, vous pouvez choisir d'utiliser l'offre gratuite "Investisseur" ou l'une des offres payantes suivantes : + +- L'offre [Investisseur Plus](introducing-the-investor-plus-plan) qui, entre autres avantages, débloque instantanément toutes les stratégies d'IA, de DCA, de paniers crypto et de grille d'OctoBot. +- L'offre [Pro](introducing-the-pro-plan) qui, elle, débloque tout de l'offre Investisseur Plus tout en ajoutant les OctoBots TradingView, le support prioritaire et plus encore. + +Les récompenses sont le moyen alternatif de débloquer les stratégies IA, DCA, paniers crypto et grille qui sont normalement disponibles à partir de l'offre Investisseur Plus. + +<div style={{textAlign: "center"}}> + ![Tableau de bord des récompenses OctoBot montrant les récompenses de crypto + apprenti](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-crypto-apprentice-rewards.png) +</div> + +Alors que les offres payantes vous accordent un accès instantané à toutes les stratégies, il est désormais également possible de débloquer progressivement l'accès à ces stratégies en étant un utilisateur actif d'OctoBot. + +En visitant votre <a href="https://www.octobot.cloud/rewards" rel="nofollow">tableau de bord des récompenses</a>, vous verrez votre niveau actuel de récompense. Chaque niveau améliore de manière permanente votre compte OctoBot gratuit. + +Les récompenses sont particulièrement utiles pour débloquer l'accès à: + +- Chaque [stratégie d'IA](chatgpt-strategy-deep-dive) +- Chaque [stratégie de panier de crypto](https://www.octobot.cloud/features/crypto-basket) +- Chaque [stratégie de DCA](smart-dca-making-of) +- Chaque stratégie de grille +- Un nombre accru d'OctoBots que vous pouvez utiliser à la fois + +<div style={{textAlign: "center"}}> + ![Tableau de bord des récompenses montrant les récompenses du niveau grand + maître + crypto](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-grandmaster-of-crypto-rewards.png) +</div> + +Grand maître crypto est le niveau de récompense maximum. Un certain temps est nécessaire pour l'acquérir, mais une fois atteint, il débloque de manière permanente toutes les fonctionnalités de l'offre Investisseur Plus. + +## Comment gagner des récompenses + +Les récompenses sont gagnées en complétant des missions dans la section "Missions" de votre <a href="https://www.octobot.cloud/rewards" rel="nofollow">tableau de bord des récompenses</a>. + +<div style={{textAlign: "center"}}> + ![Liste des récompenses OctoBot avec missions complétées et non + complétées](/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-list-with-completed-and-uncompleted-missions.png) +</div> + +Il existe de nombreux types de missions, telles que démarrer plusieurs OctoBots, suivre des cours de crypto ou simplement avoir un OctoBot en cours d'exécution. + +Les principaux aspects à garder à l'esprit pour gagner rapidement des récompenses sont de: + +- S'assurer de configurer votre compte OctoBot et d'installer l'application pour obtenir toutes les récompenses de bienvenue +- Avoir au moins un OctoBot qui investit sur votre compte d'échange pour obtenir vos récompenses quotidiennes +- Parler à vos amis de votre expérience avec OctoBot et les aider à configurer leur compte : vous êtes récompensé lorsque vos filleuls démarrent leur OctoBot et lorsqu'ils achètent un plan annuel + +:::info + **Astuce** : Parrainez 2 amis qui achètent l'abonnement annuel Investisseur + Plus fera instantanément passer votre compte au niveau de récompense le plus + élevé. +::: + +## La raison derrière le système de récompenses OctoBot + +Nous réalisons que de nombreux investisseurs en crypto investissent avec un portefeuille contenant moins de quelques centaines d'euros. Dans ce contexte, payer pour un abonnement mensuel peut être compliqué. + +Notre objectif chez OctoBot cloud est de rendre l'investissement en crypto plus accessible, et nous voulons également améliorer les investissements pour les petits portefeuilles, ainsi que pour ceux qui ne veulent tout simplement pas payer. + +C'est pourquoi nous avons conçu ce système de récompenses. Si vous ou un de vos amis souhaitez automatiser vos investissements avec OctoBot cloud, il existe maintenant de plusieurs façons pour ce faire : + +- Utiliser l'une des offres Investisseur Plus ou Pro pour débloquer instantanément toutes les stratégies +- Utiliser des stratégies disponibles gratuitement +- Être un utilisateur actif d'OctoBot et accéder aux stratégies et fonctionnalités de l'offre Investor Plus comme un cadeau de la part d'OctoBot cloud + +**[Découvrir les plans OctoBot](https://www.octobot.cloud/pricing)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-07-07-octobot-2-0-0-whats-new.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-07-07-octobot-2-0-0-whats-new.md new file mode 100644 index 0000000000..1da5763bc0 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-07-07-octobot-2-0-0-whats-new.md @@ -0,0 +1,84 @@ +--- +title: "OctoBot 2.0.0 - Les nouveautés" +description: "OctoBot 2.0.0 améliore OctoBot avec une refonte complète de l'interface utilisateur et inclut l'extension Premium OctoBot." +slug: "octobot-2-0-0-whats-new" +date: "2024-07-07" +authors: ["guillaume"] +tags: ["Tradingview", "Chatgpt", "Release", "Backtesting", "Strategy designer", "OctoBot cloud"] +image: "/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png" +--- + + + +# OctoBot 2.0.0 - Les nouveautés + +![annonce d'octobot 2.0.0 avec preview du nouveau design](/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png) + +## OctoBot 2.0.0 + +Nous sommes très heureux d'annoncer la sortie d'OctoBot 2.0.0 ! Cette version représente un grand pas en avant pour l'ensemble du projet OctoBot, car elle : + +1. Rénove entièrement l'interface utilisateur +2. Introduit l'extension premium d'OctoBot +3. Comprend de nombreuses corrections et améliorations. + +<div style={{textAlign: "center"}}> + ![octobot premium nouveau tentacles + disponible](/images/blog/octobot-2-0-0-whats-new/octobot-premium-new-tentacles-available.png) +</div> + +## Refonte de l'interface utilisateur + +Dans OctoBot 2.0.0, l'interface utilisateur est profondément améliorée et modernisée. + +L'ensemble de l'interface utilisateur est désormais beaucoup plus moderne et utilise la nouvelle identité visuelle d'OctoBot. + +<div style={{textAlign: "center"}}> + ![octobot 2.0.0 preview + dark](/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-preview-dark.png) +</div> + +L'interface utilisateur propose désormais des modes sombre et clair, et offre un meilleur rendu sur différentes tailles d'écran. + +<div style={{textAlign: "center"}}> + ![octobot 2.0.0 pnl + light](/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-pnl-light.png) +</div> + +Nous espérons que vous apprécierez le nouveau design d'OctoBot ! + +## L'extension premium d'OctoBot + +La deuxième grande nouveauté de cette version est l'introduction de [l'extension premium d'OctoBot](/guides/octobot-configuration/premium-octobot-extension). + +<div style={{textAlign: "center"}}> + ![preview de l'extension premium + octobot](/images/blog/octobot-2-0-0-whats-new/octobot-premium-extension-preview.png) +</div> + +L'extension Premium OctoBot est une extension payante facultative visant à améliorer de manière permanente votre OctoBot open source. Elle ajoute : + +- Le [Strategy Designer](/guides/octobot-usage/strategy-designer) pour créer des stratégies OctoBot avancées +- Les [webhooks sécurisés d'OctoBot cloud](/guides/octobot-interfaces/tradingview/using-a-webhook) pour vos stratégies TradingView +- [Les paniers de crypto d'OctoBot cloud](https://www.octobot.cloud/features/crypto-basket) directement dans votre Octobot open source + +<div style={{textAlign: "center"}}> + ![octobot open source utilisant les paniers de crypto avec extension premium + octobot](/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png) +</div> + +Nous avons créé l'extension Premium OctoBot afin de: + +- Améliorer le confort d'automation de vos stratégies TradingView +- Offrir un outil adapté à ceux qui veulent aller plus loin dans la création de stratégies avancées +- Permettre aux OctoBots open source d'utiliser paniers de crypto OctoBot cloud qui sont maintenus automatiquement à jour + +Ces fonctionnalités ayant un important coût de fonctionnement et de développement, nous avons décidé de les inclure dans [l'extension premium d'OctoBot](/guides/octobot-configuration/premium-octobot-extension) et nous sommes impatients de recevoir vos retours à ce sujet. + +Si vous avez des idées de fonctionnalités que vous aimeriez avoir sur l'extension Premium OctoBot, n'hésitez pas à nous contacter. + +## Autres améliorations + +OctoBot 2.0.0 met à jour la version de <a href="https://github.com/ccxt/ccxt" rel="nofollow">CCXT</a>, son connecteur d'échange, à la version 4.3.56, améliorant ainsi la connexion aux échanges de cryptomonnaie et leur compatibilité avec OctoBot. + +Cette mise à jour inclut également de nombreuses corrections de bugs ainsi que des améliorations dans la synchronisation des échanges. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-09-08-announcing-the-bitmart-and-octobot-partnership.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-09-08-announcing-the-bitmart-and-octobot-partnership.md new file mode 100644 index 0000000000..17674aaccb --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-09-08-announcing-the-bitmart-and-octobot-partnership.md @@ -0,0 +1,71 @@ +--- +title: "Annonce du partenariat entre BitMart et OctoBot" +description: "Profitez du trade to earn, automatisez vos stratégies TradingView, de DCA, d'IA, de panier de crypto ou de grilles sur BitMart avec OctoBot" +slug: "announcing-the-bitmart-and-octobot-partnership" +date: "2024-09-08" +authors: ["guillaume"] +tags: ["Crypto", "Trading", "Exchange", "Partnership", "BitMart"] +image: "/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png" +--- + + + +# Annonce du partenariat entre BitMart et OctoBot + +![bitmart and octobot partnership](/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png) + +L'équipe OctoBot est fière d'annoncer le <a href="https://support.bitmart.com/hc/en-us/articles/28977125395355-BitMart-and-Octobot-Broker-Partnership" rel="nofollow">partenariat avec BitMart</a> afin d'aider les investisseurs de BitMart à automatiser simplement leurs stratégies de trading. + +## Automatiser le trading sur BitMart + +BitMart rejoint maintenant les [plateformes officiellement supportées](/guides/exchanges) par la [version open source d'OctoBot](https://www.octobot.cloud/trading-bot). + +Il est désormais facile d'utiliser OctoBot pour automatiser vos stratégies de trading sur BitMart: + +- Utilisez le [trading simulé sans risque] pour tester votre stratégie dans des conditions réelles +- Avec le [backtesting](/guides/octobot-usage/backtesting) et le [Strategy Designer](/guides/octobot-usage/strategy-designer) pour optimiser rapidement les performances de vos stratégies +- Avec vos fonds directement stockés sur BitMart pour réellement profiter de votre stratégie + +**[Démarrer votre OctoBot](https://www.octobot.cloud)** + +## Trade to earn pour les utilisateurs d'OctoBot sur BitMart + +Pour célébrer le partenariat, BitMart organise une campagne de récompenses pour tous les utilisateurs d'OctoBot qui tradent sur BitMart! Pour en profiter, il vous suffit de connecter votre compte BitMart à OctoBot et de commercer à trader avec votre stratégie pour recevoir une récompense de 5$. + +Naturellement, cela s'ajoute aux récompenses habituelles de BitMart et rend l'utilisation d'OctoBot sur BitMart plus rentable. + +Pour en savoir plus sur cette campagne, rendez-vous sur <a href="https://support.bitmart.com/hc/en-us/articles/28977125395355-BitMart-and-Octobot-Broker-Partnership" rel="nofollow">l'annonce de BitMart</a>. + +## Les stratégies OctoBot sur BitMart + +Le support de BitMart par OctoBot ouvre la porte à une large gamme de stratégies de trading gratuites et facilement accessibles depuis la version open source d'OctoBot. + +### Paniers de crypto BitMart + +Investissez dans l'ensemble du marché crypto ou dans des thèmes que vous aimez directement depuis BitMart en utilisant les [paniers crypto](/guides/octobot-trading-modes/index-trading-mode) basés sur OctoBot cloud ou vos paniers personnalisés. Pour utiliser les paniers crypto basés sur OctoBot cloud sur BitMart, il vous suffit de télécharger le panier crypto que vous souhaitez directement depuis votre OctoBot et d'activer le plateforme d'échange BitMart + +### Trading automatisé depuis TradingView sur BitMart + +Automatisez vos [stratégies TradingView(/fr/guides/octobot-trading-modes/tradingview-trading-mode) sur BitMart et tradez facilement à partir de vos alertes TradingView + +### DCA intelligent avec IA sur BitMart + +Optimisez vos stratégies de trading BitMart en utilisant des [stratégies de DCA intelligent](/guides/octobot-trading-modes/dca-trading-mode) et automatisez vos trades avec des analyse technique ou l'intelligence artificielle. + +### Trading en grille personnalisé BitMart + +Profitez des marchés stables en utilisant [le trading en grille](/guides/octobot-trading-modes/grid-trading-mode) et automatisez les trades pour créer un revenu passif régulier. + +## BitMart est disponible sur OctoBot 2.0.5 + +À partir d'OctoBot 2.0.5, le trading SPOT de BitMart est [entièrement pris en charge](/guides/exchanges/bitmart), ainsi que sa connexion en websocket permettant d'accélérer les mises à jour des données du marché. + +BitMart a été inclus parmi les échanges régulièrement testés par OctoBot. L'équipe d'OctoBot veillera régulièrement à ce que la connexion avec BitMart reste stable et fera tout son possible pour maintenir cet état afin de vous offrir la meilleure automatisation possible du trading sur BitMart. + +## Conclusion + +Chez OctoBot, nous essayons de rendre l'automatisation des stratégies de trading aussi accessible que possible à tous. Suivant cette philosophie, il est important pour nous d'activer le trading automatisé sur chaque échange important pour nos utilisateurs. + +L'ajout de [BitMart](https://www.octobot.cloud/bitmart-trading-bot) aux échanges pris en charge par OctoBot open source est une première étape vers une intégration plus large avec BitMart. Si vous êtes intéressé par l'utilisation de BitMart avec les stratégies cloud d'OctoBot, faites-le nous savoir en votant pour <a href="https://feedback.octobot.online/cloud/p/bitmart-support" rel="nofollow">le support de BitMart</a> sur notre feuille de route OctoBot cloud. + +Si vous tradez sur un échange qui n'est pas actuellement pris en charge, veuillez créer ou voter pour le post associé à votre échange sur notre <a href="https://feedback.octobot.online" rel="nofollow">notre site de feedback</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-09-26-how-to-automate-trading-in-tradingview.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-09-26-how-to-automate-trading-in-tradingview.md new file mode 100644 index 0000000000..359b1f95d7 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-09-26-how-to-automate-trading-in-tradingview.md @@ -0,0 +1,138 @@ +--- +title: "Comment automatiser ses trades avec TradingView" +description: "Automatisez vos trades avec les meilleures stratégie depuis TradingView avec des webhooks ou des e-mails directement sur votre échange." +slug: "how-to-automate-trading-in-tradingview" +date: "2024-09-26" +authors: ["paul"] +tags: ["Crypto", "Trading", "Educational", "TradingView"] +image: "/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png" +--- + + + +# Comment automatiser ses trades avec TradingView + +<div style={{textAlign: "center"}}> + <div> + ![automatisez vos trades tradingview pour trader avec tout type d'indicateur + ou de + stratégie](/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png) + </div> +</div> + +## Est-ce possible d'automatiser ses trades avec TradingView? + +Oui, et vous pouvez le faire avec un plan gratuit <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a>. TradingView dispose d'un système d'alerte intégré qui peut être utilisé pour déclencher automatiquement des transactions sur des courtiers tels que <a href="https://www.binance.com" rel="nofollow">Binance</a>. + +Ces alertes peuvent être envoyées gratuitement par e-mail, ou en utilisant des webhooks, qui nécessitent un abonnement TradingView. Ces alertes peuvent être automatiquement traitées par une plateforme comme [OctoBot](/investing/tradingview-automated-trading) pour convertir instantanément chaque alerte en trade sur votre compte d'échange. + +## Automatiser ses trades avec TradingView + +TradingView permet la création d'alertes qui se déclencheront dès qu'une condition spécifique est remplie. + +<div style={{textAlign: "center"}}> + <div> + ![créer une alerte depuis + tradingview](/images/blog/how-to-automate-trading-in-tradingview/creating-an-alert-from-tradingview.png) + </div> +</div> + +Les alertes sont déclenchées dès que leur `Condition` est remplie. Les conditions peuvent être : + +- Des **événements de prix** tels que le prix du [Bitcoin](https://www.octobot.cloud/what-is-bitcoin) atteignant une certaine valeur +- Des seuils d'**indicateurs** tels que la valeur du RSI entrant dans une plage spécifique +- Des **stratégies Pine Script** créant des ordres d'achat ou de vente + <div style={{textAlign: "center"}}> + <div> + ![formulaire d'alerte + tradingview](/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-form.png) + </div> + </div> + +De manière générale, les alertes sont très flexibles et constituent un outil idéal pour connecter TradingView à d'autres plateformes afin d'automatiser des trades. Cette connexion peut se faire par e-mail ou par webhooks. + +### Automatiser avec les alertes par e-mail + +En cochant l'option de notification `Envoyer du texte but` dans l'onglet `Notifications` de l'alerte, TradingView enverra automatiquement un e-mail à l'adresse e-mail de votre alerte chaque fois que l'alerte est déclenchée. + +<div style={{textAlign: "center"}}> + <div> + ![formulaire de notification d'alerte + tradingview](/images/blog/how-to-automate-trading-in-tradingview/tradingview-alerte-email-form.png) + </div> +</div> + +Automatiser les transactions via e-mail est possible, mais peut être compliqué. Pour ce faire, des plateformes spécialisées telles qu'[OctoBot](/investing/tradingview-automated-trading) sont nécessaires pour pouvoir automatiser vos trades TradingView tout en restant sur l'abonnement gratuit de TradingView. + +### Automatiser avec les alertes par webhook + +La manière la plus courante d'automatiser les trades sur TradingView est d'utiliser des webhooks. Un webhook est une URL que TradingView appellera automatiquement dès que votre alerte sera déclenchée. + +Il s'agit de la manière la plus efficace de connecter TradingView à n'importe quelle plateforme de trading. + +L'utilisation des webhooks nécessite un abonnement payant à TradingView et est compatible avec la plupart des plateformes. + +## Robot de trading automatisé TradingView + +Ajouter des alertes à TradingView peut être utilisé pour automatiser les stratégies de trading en utilisant des robots de trading automatisés. + +Un robot de trading est un logiciel qui trade automatiquement sur un compte d'échange en fonction de sa stratégie. Un robot de trading peut écouter vos alertes TradingView et trader instantanément sur votre compte d'échange dès qu'il reçoit la notification de votre alerte. + +Un robot de trading TradingView peut facilement être créé en utilisant un [OctoBot TradingView](/investing/tradingview-automated-trading). Ce robot permet de trader facilement en fonction des e-mails d'alerte TradingView ou des appels de webhook. + +En utilisant OctoBot, vous pouvez créer votre propre robot de trading TradingView qui tradera sur votre compte d'échange ou avec de l'[argent virtuel sans risque](/investing/paper-trading-a-strategy) et automatisera tout type de stratégie de trading. + +Vous avez une stratégie TradingView que vous aimeriez automatiser ? Consultez notre [guide sur comment automatiser une stratégie TradingView](/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud) pour en savoir plus. + +## Trouver la meilleure stratégie TradingView + +TradingView est une excellente plateforme pour trouver des stratégies de trading. En utilisant l'explorateur de stratégies, vous pouvez tester des centaines de stratégies. + +<div style={{textAlign: "center"}}> + <div> + ![explorer de stratégies + tradingview](/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-explorer.png) + </div> +</div> + +Une stratégie TradingView est écrite en <a href="https://www.tradingview.com/pine-script-docs/welcome/" rel="nofollow">Pine Script</a>, un langage spécialement conçu pour créer et visualiser des stratégies de trading + +<div style={{textAlign: "center"}}> + <div> + ![exemple de stratégie + tradingview](/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-example.png) + </div> +</div> + +Lorsque vous sélectionnez une stratégie, TradingView affichera ses signaux sur votre graphique et vous pourrez afficher et modifier son code Pine Script directement depuis le site web. + +<div style={{textAlign: "center"}}> + <div> + ![stratégie rsi simple avec + tradingview](/images/blog/how-to-automate-trading-in-tradingview/tradingview-simple-rsi-strategy.png) + </div> +</div> + +Il existe de nombreuses autres façons de trouver des stratégies TradingView, telles que : + +- L'<a href="https://www.tradingview.com/scripts/" rel="nofollow">explorateur de scripts</a> de TradingView +- Des chaînes Youtube spécialisées telles que <a href="https://www.youtube.com/@DaviddTech" rel="nofollow">Trading with DaviddTech</a> ou <a href="https://www.youtube.com/@TradeIQ" rel="nofollow">TradeIQ</a> + +## Tester une stratégie TradingView + +Lorsque vous utilisez une stratégie TradingView, vous pouvez toujours visualiser son comportement directement sur votre graphique de prix. Vous pouvez également utiliser le moteur de backtesting intégré de TradingView pour exécuter votre stratégie de trading sur des données historiques. + +Le backtesting d'une stratégie est particulièrement utile lors de l'optimisation des paramètres de votre stratégie pour générer le plus de profits sur les marchés que vous tradez. + +<div style={{textAlign: "center"}}> + <div> + ![testeur de stratégie + tradingview](/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-tester.png) + </div> +</div> + +Pour tester une stratégie TradingView, il vous suffit de vous rendre dans l'onglet `Testeur de stratégie` et de consulter les performances historiques de votre stratégie de trading. + +Toute modification de votre stratégie entraînera automatiquement la mise à jour des résultats de backtesting de votre stratégie. + +**[Démarrer votre bot TradingView](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-09-how-to-create-your-tradingview-strategy-with-ai.mdx b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-09-how-to-create-your-tradingview-strategy-with-ai.mdx new file mode 100644 index 0000000000..5579a1e9f0 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-09-how-to-create-your-tradingview-strategy-with-ai.mdx @@ -0,0 +1,133 @@ +--- +title: "Comment créer votre stratégie TradingView avec l'IA" +description: "Créez vos meilleures stratégies de trading sur TradingView en utilisant l'IA. Décrivez votre stratégie avec vos propres mots, améliorez-la et investissez avec." +slug: "how-to-create-your-tradingview-strategy-with-ai" +date: "2024-11-09" +authors: ["paul"] +tags: ["Crypto", "Trading", "Educational", "TradingView", "AI"] +image: "/images/blog/how-to-create-your-tradingview-strategy-with-ai/trading-strategy-with-ai-in-5-minutes-using-octobot-cloud-and-free-tradingview-account.png" +--- + + + +# Comment créer votre stratégie TradingView avec l'IA + +> Et si vous pouviez créer une stratégie de trading simplement en la décrivant ? + +C'est exactement ce que nous avons construit, et nous sommes très fiers d'annoncer que cela fonctionne très bien ! + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="1g4T2IsIKBk" title="Trading automatisé TradingView avec AI" /> +Dans cette vidéo, Guillaume crée une stratégie de trading <a href="https://www.investopedia.com/terms/m/macd.asp" rel="nofollow">MACD</a> + <a href="https://www.investopedia.com/terms/s/sma.asp" rel="nofollow">SMA</a> exclusivement en utilisant l'IA OctoBot pour générer puis +améliorer la stratégie. Enfin, la stratégie est automatisée sur un échange en +direct avec un OctoBot TradingView utilisant de l'[argent virtuel sans +risque](/fr/investing/paper-trading-a-strategy) avec un compte TradingView +gratuit. + +Voici les grandes étapes de cette nouvelle manière de créer des stratégies TradingView. + +## Créez votre stratégie en la décrivant à l'IA + +Il vous suffit de décrire votre stratégie sur le nouveau <a href="https://www.octobot.cloud/creator" rel="nofollow">générateur de stratégie OctoBot</a> et il créera l'équivalent TradingView de votre stratégie en utilisant <a href="https://www.tradingview.com/pine-script-docs/welcome/" rel="nofollow">Pine Script</a>. + +<div style={{textAlign: "center"}}> + <div> + ![générateur de stratégie OctoBot AI créant une stratégie + MACD](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-creating-a-macd-strategy.png) + _prompt: macd strategy_ + </div> +</div> + +Cette intelligence artificielle est similaire à ChatGPT et est spécialement entrainée pour générer des stratégies Pine Script prêtes à l'emploi avec les critères suivants : + +- La stratégie peut être configurée à l'aide de la configuration graphique de TradingView +- Aucune modification ne doit être nécessaire pour exécuter et tester la stratégie depuis TradingView +- La stratégie peut être automatisée telle quelle, sans avoir à la modifier + +Il vous suffit de décrire la stratégie que vous souhaitez créer, et le générateur de stratégie IA l'écrira pour vous ! + +<div style={{textAlign: "center"}}> + <div> + ![code PineScript de la stratégie MACD générée par le générateur de + stratégie OctoBot + AI](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-pinescript-code.png) + _Code PineScript de la stratégie MACD générée_ + </div> +</div> + +Cette stratégie générée peut simplement être collée dans le "Pine Editor" de TradingView et utilisée immédiatement. + +<div style={{textAlign: "center"}}> + **[Générer votre stratégie](https://www.octobot.cloud/creator)** +</div> + +Et ce n'est pas tout. Le générateur de stratégie par IA peut également être utilisé pour améliorer une stratégie. + +## Améliorez votre stratégie de trading avec l'AI + +Une fois que vous avez créé votre stratégie, vous pouvez y apporter des modifications de la même manière que vous l'avez créée : en utilisant le langage naturel et vos propres mots. + +Il vous suffit de demander au générateur de mettre à jour la stratégie comme vous le souhaitez : vous pouvez ajouter un indicateur, changer la manière dont les profits sont réalisés, utiliser plusieurs time frames ou même plusieurs cryptos. Il n'y a pas de limite. + +<div style={{textAlign: "center"}}> + <div> + ![ajouter une moyenne mobile à la stratégie MACD générée par le générateur + de stratégie OctoBot + AI](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-add-moving-average-to-the-strategy.png) + _Prompt: add a moving average condition to the strategy to only buy when the price is bellow average and sell when the price is above average_ + </div> +</div> + +Le générateur de stratégie fera exactement ce que vous lui demandez et mettra à jour votre code de stratégie pour vous. + +<div style={{textAlign: "center"}}> + <div> + ![code PineScript de la stratégie MACD + SMA générée par le générateur de + stratégie OctoBot + AI](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-sma-strategy-pinescript-code.png) + _Code PineScript de la stratégie MACD + SMA générée_ + </div> +</div> + +Ici, nous avons demandé à l'IA de tenir aussi compte des moyennes mobiles (SMA) lors de l'identification de ses signaux d'achat et de vente. Nous avons maintenant une stratégie TradingView MACD + SMA fonctionnelle sans écrire une seule ligne de code. + +Nous pouvons l'utiliser directement sur TradingView pour la tester et l'optimiser. Il s'agit d'une stratégie TradingView standard, tout comme si vous aviez utilisé le code de la stratégie de quelqu'un d'autre, sauf que c'est la vôtre. + +<div style={{textAlign: "center"}}> + <div> + ![test de la stratégie MACD + SMA sur TradingView créée par OctoBot + AI](/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-testing-the-macd-sma-strategy-on-tradingview.png) + _Test de la stratégie MACD + SMA sur TradingView_ + </div> +</div> + +Vous avez maintenant votre propre stratégie TradingView. Et si vous l'utilisiez en conditions réelles sur votre échange ? + +## Automatisez votre stratégie TradingView IA + +Lier votre stratégie TradingView à votre échange peut se faire en utilisant un OctoBot TradingView. Ce sont des OctoBots spéciaux que vous pouvez démarrer sur l'échange de votre choix et qui peuvent être utilisés avec : + +- **De l'argent virtuel sans risque** - La solution idéale pour tester votre stratégie en direct avant d'utiliser vos fonds réels. +- **Vos fonds sur l'échange** - Une fois que vous êtes suffisamment confiant pour utiliser votre stratégie avec vos fonds, vous pouvez commencer à réaliser des gains avec un OctoBot en argent réel. + +Votre stratégie TradingView peut être automatisée avec OctoBot en utilisant des alertes TradingView. Il vous suffit de créer une alerte, d'entrer l'adresse e-mail de votre alerte TradingView OctoBot ou l'URL de son webhook et de définir le message d'alerte à `{{strategy.order.alert_message}}`. + +Votre OctoBot appliquera alors automatiquement votre stratégie sur votre compte d'échange. + +Vous vous demandez l'origine de ce `{{strategy.order.alert_message}}` ? Il s'agit d'un paramètre spécial qui a été défini dans votre code de stratégie par le générateur IA afin de rendre votre stratégie prête à être automatisée par OctoBot sans avoir à en modifier le code. + +<div style={{textAlign: "center"}}> + **[Automatiser TradingView](https://www.octobot.cloud/explore?category=tv)** +</div> + +:::info + Économisez de l'argent : utilisez des alertes par e-mail, elles sont + compatibles avec un compte gratuit TradingView. +::: + +## Aller plus loin + +Nous avons créé un [ensemble de guides et de vidéos](/investing/tradingview-automated-trading) pour aider les utilisateurs à créer et automatiser leur stratégie TradingView. Si vous avez déjà voulu essayer quelque chose avec des indicateurs ou des automatisations TradingView quels qu'ils soient, c'est l'endroit idéal pour commencer ! + +PS : si vous souhaitez utiliser la stratégie décrite dans la vidéo, vous la trouverez sous "MACD SMA strategy" dans l'<a href="https://www.octobot.cloud/explore?category=tv" rel="nofollow">explorateur de stratégies TradingView</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-12-automated-trading-bot.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-12-automated-trading-bot.md new file mode 100644 index 0000000000..628b11c80f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-12-automated-trading-bot.md @@ -0,0 +1,95 @@ +--- +title: "Robot de Trading Automatisé" +description: "Découvrez comment les robots de trading automatisés peuvent révolutionner votre stratégie de trading. Explorez les avantages, meilleures pratiques et stratégies pour optimiser vos profits." +slug: "automated-trading-bot" +date: "2024-11-12" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Bot"] +image: "/images/blog/automated-trading-bot/cover.png" +--- + + + +# Robot de Trading Automatisé + +Fatigué de passer des heures à analyser les tendances du marché pour finalement prendre des décisions de trading basées sur vos émotions ? + +> Vous souhaitez maximiser vos profits et minimiser vos pertes ? + +Si c'est le cas, vous n'êtes pas seul. Ces derniers temps, l'utilisation des robots de trading automatisés s'est développée dans le but de révolutionner la stratégie de trading de nombreux traders. +Voici toutes les informations importantes sur le monde des robots de trading automatisés, couvrant les avantages, les risques et les meilleures pratiques pour réussir. + +## Qu'est-ce qu'un Robot de Trading Automatisé ? + +Un robot de trading est simplement un programme informatique qui utilise les données du marché ou tout autre type de données pour analyser et effectuer des transactions au nom de l'utilisateur. +Ils peuvent être configurés pour suivre une stratégie de trading définie. De plus, ils sont capables d'effectuer des transactions plus rapidement et plus fréquemment qu'un humain ne pourrait jamais le faire. +De nombreux robots de trading sont conçus pour être utilisés dans le trading d'actions, de forex et de cryptomonnaies, et ils offrent de nombreux avantages aux traders. + +Ils peuvent analyser les données du marché et effectuer des transactions sans biais émotionnel, donnant au trader plus de temps pour l'analyse et le développement de stratégies. +Tout comme une voiture autonome, les robots de trading automatisés nécessitent des contrôles et des mises à jour réguliers pour s'assurer qu'ils continuent de fonctionner correctement et efficacement. + +<div style={{textAlign: "center"}}> + <div> + ![Un homme se détendant sur son canapé pendant qu'OctoBot gagne de l'argent + en automatisant des stratégies de + crypto-monnaies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +## Comment Fonctionnent les Robots de Trading Automatisés ? + +Les robots de trading automatisés utilisent des [algorithmes et l'apprentissage automatique](how-does-trading-bot-work) pour analyser les données du marché et exécuter des transactions. +Ils peuvent être simplement programmés pour suivre une stratégie de trading particulière, comme le [DCA](smart-dca-making-of), le GRID et sont capables d'exécuter des trades avec une vitesse et une fréquence impossibles pour les humains. +Ils peuvent être facilement intégrés à d'autres outils, tels que les indicateurs techniques et les logiciels de gestion des risques, pour offrir une solution de trading complète. + +## Avantages d'un Robot de Trading Automatisé + +Parmi les avantages associés à un robot de trading, on trouve : + +- Efficacité et Productivité Accrues : Les robots de trading automatisés peuvent exécuter des transactions plus rapidement et plus fréquemment que n'importe quel être humain ; ainsi, cela libère plus de temps pour l'analyse et le développement de stratégies. +- Précision Accrue et Moins d'Émotions : Ce robot de trading automatisé peut analyser les marchés et prendre des décisions de trading sans biais émotionnel, évitant les décisions impulsives. +- Évolutivité et Flexibilité : On peut programmer un robot de trading automatisé pour suivre n'importe quelle stratégie de trading spécifique ; de plus, ils sont faciles à adapter à la hausse ou à la baisse en fonction des conditions du marché qui évoluent constamment. + +## Choisir le Meilleur Logiciel de Trading Automatisé + +Avec tant d'options de logiciels de trading automatisé disponibles, il est difficile de savoir lequel choisir. Voici quelques points à garder à l'esprit : + +- Recherchez un logiciel qui offre une variété de stratégies de trading. Un logiciel de trading automatisé devrait pouvoir offrir à son utilisateur différentes stratégies de trading, notamment DCA et GRID. +- Considérez le niveau de personnalisation : Un bon logiciel de trading automatisé devrait vous permettre de le personnaliser en fonction de vos besoins de trading. +- Vérifiez s'il dispose d'outils de gestion des risques. Un bon logiciel de trading automatisé devrait avoir des outils de gestion des risques intégrés, tels que les ordres stop-loss et le dimensionnement des positions. + +Nous avons également fait un classement des [meilleurs robots de trading crypto](best-crypto-trading-bots). + +<div style={{textAlign: "center"}}> + <div> + ![Un podium des meilleurs robots de + trading](/images/blog/best-crypto-trading-bots/cover.png) + </div> +</div> + +## Meilleures Stratégies de Trading Automatisé + +Parmi les stratégies de trading automatisé les plus populaires, on trouve : + +- Stratégie DCA : Le Dollar-Cost Averaging (DCA) consiste à investir régulièrement un montant fixe, quel que soit le prix. Cela aide à lisser les coûts d'achat dans le temps et réduit l'impact des hausses et des baisses du marché. +- Stratégie Grid : La stratégie Grid place des ordres d'achat et de vente à intervalles réguliers autour d'un prix. Elle tire profit des fluctuations de prix, permettant aux traders de profiter des petits mouvements sans avoir besoin de prédire la direction du marché. +- Stratégie IA : Le trading par IA utilise des algorithmes pour analyser les données et effectuer des transactions rapidement. Ces systèmes peuvent repérer les tendances et réagir plus rapidement que les humains, s'adaptant aux changements du marché pour de meilleurs résultats. + +## Gestion des Risques et Optimisation du Portefeuille + +La gestion des risques et l'optimisation du portefeuille sont des éléments clés du trading automatisé. Voici quelques points à considérer : + +- Les ordres stop-loss peuvent vous aider à limiter vos pertes si le marché évolue contre vous. +- Dimensionnement des positions : Le dimensionnement des positions est le facteur le plus important dans la gestion des risques et la maximisation des profits. +- Diversifiez votre portefeuille : Vous pouvez réduire votre risque et potentiellement augmenter vos rendements en diversifiant votre portefeuille en utilisant par exemple des [paniers crypto](https://www.octobot.cloud/features/crypto-basket) + +## OctoBot : Votre robot de trading automatisé + +[OctoBot](/) est un robot de trading automatisé facile à utiliser pour automatiser vos stratégies et la gestion des risques, qui peut être personnalisé pour s'adapter à n'importe quel plan de trading. +Vous pouvez démarrer votre propre robot de trading automatisé et le personnaliser selon vos besoins particuliers de trading en utilisant OctoBot ou choisir n'importe quelle stratégie préconfigurée. + +## Conclusion + +Offrant aux traders de nombreux avantages, les robots de trading automatisés augmenteront l'efficacité et la productivité, amélioreront la précision et réduiront les émotions, tout en étant évolutifs et flexibles. +Le bon choix de logiciel de trading automatisé et le développement de votre propre robot de trading automatisé maximiseront vos profits et réduiront les pertes potentielles. +Il est également important de ne pas commettre d'erreurs courantes telles que la sur-optimisation et une mauvaise gestion des risques. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-14-what-is-spot-trading.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-14-what-is-spot-trading.md new file mode 100644 index 0000000000..22ef325e00 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-14-what-is-spot-trading.md @@ -0,0 +1,75 @@ +--- +title: "Qu'est-ce que le Spot Trading" +description: "Découvrez les tenants et aboutissants du spot trading et investissez sur le marché des cryptomonnaies. Découvrez les avantages, les stratégies et les meilleures pratiques pour réussir dans le spot trading." +slug: "what-is-spot-trading" +date: "2024-11-14" +authors: ["guillaume"] +tags: ["Cryptomonnaie", "Trading", "Éducatif"] +image: "/images/blog/what-is-spot-trading/cover.png" +--- + + + +# Qu'est-ce que le Spot Trading ? + +> Vous vous intéressez aux cryptomonnaies, mais vous ne savez vraiment pas par où commencer ? + +Le spot trading est un excellent point de départ, mais la multitude d'options et de stratégies disponibles peut vous impressioner. Dans cet article, nous aborderons les aspects les plus fondamentaux du spot trading, nous fournirons un guide étape par étape pour démarrer, et nous partagerons également des conseils précieux concernant les meilleures stratégies et pratiques qui mèneront au succès. + +Le spot trading consiste à acheter ou vendre une cryptomonnaie au prix actuel du marché. Cela s'oppose au trading de contrats à terme (futures) ou d'options, où vous négocieriez un contrat qui vous donnerait éventuellement le droit d'acheter ou de vendre un actif à un certain prix à un moment donné. + +De nombreux traders apprécient le spot trading car le marché offre de nombreuses opportunités de gagner rapidement de l'argent en spéculant sur ses hausses et ses baisses. Cependant, il comporte aussi ses risques et ses défis, qui seront abordés plus loin dans cet article. + +<div style={{textAlign: "center"}}> + <div> + ![traders avec un ordinateur portable achetant et vendant des + crypto](/images/blog/what-is-spot-trading/cover.png) + </div> +</div> + +## Comment Débuter dans le Spot Trading + +Débuter dans le spot trading est relativement facile mais nécessite quelques connaissances et une préparation de base. Voici les étapes à suivre : + +Tout d'abord, choisissez une plateforme d'échange réputée qui bénéficie d'une bonne crédibilité. Sélectionnez une plateforme qui liste votre paire de cryptomonnaies pour le spot trading. Créez un compte sur cette plateforme et faites-le vérifier. Ensuite, vous pourrez déposer les fonds avec lesquels vous souhaitez trader sur votre compte. Familiarisez-vous avec la plateforme d'échange et ses fonctionnalités, y compris les graphiques et les carnets d'ordres. + +Commencez à trader : [Effectuez votre premier trade](/investing/invest-with-your-strategy) et commencez à faire croître votre portefeuille. + +## Stratégie de Spot Trading + +Il existe de nombreuses stratégies de spot trading que vous pouvez utiliser en fonction de vos objectifs et de votre tolérance au risque. +Examinons quelques-unes des plus utilisées ci-dessous : + +- Analyse technique : Vous pouvez utiliser des graphiques et des [indicateurs techniques](/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy) pour identifier les tendances et les motifs qui se produisent sur le marché. +- Analyse fondamentale : Vous devrez rechercher les fondamentaux sous-jacents d'une monnaie numérique, son cas d'utilisation, son équipe et la demande du marché. +- [Scalping](https://www.octobot.cloud/tools/scalping-signals) : Gagnez de l'argent en achetant et en vendant une cryptomonnaie dans un laps de temps aussi court que possible. +- Swing trading : Gardez simplement la cryptomonnaie pendant une période prolongée, en attendant un mouvement important du marché. + +<div style={{textAlign: "center"}}> + <div> + ![illustration du concept abstrait du bureau de trading de + cryptomonnaies](/images/blog/what-is-spot-trading/cryptocurrency-desk.png) + </div> +</div> + +## Le Spot Trading vs Les Autres Types de Trading + +Le spot trading n'est qu'un type de trading parmi d'autres, et il présente ses avantages et ses inconvénients par rapport aux autres types de trading. + +Voici quelques-unes des principales différences : + +- [Le trading de futures](what-is-future-trading) : Le trading de futures implique l'achat et la vente + d'un contrat qui donne le droit d'acheter ou de vendre à un prix particulier + dans le futur. Cela nécessite une compréhension plus approfondie du marché. +- <a href="https://www.investopedia.com/terms/o/option.asp" rel="nofollow">Le trading d'options</a> est généralement défini comme un échange de contrats où + l'on reçoit le droit de vendre ou d'acheter une forme spécifique d'actif dans + le futur à un prix prédéterminé. Le trading d'options est également une forme + de trading plus complexe et nécessite une compréhension beaucoup plus complète + du marché. +- Day trading : Comme son nom l'indique, ce type de trading implique l'achat et la vente d'une cryptomonnaie dans une seule journée de trading. Il est plus rapide et nécessite une meilleure connaissance du marché. + +## Erreurs Courantes à Éviter dans le Spot Trading + +Le spot trading offre des opportunités lucratives mais comporte des risques inhérents. +Alors que les émotions peuvent faire dérailler les décisions de trading, une gestion disciplinée des risques par le biais d'ordres stop-loss est cruciale. +Le succès nécessite une analyse minutieuse du marché et une compréhension approfondie des cryptomonnaies négociées. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-15-grid-trading.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-15-grid-trading.md new file mode 100644 index 0000000000..85e7b0a3c5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-15-grid-trading.md @@ -0,0 +1,82 @@ +--- +title: "Trading en Grille" +description: "Découvrez des stratégies de trading en grille pour profiter de la volatilité du marché. Apprenez à configurer votre système de trading automatisé avec OctoBot pour des gains optimaux." +slug: "grid-trading" +date: "2024-11-15" +authors: ["guillaume"] +tags: ["Trading", "Stratégie", "OctoBot cloud"] +image: "/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png" +--- + + + +# Trading en grille + +Le trading en grille peut être l'une des méthodes les plus puissantes de trading qui aide à réaliser de bons profits grâce à la volatilité des marchés. Construire un système de trading en grille vous donne la capacité de tirer parti des fluctuations de prix à la hausse pour augmenter vos profits. +Dans ce guide approfondi, nous examinons de près ce qu'est le trading en grille, en abordant les avantages, les stratégies et les erreurs courantes à éviter. + +## Qu'est-ce que le trading en grille ? + +Le trading en grille est une technique de trading qui consiste à placer un certain nombre d'ordres d'achat et de vente à des intervalles de prix fixes. +Cela crée alors une "grille" d'ordres qui peuvent être activés lorsque le prix monte ou descend. + +> L'idée derrière le trading en grille est d'essayer de réaliser un profit à partir de la différence entre l'achat et la vente, mais sans prévoir la tendance. + +<div style={{textAlign: "center"}}> + ![trading en grille illustré par un homme montant sur des escaliers verts en + attrapant des + pièces](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) +</div> + +## Comment fonctionne le trading en grille ? + +<a href="https://www.investopedia.com/terms/g/grid-trading.asp" rel="nofollow">Le trading en grille</a> est un processus qui consiste à définir plusieurs ordres +d'achat et de vente à certains niveaux de prix. En général, ils sont configurés +selon un modèle en grille, où chacun d'eux se déclenche avec chaque mouvement du +prix vers le haut ou vers le bas. En d'autres termes, vous pourriez trader des +cryptomonnaies, en plaçant un ordre d'achat à $50 et un ordre de vente à $55. +Une fois que le prix atteint $55, l'ordre de vente est déclenché, et vous +réalisez du profit en vendant cette crypto. Si le prix descend à $50, cela +déclenche un ordre d'achat, par lequel vous achèterez la crypto à un prix +inférieur. + +## Types de stratégies de trading en grille + +Vous pouvez employer plusieurs types de stratégies [de trading en grille](/guides/octobot-trading-modes/grid-trading-mode) en fonction de vos objectifs en matière de trading et votre tolérance au risque. + +La stratégie de trading en grille la plus commune est le trading en grille basé sur des plages de prix. Cette stratégie implique l'établissement d'une grille d'ordres qui seront déclenchés une fois que le prix commence à se déplacer dans une plage prédéfinie. +Par exemple, supposons que le prix se négocie entre $50 et $60, vous allez définir des ordres d'achat et de vente à des niveaux supérieurs ou inférieurs à cette plage. + +Les variations dans le trading en grille ont plusieurs avantages et inconvénients. + +**Avantages du trading en grille :** + +- **Plus de profits** : Grâce au trading en grille, vous pouvez réaliser des gains lors des fluctuations, même sur un marché latéral. +- **Réduction du risque** : La mise en place d'une grille d'ordres permet réellement de réduire les risques et donc d'éviter des pertes importantes. +- **Flexibilité** : Grâce au trading en grille, vous pouvez trader efficacement dans une variété de marchés tels que les actions, le forex et les crypto-monnaies. + +Cependant, il présente également certains inconvénients. + +**Inconvénients du trading en grille :** + +- **Complexité** : Le trading en grille peut être fastidieux à configurer. +- **Stress** : Il peut être stressant d'utiliser le trading en grille, surtout dans les cas où vous tradez avec une grande partie de votre capital. + +## Bots de trading en grille + +<div style={{textAlign: "center"}}> + ![stratégies de trading en grille disponibles sur + octobot.cloud](/images/blog/grid-trading/grid-strategies.png) +</div> + +Les bots de trading en grille, comme [OctoBot](/fr), sont des programmes automatisés qui placent automatiquement des ordres d'achat et de vente selon des paramètres prédéfinis par le trader. Il crée ainsi une série de niveaux ou "grilles" dans lesquels il exécutera automatiquement des transactions en réponse aux fluctuations du marché. Il effectue automatiquement des ventes partielles lorsque le prix atteint le niveau d'un ordre de vente et achète davantage si le prix descend au niveau d'un ordre d'achat. + +De cette manière, un trader peut continuellement réaliser un profit grâce aux petites fluctuations du prix sans avoir besoin d'une surveillance constante du marché. L'automatisation fournie par ces bots réduit la charge de travail des traders tout en mettant en œuvre des stratégies efficaces tant sur les marchés volatils que latéraux. + +<div style={{textAlign: "center"}}> + **[Démarrer un bot de trading en grille](https://www.octobot.cloud/fr/explore?category=strategies)** +</div> + +## Conclusion + +La stratégie du trading en grille est parmi les meilleures stratégies pour réaliser des gains grâce à la volatilité sur les marchés. Vous pourrez capitaliser sur les fluctuations des prix en créant un système de trading en grille qui augmente vos profits. D'un autre côté, le trading en grille nécessite une gestion sérieuse des risques et une réelle discipline. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-26-best-free-crypto-trading-bots.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-26-best-free-crypto-trading-bots.md new file mode 100644 index 0000000000..3306f33aeb --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2024-11-26-best-free-crypto-trading-bots.md @@ -0,0 +1,349 @@ +--- +title: "10 Meilleurs Bots de Trading Crypto Gratuits" +description: "Découvrez les 10 meilleurs bots de trading crypto gratuits qui peuvent vous aider à automatiser votre stratégie de trading de cryptocurrences, à minimiser les risques et à maximiser les profits potentiels sans vous ruiner." +slug: "best-free-crypto-trading-bots" +date: "2024-11-26" +authors: ["guillaume"] +tags: ["Cryptocurrency", "Trading", "Plans"] +image: "/images/blog/best-crypto-trading-bots/cover.png" +--- + + + +# 10 Meilleurs Bots de Trading Crypto Gratuits + +Dans le monde complexe du trading de cryptocurrences, rester à jour avec les tendances du marché peut ressembler à un travail à temps plein. + +Les bots de trading crypto sont des outils révolutionnaires qui permettent aux traders d'exécuter des stratégies 24h/24, sans être constamment rivés à leurs écrans. +Bien que de nombreux bots de trading soient coûteux, les versions gratuites démocratisent le trading algorithmique pour les investisseurs de tous niveaux. +Ce guide complet vous présentera les 10 meilleurs bots de trading crypto gratuits qui peuvent vous aider à automatiser votre stratégie de trading, à réduire les décisions émotionnelles et à potentiellement augmenter votre efficacité de trading. +Que vous soyez un trader crypto expérimenté ou un débutant curieux. + +## Qu'est-ce qu'un bot de trading crypto ? + +Un bot de trading crypto est un programme logiciel avancé conçu pour interagir avec les exchanges de cryptocurrences, en exécutant automatiquement des trades selon des stratégies prédéfinies et une analyse de marché. +Ces algorithmes sophistiqués peuvent surveiller les conditions du marché, analyser les mouvements de prix et prendre des décisions de trading à votre place. +En utilisant des modèles mathématiques complexes et des techniques d'[apprentissage automatique](best-ai-trading-bots), les bots de trading peuvent mettre en œuvre diverses stratégies comme l'arbitrage, le suivi de tendance, la [tenue de marché](https://market-making.octobot.cloud) et l'analyse de sentiment, sans subir les biais émotionnels qui impactent souvent les décisions de trading humaines. + +<div> + Maintenant que nous avons une compréhension claire de ce qu'est un bot de + trading crypto, explorons les différents types de bots disponibles sur le + marché. +</div> + +## 1. OctoBot + +<div style={{textAlign: "center"}}> + <div> + ![Un homme se détendant sur son canapé pendant qu'OctoBot génère de l'argent + en automatisant des stratégies de + cryptomonnaies](/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png) + </div> +</div> + +[OctoBot](/) est un bot de trading flexible et facile à utiliser qui offre diverses stratégies gratuites, incluant des stratégies basées sur l'[IA](https://www.octobot.cloud/features/ai-trading-bot), les [paniers de crypto](https://www.octobot.cloud/features/crypto-basket), le [DCA](smart-dca-making-of) intelligent, les stratégies en grille et les stratégies [TradingView](https://www.octobot.cloud/features/tradingview-bot). +Il est [open-source](open-source-trading-software). Avec son focus sur la transparence, les utilisateurs peuvent tester les stratégies avec des données historiques, utiliser le trading virtuel et suivre leurs performances. +OctoBot supporte la plupart des principales exchanges crypto et propose également des plans premium gratuits en complétant des missions, ce qui le rend adapté aux débutants comme aux investisseurs crypto expérimentés. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={3} + h="14px" + tooltipText="Facile à utiliser avec des stratégies IA et prédéfinies pour débutants et pros" + /> + <Rating + title="Limites version gratuite" + level={3} + h="14px" + tooltipText="Possibilité de débloquer des fonctionnalités payantes en complétant des missions gratuitement" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Open-source, supporte les principales exchanges, variété de stratégies de trading, test rétrospectif et suivi des performances" + /> + </div> +</Card> + +## 2. 3Commas + +<div style={{textAlign: "center"}}> + <div> + ![logo-3commas](/images/blog/best-crypto-trading-bots/3commas.png) + </div> +</div> + +3Commas est un bot de trading crypto, proposant des bots GRID, DCA et Signal. +Connu pour son interface conviviale, 3Commas supporte plusieurs stratégies de trading et indicateurs techniques. +Il dispose également d'une communauté pour le support et l'apprentissage, et d'un marketplace de signaux crypto tiers. Il offre un plan limité gratuit +et nécessite une vérification KYC en Europe. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Interface facile à naviguer avec plusieurs stratégies de trading mais nécessite une vérification KYC en Europe" + /> + <Rating + title="Limites version gratuite" + level={1} + h="14px" + tooltipText="Peu de trades et de bots" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Supporte les bots GRID, DCA et Signal avec un marketplace" + /> + </div> +</Card> + +## 3. CoinRule + +Coinrule est un bot de trading crypto sans code. +Il propose une configuration de règles simple "si-alors", plus de 150 règles de trading prédéfinies et un exchange de démonstration sans risque. +Le bot est accessible via une plateforme web, supporte tous les principaux tokens et propose différents plans d'abonnement, dont une option gratuite limitée. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Configuration de règles 'si-alors' complexe pour débutants" + /> + <Rating + title="Limites version gratuite" + level={2} + h="14px" + tooltipText="Seulement 1 exchange et 2 règles" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Plus de 150 règles prédéfinies et un exchange de démonstration sans risque" + /> + </div> +</Card> + +## 4. Cryptohopper + +Cryptohopper est un [bot de trading crypto](automated-trading-bot) en cloud. +Il se distingue par son bot de tenue de marché et la possibilité pour les utilisateurs de créer des stratégies de trading personnalisées ou de copier celles d'autres utilisateurs depuis son marketplace. +La plateforme supporte également le trading automatisé via un bot Telegram et propose des services supplémentaires comme des signaux crypto, des modèles de stratégie et du trading fictif. +Le plan gratuit n'offre que la copie de bots et la gestion de portefeuille. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Difficile pour les débutants, propose un bot de tenue de marché" + /> + <Rating + title="Limites version gratuite" + level={1} + h="14px" + tooltipText="Seulement copie de bots" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Création de stratégies personnalisées, trading via bot Telegram et trading fictif" + /> + </div> +</Card> + +## 5. Pionex + +<div style={{textAlign: "center"}}> + <div> + ![logo-pionex](/images/blog/best-crypto-trading-bots/pionex.jpg) + </div> +</div> + +[Pionex](https://www.pionex.com/en/signUp?r=octobot) ([Pionex.us](https://accounts.pionex.us/en/signup?ref=octobot) pour les citoyens américains) est une plateforme de trading de pointe connue pour ses bots de trading automatisés et conviviaux, permettant aux traders d'exécuter des stratégies sans effort. +Il offre une variété de bots personnalisables adaptés à différents styles de trading, ce qui le rend idéal pour les traders novices et expérimentés recherchant de la flexibilité. +Avec des outils avancés de test rétrospectif et de suivi des performances, Pionex permet aux utilisateurs d'optimiser leurs stratégies et de gérer efficacement les risques. +Comme c'est également un exchange, il n'y a pas de frais supplémentaires pour démarrer un bot de trading. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Facile à utiliser pour copier un bot, mais création de bot complexe" + /> + <Rating + title="Limites version gratuite" + level={3} + h="14px" + tooltipText="Frais d'exchange faibles" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Grande variété de bots prédéfinis" + /> + </div> +</Card> + +## 6. Bot de trading Binance + +Les bots de trading [Binance](/guides/exchanges/binance) sont des outils automatisés conçus pour exécuter des trades de cryptocurrences selon des paramètres prédéfinis, permettant aux utilisateurs de trader 24h/24 sans surveillance constante. +Ces bots améliorent l'efficacité du trading en analysant les données de marché et en prenant des décisions instantanées, ce qui peut aider à capitaliser sur la volatilité du marché. +Binance offre une variété de bots, dont le populaire Spot Grid bot, particulièrement efficace dans les marchés latéraux en achetant bas et vendant haut dans une fourchette de prix définie. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Exploration et création de bot complexes" + /> + <Rating + title="Limites version gratuite" + level={3} + h="14px" + tooltipText="Frais d'exchange uniquement" + /> + <Rating + title="Fonctionnalités" + level={3} + h="14px" + tooltipText="Grande variété de bots prédéfinis" + /> + </div> +</Card> + +## 7. Cornix + +Cornix est une plateforme de trading crypto automatisé, réputée pour être le plus grand marketplace de signaux crypto. +Il propose des bots DCA, une application mobile dédiée et une intégration avec Telegram pour une automatisation de trading facile. Il offre un plan gratuit limité. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Facilité modérée avec application mobile et intégration Telegram" + /> + <Rating + title="Limites version gratuite" + level={2} + h="14px" + tooltipText="1 bot de tous types" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Bots DCA et intégration avec principales plateformes de trading" + /> + </div> +</Card> + +## 8. Bot de trading Bybit + +Les bots de trading [Bybit](/guides/exchanges/bybit) sont des outils automatisés qui exécutent des trades de cryptocurrences selon des stratégies prédéfinies, permettant un trading sans surveillance humaine constante. +Ces algorithmes sophistiqués analysent rapidement les tendances du marché, offrant des options comme le Grid Bot pour les marchés latéraux et le Bot DCA pour des investissements systématiques et à risque maîtrisé. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={2} + h="14px" + tooltipText="Exploration de bot complexe" + /> + <Rating + title="Limites version gratuite" + level={3} + h="14px" + tooltipText="Frais d'exchange uniquement" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Fonction de remplissage automatique" + /> + </div> +</Card> + +## 9. Bot de trading KuCoin + +Les bots de trading [KuCoin](/guides/exchanges/kucoin) sont des outils automatisés qui effectuent des trades de cryptocurrences selon des paramètres définis par l'utilisateur, permettant un trading continu sans nécessiter de surveillance constante. +Ils analysent les données de marché en temps réel pour prendre des décisions rapides et profiter des fluctuations de prix. +Les options principales incluent le Spot [Grid](/guides/octobot-trading-modes/grid-trading-mode) bot pour les marchés latéraux et le bot [Dollar-Cost Averaging](smart-dca-making-of) (DCA) pour un investissement régulier. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Exploration et configuration de bot complexes" + /> + <Rating + title="Limites version gratuite" + level={3} + h="14px" + tooltipText="Frais d'exchange uniquement" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Pas de bots prédéfinis" + /> + </div> +</Card> + +## 10. Bot de trading OKX + +Les bots de trading [OKX](/guides/exchanges/okx) sont des systèmes de trading de cryptocurrences algorithmiques qui exécutent automatiquement des stratégies prédéfinies en analysant des données de marché en temps réel. +Ces outils automatisés exploitent des techniques comme le trading en grille, le DCA, et l'[arbitrage](https://www.octobot.cloud/tools/triangular-arbitrage-crypto) pour optimiser les performances de trading. +En minimisant l'intervention humaine, les bots permettent des interactions précises et rapides sur le marché à travers plusieurs plateformes de cryptocurrences. + +<Card className="mx-auto mt-5 py-5 border-0"> + <div> + <Rating + title="Facilité d'utilisation" + level={1} + h="14px" + tooltipText="Exploration et configuration de bot complexes" + /> + <Rating + title="Limites version gratuite" + level={2} + h="14px" + tooltipText="Frais d'exchange élevés" + /> + <Rating + title="Fonctionnalités" + level={2} + h="14px" + tooltipText="Pas de bots prédéfinis" + /> + </div> +</Card> + +## Conclusion + +En résumé, que vous soyez un débutant ou un trader expérimenté, il existe un bot de trading crypto qui peut répondre à vos besoins spécifiques. +Bien que ces outils puissent être de puissants alliés dans votre parcours de trading, il est crucial de garder à l'esprit qu'aucun bot ne garantit des profits. +Effectuez toujours des recherches approfondies, comprenez la stratégie du bot et commencez avec de petits investissements ou testez avec le [trading virtuel](/investing/paper-trading-a-strategy). diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2025-02-25-kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2025-02-25-kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai.md new file mode 100644 index 0000000000..35055139cb --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2025-02-25-kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai.md @@ -0,0 +1,87 @@ +--- +title: "Kucoin x OctoBot Fireside chat - Simplifier l'investissement crypto avec l'IA" +description: "Écoutez notre Fireside chat avec Kucoin où nous discutons de la manière dont l'intelligence artificielle change la donne en investissements crypto." +slug: "kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai" +date: "2025-02-25" +authors: ["guillaume"] +tags: ["Partnership", "Trading", "Exchange", "AI"] +image: "/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement-banner.png" +--- + + + +# Kucoin x OctoBot Fireside chat - Simplifier l'investissement en crypto avec l'IA + +> **Comment l'IA peut-elle vraiment vous aider dans vos investissements en crypto ?** + +Dans <a href="https://x.com/i/spaces/1ynJOlgWzaExR" rel="nofollow">ce Fireside chat</a> avec <a href="https://x.com/swingy369" rel="nofollow">Serena</a> de <a href="https://www.kucoin.com/ucenter/signup?rcode=rJ2Q2T3" rel="nofollow">Kucoin</a>, Paul et Guillaume, co-fondateurs d'[OctoBot](/), nous discutons de la manière dont l'IA change la donne dans l'investissement crypto. + +<div style={{textAlign: "center"}}> + <div> + <a href="https://x.com/i/spaces/1ynJOlgWzaExR" rel="nofollow">![kucoin x octobot fireside chat simplifying crypto investment with ai + announcement banner](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement.jpeg)</a> + </div> +</div> + +<div style={{textAlign: "center"}}> + **[Écoutez le Fireside chat](https://x.com/i/spaces/1ynJOlgWzaExR)** +</div> + +Voici un résumé des principaux sujets abordés lors de ce Fireside chat. + + +## OctoBot est conçu pour la simplicité +Chez OctoBot, nous croyons que les stratégies d'investissement en crypto devraient être accessibles à tous ceux qui les recherchent. Cependant, la sélection et la mise en place d'une stratégie d'investissement peuvent être très complexes et risquées. + +C'est pourquoi [octobot.cloud](/) est conçu avec la simplicité en tête. Toute stratégie de trading peut être facilement lancée à partir de stratégies préconfigurées. + +L'IA pour créer ses stratégies +Créer une stratégie de trading ne doit pas être une tâche complexe. En utilisant <a href="creator" rel="nofollow">OctoBot AI</a>, tout le monde peut créer sa propre stratégie de trading sur n'importe quel exchange supporté. + +Naturellement, chaque stratégie de trading peut être utilisée d'abord avec de l'[argent virtuel sans risque](/investing/paper-trading-a-strategy) puis sur le compte exchange de l'utilisateur. + +### Créer un panier de crypto avec l'IA + +![creating a meme coins crypto basket using ai](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-meme-coins-crypto-basket-using-ai.png) + +Créez n'importe quel panier de crypto en le décrivant simplement à l'IA. Le panier de crypto créé sera prêt à l'emploi sur l'exchange que vous avez spécifié. + +### Créer une stratégie DCA avec l'IA +![creating a sol eth usdc dca strategy on conbase using ai](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-eth-usdc-dca-strategy-on-conbase-using-ai.png) + +Les stratégies DCA peuvent également être créées à partir de l'IA, décrivez ce que vous souhaitez trader et comment le trader, et votre stratégie DCA sera prête. + +### Créer une stratégie de grille avec l'IA + +Les stratégies de grille sont un excellent moyen de réaliser des profits sur les marchés relativement stables, mais elles peuvent être difficiles à concevoir. L'IA facilite grandement cette tâche. En utilisant OctoBot AI, vous pouvez obtenir votre grille personnalisée en quelques secondes. + +![creating a sol usdt grid strategy on kucoin using ai](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-usdt-grid-strategy-on-kucoin-using-ai.png) + +L'IA décrit les informations clés de votre grille de manière simple. Ici, cette grille Solana à 10 ordres couvre un intervalle de prix assez serré et suivra automatiquement le prix en ajustant vers le haut lorsque le prix SOL/USDT dépassera 146.291 USDT. + +### Créer une stratégie TradingView avec l'IA + +Lorsque vous utilisez TradingView pour investir, Pine Script permet d'automatiser une stratégie. Cependant, il peut être difficile de créer une nouvelle stratégie TradingView. OctoBot AI rend cette étape beaucoup plus facile. Décrivez simplement votre idée de stratégie TradingView et l'IA créera votre script Pine en un instant. + +<div style={{textAlign: "center"}}> + **[Créer votre propre stratégie avec l'IA](https://www.octobot.cloud/creator)** +</div> + +## Investir en utilisant des paniers de crypto pendant le bull market + +Pendant le bull market, les paniers de crypto performent extrêmement bien. Nous-mêmes, chez OctoBot, utilisons beaucoup les paniers de crypto pour nos investissements personnels. + +![top 5 basket with 73 percent profit on kucoin](/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/top-5-basket-with-73-percent-profit-on-kucoin.png) + +Bien sûr, chez OctoBot, nous utilisons OctoBot pour nos investissements et voici une capture d'écran de l'un de nos bots personnels de paniers de crypto. Nous pensons que les paniers de crypto sont une excellente stratégie d'investissement simple, surtout pour un marché haussier. + +<div style={{textAlign: "center"}}> + **[Voir les paniers de crypto](https://www.octobot.cloud/explore?category=indexes)** +</div> + +## La transparence est au cœur d'OctoBot + +Depuis 2018, OctoBot est open source. Nous ne voulons pas que les utilisateurs aient à faire aveuglément confiance à OctoBot, c'est pourquoi l'ensemble du code est disponible sur <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">GitHub</a>. Tout le monde peut télécharger et installer le logiciel. + +## Mot de la fin +Chez OctoBot, nous voulons rendre les stratégies d'investissement faciles à utiliser pour tous. L'IA est récemment devenue suffisamment puissante pour vraiment aider les investisseurs dans leurs stratégies, et nous sommes fiers d'annoncer qu'OctoBot est maintenant beaucoup plus facile à utiliser et ouvre plus de possibilités que jamais. diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/2025-11-05-how-to-use-a-self-custody-crypto-trading-bot.md b/docs/i18n/fr/docusaurus-plugin-content-blog/2025-11-05-how-to-use-a-self-custody-crypto-trading-bot.md new file mode 100644 index 0000000000..279b0fbc0f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/2025-11-05-how-to-use-a-self-custody-crypto-trading-bot.md @@ -0,0 +1,190 @@ +--- +title: "Comment utiliser un robot de trading crypto self-custody" +description: "Apprenez à utiliser un robot de trading crypto self-custody pour automatiser vos stratégies d'investissement sur les exchanges centralisés et décentralisés." +slug: "how-to-use-a-self-custody-crypto-trading-bot" +date: "2025-11-05" +authors: ["guillaume"] +tags: ["Trading", "Exchange", "Self custody", "Crypto"] +image: "/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png" +--- + + + + BarChart, + CheckCircle, + CircleDollarSign, + Globe, + Shield, + TrendingUp, + Zap, +} from 'lucide-react' + +# Comment utiliser un robot de trading crypto self-custody + + +<div style={{textAlign: "center"}}> + <div> + ![chatgpt-logo](/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png) + _"Pas vos clés, pas vos cryptos"_ + </div> +</div> + +C'est l'un des principes fondamentaux de la crypto : la self-custody de vos cryptomonnaies garantit la sécurité et l'indépendance offertes par la technologie blockchain. + +- Tout a commencé avec des portefeuilles tels que <a href="https://metamask.io/" rel="nofollow">MetaMask</a> ou <a href="https://electrum.org/" rel="nofollow">Electrum</a> pour vous donner un contrôle total sur vos cryptos. +- Ensuite, les exchanges décentralisés tels qu'<a href="https://app.uniswap.org/" rel="nofollow">Uniswap</a> ont permis de trader facilement vos cryptos sans avoir à faire confiance à une autorité centrale. +- Les exchanges décentralisés ont continué à s'améliorer et proposent désormais également des instruments de trading sophistiqués tels que les contrats perpétuels sur <a href="https://app.hyperliquid.xyz/" rel="nofollow">Hyperliquid</a>. + +Enfin, les robots de trading self-custody permettant d'exploiter les exchanges cryptographiques pour automatiser les stratégies d'investissement indépendamment de toute autorité centralisée, commencent à faire leur apparition. + +## Qu'est-ce qu'un robot de trading crypto non-custodial +Un robot de trading crypto non-custodial est un robot de trading entièrement contrôlé par vous, son utilisateur. +1. Il n'est pas contrôlé par une autorité centrale telle qu'un exchange crypto ou une plateforme de robot de trading qui détient vos clés API (ou de portefeuille). +2. C'est un robot de trading qui vous permet de toujours être seul maître de vos clés API d'exchange ou de votre portefeuille crypto. + + +Bien que la self-custody soit un principe fondamental de la crypto, elle n'est pas toujours facile à mettre en œuvre. +Pour y parvenir, vous devez configurer votre propre portefeuille, ce qui peut être fait rapidement avec un portefeuille par navigateur tel que <a href="https://metamask.io/" rel="nofollow">MetaMask</a>. Ce portefeuille peut ensuite être utilisé pour stocker vos cryptos et les échanger sur des exchanges décentralisés. + + +<div style={{textAlign: "center"}}> + <div> + ![metamask-logo](/images/blog/how-to-use-a-self-custody-crypto-trading-bot/metamask-logo.png) + </div> +</div> + + +À partir là, un robot de trading connecté à ce portefeuille sera capable d'appliquer votre stratégie d'investissement en tradant vos cryptos sur les exchanges décentralisés en utilisant votre propre portefeuille. + +Cela implique que le robot accède directement à votre portefeuille crypto, ce qui signifie que vous devez avoir une très grande confiance dans la plateforme de robot que vous utilisez. Le risque étant que tout pirate compromettant la plateforme du robot sera alors en mesure de voler vos cryptos. + +C'est là qu'interviennent les robots de trading crypto self-custody. Ce sont des robots de trading qui ne sont contrôlés que par vous, ils ne partagent jamais les clés de votre portefeuille crypto avec une plateforme ou quiconque. + + +Un robot de trading crypto self-custody peut se connecter à la fois aux exchanges centralisés et décentralisés, et dans les deux cas, il augmente considérablement la sécurité de vos cryptos. + + +### Robot de trading crypto self-custody pour les exchanges centralisés + +Pour automatiser une stratégie sur un exchange centralisé, il existe trois solutions : + +**Simple mais rigide :** les services de robot de trading intégrés aux exchanges centralisés +Dans ce cas, le robot fonctionne <a href="https://www.binance.com/trading-bots" rel="nofollow">directement sur les serveurs de l'exchange centralisé</a>. Ces services présentent l'avantage d'être sécurisés et très faciles à configurer et à utiliser. L'inconvénient est de manquer de flexibilité par rapport aux outils de robot de trading spécialisés. + +**Flexible mais moins sécurisé :** les plateformes de robot de trading spécialisées +Le robot fonctionne sur les serveurs d'une plateforme de robot de trading comme <a href="https://3commas.io/" rel="nofollow">3Commas</a>. Ces plateformes présentent l'avantage d'être flexibles et de permettre d'utiliser de nombreuses stratégies de trading différentes. L'inconvénient est d'être moins sécurisée car la plateforme peut <a href="https://blockworks.co/news/3commas-security-breach" rel="nofollow">faire fuiter vos clés API</a> en cas de faille de sécurité. + +**Sécurisé et flexible :** les robots de trading crypto self-custody +Un robot, tel que [la version open source d'OctoBot](https://www.octobot.cloud/trading-bot), fonctionne sur votre propre ordinateur ou serveur. Vos clés API ne quittent jamais votre ordinateur. Cela présente l'avantage d'être sécurisé et flexible. L'inconvénient est de nécessiter plus de connaissances techniques et de disposer d'un ordinateur ou serveur pour fonctionner. + +Comme souvent, il n'existe pas de solution idéale pour tous les cas. Vous devez choisir la meilleure solution pour vos besoins. Il est cependant intéressant de noter que des [options sécurisées et flexibles](https://www.octobot.cloud/features/self-custody-trading-bot) sont de plus en plus disponibles avec l'arrivée des robots de trading self-custody, qui deviennent de plus en plus accessibles au grand public. + +### Robot de trading crypto self-custody pour les exchanges décentralisés + +Pour automatiser une stratégie sur un exchange décentralisé, il existe deux types de robot DEX : + +**Faire confiance à une plateforme de robot de trading DEX** +Dans ce cas, le robot fonctionne sur les serveurs de la plateforme de robot DEX. Bien que cela soit pratique, comme cela implique de partager votre portefeuille avec la plateforme, tout problème avec cette plateforme peut avoir des conséquences dévastatrices pour vos fonds. + +**Robots de trading crypto self-custody** +Ce robot fonctionne sur votre appareil et ne partage jamais votre portefeuille avec une plateforme, ce qui en fait de loin l'option la plus sécurisée. L'inconvénient étant que très peu de robots de trading sont disponibles pour les exchanges décentralisés. + +Dans l'ensemble, automatiser une stratégie d'investissement sur un DEX reste très difficile, c'est pourquoi nous travaillons sur un [robot de trading crypto self-custody pour les exchanges décentralisés](https://www.octobot.cloud/features/self-custody-trading-bot) simple à utiliser. + +### Avantages et inconvénients des robots de trading crypto self-custody + + +La seule façon d'utiliser un robot de trading crypto self-custody est de l'exécuter sur votre propre ordinateur ou serveur. Cela signifie généralement une application de bureau que vous installez sur votre propre système et un casse-tête pour la configurer et l'exécuter correctement. + +**Les avantages des robots de trading crypto self-custody** + +<div> + {[ + { + icon: <CheckCircle className="text-primary" />, + name: 'Vos clés, vos cryptos', + description: 'Vous êtes le seul à avoir accès à votre portefeuille crypto', + }, + { + icon: <Shield className="text-primary" />, + name: 'Meilleure sécurité', + description: "Il n'y a pas de tiers pour compromettre vos cryptos ou clés API", + }, + { + icon: <CircleDollarSign className="text-primary" />, + name: 'Flexibilité maximale', + description: 'Utilisez les exchanges décentralisés et centralisés depuis la même plateforme', + }, + ].map((element, i) => ( + <HighlightElement key={i} element={element} /> + ))} +</div> + +**Les inconvénients des robots de trading crypto self-custody** + +<div> + {[ + { + icon: <CircleDollarSign className="text-rating-color-2" />, + name: 'Responsabilité', + description: "Vous êtes responsable de la sécurité de votre portefeuille crypto et de vos clés API, il n'y a pas de service de récupération si vous perdez vos clés.", + }, + { + icon: <Globe className="text-rating-color-2" />, + name: 'Exécution du logiciel', + description: + 'Le logiciel doit être exécuté en continu sur votre propre ordinateur ou serveur.', + }, + { + icon: <Globe className="text-rating-color-2" />, + name: 'Configuration complexe', + description: + 'Un robot de trading crypto self-custody nécessite en général des connaissances techniques et une configuration sécurisée.', + }, + ].map((element, i) => ( + <HighlightElement key={i} element={element} /> + ))} +</div> + +Chez OctoBot, nous travaillons sur un robot de trading crypto self-custody qui résout à la fois les inconvénients **Exécution du logiciel** et **Configuration complexe** en utilisant un robot de trading self-custody sécurisé depuis votre téléphone portable. + +<div style={{textAlign: "center"}}> + **[S'inscrire à l'accès anticipé](https://www.octobot.cloud/features/self-custody-trading-bot)** +</div> + + +## Comment utiliser un robot de trading crypto self-custody + +Un robot de trading crypto self-custody est toujours un logiciel que vous devez installer, configurer et exécuter sur votre propre mobile, ordinateur ou serveur. + +La première étape consiste donc à télécharger et installer un robot de trading crypto self-custody, tel que [l'application de bureau open source d'OctoBot](https://www.octobot.cloud/trading-bot). + +Ensuite, vous pourrez sélectionner la stratégie que vous souhaitez utiliser, vous connecter à votre compte d'exchange et commencer à trader. +Bien qu'OctoBot fonctionne avec la plupart des exchanges centralisés et quelques exchanges décentralisés, si votre objectif principal est de trader sur les exchanges décentralisés, un robot spécialisé comme <a href="https://hummingbot.org/" rel="nofollow">Hummingbot</a> serait un meilleur choix. +Ça y est, votre robot est installé et configuré, c'était la partie facile. + +> "Installer un robot de trading crypto self-custody est simple. L'exécuter et le sécuriser correctement ne l'est pas." + +Votre robot de trading crypto self-custody automatise maintenant votre stratégie. Les prochaines étapes sont de : +- S'assurer qu'il fonctionne 24/7 (ce qui signifie le surveiller, le redémarrer s'il s'arrête, le maintenir à jour) +- Le garder en sécurité contre les autres personnes pouvant accéder à votre ordinateur, les malwares potentiels et les failles de sécurité. +- S'il fonctionne sur un serveur, s'assurer que votre connexion à ce serveur est toujours sécurisée et chiffrée. + +Tout cela peut représenter un véritable défi, surtout si vous n'avez pas de solides connaissances techniques. C'est pourquoi jusqu'à aujourd'hui, la grande majorité des robots de trading crypto self-custody n'étaient disponibles que sous forme d'applications de bureau (ou même d'outils en ligne de commande) conçus pour des utilisateurs très techniques. + + +## Une application mobile de robot de trading crypto self-custody + +Chez OctoBot, nous travaillons sur les robots de trading depuis 2018, l'année où nous avons codé la première version d'<a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">OctoBot open source sur GitHub</a>. Le temps a passé depuis et le monde des crypto a beaucoup changé. +Avec l'essor des exchanges décentralisés populaires tels qu'Hyperliquid ou Uniswap et des réglementations de plus en plus strictes, les plateformes de robot de trading traditionnelles ne sont, dans de nombreux cas, plus le choix évident. + +C'est pourquoi nous avons travaillé sur une **application mobile de robot de trading crypto self-custody** qui vous permet de : +- **Automatiser vos stratégies d'investissement** sur vos exchanges centralisés et décentralisés **de manière simple**. +- **Sécuriser votre portefeuille crypto et vos clés API d'exchange centralisé** sur votre propre appareil. +- Profiter de l'**accessibilité de votre téléphone portable** pour toujours avoir le contrôle sur vos stratégies. + +Nous lancerons l'application très bientôt. Inscrivez-vous à l'accès anticipé pour être parmi les premiers à l'utiliser. + +<div style={{textAlign: "center"}}> + **[S'inscrire à l'accès anticipé](https://www.octobot.cloud/features/self-custody-trading-bot)** +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/architecture/design-philosophy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/architecture/design-philosophy.md new file mode 100644 index 0000000000..f5d3cda948 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/architecture/design-philosophy.md @@ -0,0 +1,111 @@ +--- +title: "L'architecture d'OctoBot" +description: "Découvrez la philosophie de conception et l'architecture technique d'OctoBot, axées sur la rapidité et la scalabilité, en utilisant Python et la programmation asynchrone avec asyncio." +sidebar_position: 7 +--- + + + +# L'architecture d'OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +## Philosophie + +The goal behind OctoBot is to have a **very fast and scalable** trading robot. + +To achieve this, OctoBot is entirely built around the + +<a href="https://docs.python.org/3/library/asyncio.html" rel="nofollow">asyncio</a> producer-consumer +<a href="https://github.com/Drakkar-Software/Async-Channel" rel="nofollow">Async-Channel</a> framework which allows to very quickly and efficiently +transmit data to different elements within the bot. The idea is to all the time +maintain **fully up-to-date data** without having to use update loops. Update +loops require sleeping time, which is inefficient. This architecture enables to +**notify the evaluation chain as quickly as possible** when an update is +available without having to wait for any update cycle of any update loop. + +Additionally, in order to save CPU time, as little threads as possible are used +by OctoBot (usually less than 10 with a standard setup). + +## Aperçu + +The OctoBot code is split into [several repositories](github-repositories). +Each module is handled as an independent python module and is available on the + +<a href="https://pypi.org/" rel="nofollow">official python package repository</a> (used in `pip` commands). + +## OctoBot + +![OctoBot architecture](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/octobot_arch.svg) + +Simplified view of the OctoBot core components. + +Inside the OctoBot part, each arrow is an async channel. + +## Les Tentacles d'OctoBot + +Tentacles are OctoBot's extensions, they are meant to be easily customizable, can +be activated or not and do any specific action within OctoBot. + +### Les Tentacles de la chaîne d'évaluation + +They are tools to analyze market data as well as any other type of data (Teddit, Telegram, etc). +They implement abstract evaluators, strategies and trading modes. + +### Les Tentacles utilitaires + +These are OctoBot's interfaces (web, telegram), notification systems, social news feeds +and [backtesting](/guides/octobot-usage/backtesting) data collectors. They implement abstract interfaces, services, service +feeds, notifiers and data collectors + +## Evaluateurs, stratégies et trading modes: + +### Evaluateurs + +Simple python classes that will automatically be wake up when new data is available. +Their goal is to set `self.eval_note` and call `await self.evaluation_completed` +that will then be made available to the Strategy(ies). They should be dedicated to +a single simple task such as (for example) evaluate the RSI on the current data or +looks for a divergence in a trend. + +### Stratégies + +Strategies are more complex elements, they can read all the evaluators evaluations +on every time frame and are considering these evaluations to set their `self.eval_note` +and call `await self.strategy_completed`. As a comparison if evaluators are human +senses, strategies are the brain that will take these senses' signals and decide to +do something or not. Strategies can be generic like SimpleStrategyEvaluator that +will take any standard evaluator and time frame into account or using specific +evaluators only like MoveSignalsStrategyEvaluator. + +### Trading modes + +[Trading modes](../octobot-trading-modes/trading-modes) use the strategy(ies) evaluations to create, update or cancel orders. +Using the strategies signals, they are responsible for the way to translate a signal +into an order by looking at the available funds, open orders, considering stop loss +or not and other trading related responsibilities. + +### Déclencheurs + +Evaluators, strategies and trading modes are automatically triggered when their channel +has a new data. Trigger sources are: + +For evaluators + +- Technical evaluators: any new candle or refresh request (with updated candles data) from a strategy +- Real time evaluators: any new candle and any market price change +- Social evaluators: associated signal (ex: a post for a Reddit social evaluator) + +For strategies + +- After a technical evaluator cycle: when all TA have updated their evaluation and called `await self.evaluation_completed` +- After any real time evaluator evaluation and call of `await self.evaluation_completed` +- After any social evaluator evaluation and call of `await self.evaluation_completed` + +For trading mode + +- After any strategy evaluation and call of `await self.strategy_completed` + +_Thanks for reading this guide and if you have any idea on how to improve it, please reach out to us !_ diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/environment-variables.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/environment-variables.md new file mode 100644 index 0000000000..21d7634980 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/environment-variables.md @@ -0,0 +1,32 @@ +--- +title: "Variables d'environnement" +description: "Utilisez des variables d'environnement pour modifier le comportement d'OctoBot. Installez les derniers tentacles, modifiez l'adresse IP et le port de l'interface web, désactivez la limite de taux d'échange, et bien plus encore." +sidebar_position: 10 +--- + + + +# Les variables d'environnement d'OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +## Installation des Tentacles + +`TENTACLES_URL_TAG` overrides the default OctoBot version tag for +tentacles package installation. Some additional tags are available : + +- **latest** : to install the latest published tentacles (usually requires an up-to-date `dev` branch on OctoBot to work) +- **tests/XXX** : for OctoBot-Tentacles-Manager tests + +## Interface Web + +- `WEB_ADDRESS` overrides the host IP address, can be set to `0.0.0.0` to accept all incoming connections. +- `WEB_PORT` overrides the default web port (5001). + +## Plateformes d'échange + +- `DEFAULT_REQUEST_TIMEOUT`: Exchanges requests timeout in milliseconds. Can be increased if your internet connection is very slow. Default value is `20000`. +- `ENABLE_CCXT_VERBOSE`: Set to `True` to log each <a href="https://github.com/ccxt/ccxt" rel="nofollow">ccxt</a> exchange request. Default is `False`. +- `ENABLE_CCXT_RATE_LIMIT`: Set to `False` to disable <a href="https://docs.ccxt.com/#/?id=rate-limit" rel="nofollow">ccxt rate limit</a>. This will make each exchange request to be instantly emitted. **Be careful as this can lead to an IP ban** if the exchange spamming rules are not respected. Default is `True`. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/github-repositories.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/github-repositories.md new file mode 100644 index 0000000000..d8418004f0 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/github-repositories.md @@ -0,0 +1,43 @@ +--- +title: "Les dépôts GitHub" +description: "Apprenez-en plus sur les différents dépôts OctoBot sur GitHub, comment ils sont divisés et quel est leur but respectif." +sidebar_position: 8 +--- + + + +# Les dépôts GitHub d'OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +OctoBot code is split into multiple repositories, all hosted under +the <a href="https://github.com/Drakkar-Software" rel="nofollow">Drakkar-Software</a> organisation on +GitHub. + +- <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">github.com/Drakkar-Software/OctoBot</a> (dev branch) for the main program initialization, + [backtesting](/guides/octobot-usage/backtesting) and strategy optimizer + setup as well as community data management. +- <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">github.com/Drakkar-Software/OctoBot-Tentacles</a> (dev branch) tentacles: evaluators, strategies, trading + modes, interfaces, notifiers, external data feeds (reddit, telegram etc), + backtesting data formats management and exchange specific behaviors. +- <a href="https://github.com/Drakkar-Software/OctoBot-Trading" rel="nofollow">github.com/Drakkar-Software/OctoBot-Trading</a> for everything trading and exchange related: exchange + connections, exchange data fetch and update, orders, trades and portfolios + management. +- <a href="https://github.com/Drakkar-Software/OctoBot-evaluators" rel="nofollow">github.com/Drakkar-Software/OctoBot-evaluators</a> for everything related to evaluators and strategies. +- <a href="https://github.com/Drakkar-Software/OctoBot-Services" rel="nofollow">github.com/Drakkar-Software/OctoBot-Services</a> for everything related to interfaces: graphic (web) and + text(telegram), notifications push and social analysis data management: update + engine to handle new data from an external feed (ex: reddit) when it gets + available. +- <a href="https://github.com/Drakkar-Software/OctoBot-Backtesting" rel="nofollow">github.com/Drakkar-Software/OctoBot-Backtesting</a> for the [backtesting + engine](/fr/guides/octobot-usage/backtesting) and scheduling as well as + historical data collection unified storage management. +- <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles-Manager" rel="nofollow">github.com/Drakkar-Software/OctoBot-Tentacles-Manager</a> for tentacles installation, updates and interactions: + get a tentacle documentation, configuration or it's dependencies. +- <a href="https://github.com/Drakkar-Software/OctoBot-Commons" rel="nofollow">github.com/Drakkar-Software/OctoBot-Commons</a> for common tools and constants used across each above + repository. +- <a href="https://github.com/Drakkar-Software/Async-Channel" rel="nofollow">github.com/Drakkar-Software/Async-Channel</a> which is used by OctoBot as a base framework for every + data transfer within the bot. This allows a highly optimized and scalable + architecture that adapts to any system while using a very low amount of CPU + and RAM. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/running-tests.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/running-tests.md new file mode 100644 index 0000000000..443c42f90c --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/running-tests.md @@ -0,0 +1,36 @@ +--- +title: "Exécuter les tests" +description: "Découvrez comment les tests automatisés fonctionnent sur les dépôts Python open source d'OctoBot en utilisant pytest et GitHub Actions." +sidebar_position: 4 +--- + + + +# Tests + +Chaque suite de tests des dépôts OctoBot est exécutée avec <a href="https://docs.pytest.org/" rel="nofollow">pytest</a> sur <a href="https://docs.github.com/actions" rel="nofollow">GitHub Action</a> et peut également être lancée localement dans un environnement de développement. + +## Prérequis + +Pour exécuter les tests d’OctoBot, un environnement de développement OctoBot est nécessaire. La configuration de cet environnement est décrite dans le [guide d’environnement de développement](setup-your-environment) + +## Le moteur d'OctoBot + +Pour lancer les tests du moteur d’OctoBot, utilisez la commande `pytest` à la racine du dossier OctoBot : + +```bash +pytest tests +``` + +Cette commande exécutera tous les tests présents dans le dossier tests. + +## Les Tentacles + +Pour lancer les tests des tentacles d’OctoBot, utilisez la commande `pytest tentacles` à la racine du dossier OctoBot : + +```bash +pytest tentacles +``` + +Cette commande exécutera tous les tests du dossier **tentacles**. Le test des tentacles ne fonctionne que si les Tentacles sont installés sur l’OctoBot testé. Consultez le [guide de l'environnement de développement](setup-your-environment) +pour les installer. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/setup-your-environment.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/setup-your-environment.md new file mode 100644 index 0000000000..1a9e053baa --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/setup-your-environment.md @@ -0,0 +1,408 @@ +--- +title: "Configurer votre environnement" +description: "Apprenez comment créer votre environnement de développement OctoBot à partir des dépôts Python open source d'OctoBot sur GitHub en utilisant VSCode ou PyCharm." +sidebar_position: 3 +--- + + + +# Installation pour développeur d'OctoBot + +Cet environnement permet d'exécuter un OctoBot en local via le code Python, d'y apporter des modifications, puis de les déboguer et tester. + +- [**Install OctoBot requirements**](#install-octobot-requirements) +- [**Cloning OctoBot repositories**](#cloning-octobot-repositories-with-git) +- [**Setting up PyCharm IDE**](#setting-up-pycharm-ide) +- [**Setting up Visual Studio Code IDE**](#setting-up-visual-studio-code-ide) + +## Installer les prérequis d'OctoBot + +**Télécharger et installer:** + +- Langage de programmation: <a href="https://www.python.org/downloads/release/python-31011/" rel="nofollow">Python 3.10</a> +- Gestionnaire de version: <a href="https://git-scm.com/downloads" rel="nofollow">Git</a> +- IDE: <a href="https://www.jetbrains.com/pycharm/" rel="nofollow">PyCharm</a> or <a href="https://code.visualstudio.com/Download" rel="nofollow">Visual Studio Code</a> + + +## Clonage des dépôts OctoBot + +Les dépôts `OctoBot` et `OctoBot-Tentacles` sont nécessaires pour configurer l'environnement de développement OctoBot. + +Ouvrez un terminal dans votre dossier de projet et exécutez les commandes suivantes pour télécharger les dépôts officiels (version de développement) : + + +```bash +git clone https://github.com/Drakkar-Software/OctoBot.git --branch dev +git clone https://github.com/Drakkar-Software/OctoBot-Tentacles.git --branch dev +``` +Remarque : +- Pour contribuer aux projets, créez d'abord un fork de ces dépôts et utilisez vos propres copies. +- Les pull requests doivent être soumises vers la branche dev de chaque dépôt. + +*Pour aller plus loin* +Vous êtes un développeur avancé qui maîtrise déjà l'architecture globale d'OctoBot et souhaite modifier ses modules principaux ? + +Le code d'OctoBot étant réparti sur plusieurs dépôts GitHub (chaque dépôt couvrant un aspect du logiciel), vous devrez peut-être cloner d'autres dépôts. Plus de détails sur la [page des dépôts GitHub](github-repositories). + +## Environnement VSCode pour OctoBot + +### Création du projet et installation des dépendances + +1. Ouvrez Visual Studio Code dans le dossier contenant les dépôts OctoBot. +2. Ouvez le terminal et créez un environnement virtuel Python 3.10 pour contenir les dépendances d'OctoBot. Commande: `python -m venv venv` +3. Activez l'environnement virtuel (`.\venv\Scripts\Activate.ps1` sur Windows ou `source venv/bin/activate` sur Linux/macOS) +<div style={{textAlign: "center"}}> +![vscode create octobot venv](/images/guides/dev_env/vscode-create-octobot-venv.png) +</div> +4. Installez les dépendances avec `python -m pip install -r OctoBot/requirements.txt -r OctoBot/dev_requirements.txt` depuis le terminal intégré de VSCode terminal, qui utilise votre environment virtuel. +<div style={{textAlign: "center"}}> +![vscode install python requirements](/images/guides/dev_env/vscode-install-python-requirements.png) +</div> + + +### Configuration de VSCode +1. Créez un dossier `.vscode` à la racine de votre projet. +2. Ajoutez un fichier `settings.json` au dossier `.vscode` avec ce contenu (pour utiliser l'environment virtuel créé). Note: remplacer le chemin vers l'exécutatble python sur Linux/MacOS +```json +{ + "python.defaultInterpreterPath": "${workspaceFolder}/venv/Scripts/python.exe" +} +``` +3. Dans la dossier `.vscode`, Créez un fichier `launch.json` avec le contenu suivant afin de définit les configurations d'exécutions. Ce fichier simplifie le développement en permettant de : +- Démarrer OctoBot +- Lancer les tests +- Gérer les Tentacles + +```json +{ + "configurations": [ + { + "type": "debugpy", + "name": "Start OctoBot", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "1.Run", + "order": 1 + }, + "justMyCode": false, + "args": [], + "env": {} + }, + { + "type": "debugpy", + "name": "OctoBot tests", + "request": "launch", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "2.Test", + "order": 20 + }, + "justMyCode": false, + "args": [ + "tests", + "--no-header", + "--disable-warnings", + "--show-capture=no", + "-v", + "-vv", + "-k", + " " + ], + "module": "pytest" + }, + { + "type": "debugpy", + "name": "OctoBot-Tentacles tests trading modes", + "request": "launch", + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "2.Test", + "order": 21 + }, + "justMyCode": false, + "args": [ + "tentacles/Trading/Mode", + "--no-header", + "--disable-warnings", + "--show-capture=no", + "-v", + "-vv", + "-s", + "-k", + " " + ], + "module": "pytest" + }, + { + "type": "debugpy", + "name": "Export tentacles to repo", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 31 + }, + "justMyCode": false, + "args": [ + "tentacles", + "-e", + "../../OctoBot-Tentacles", + "OctoBot-Default-Tentacles", + "-d", + "../OctoBot/tentacles" + ] + }, + { + "type": "debugpy", + "name": "OctoBot repair tentacles", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 32 + }, + "justMyCode": false, + "args": [ + "tentacles", + "--repair", + "-d", + "." + ] + }, + { + "type": "debugpy", + "name": "Export OctoBot-Tentacles to zip", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 33 + }, + "justMyCode": false, + "args": [ + "tentacles", + "-p", + "../tentacles_default_export.zip", + "-d", + "../OctoBot-Tentacles" + ] + }, + { + "type": "debugpy", + "name": "Install tentacles zip", + "request": "launch", + "console": "integratedTerminal", + "program": "${workspaceFolder}/OctoBot/start.py", + "cwd": "${workspaceFolder}/OctoBot", + "presentation": { + "hidden": false, + "group": "OctoBot-Tentacles-Manager", + "order": 34 + }, + "justMyCode": false, + "args": [ + "tentacles", + "-i", + "--all", + "--location", + "any_platform.zip" + ] + } + ] +} +``` + +VSCode devrait maintenant afficher les configurations launch.json dans son interface utilisateur. + +<div style={{textAlign: "center"}}> +![vscode run configurations selector](/images/guides/dev_env/vscode-run-configurations-selector.png) +</div> + +Note: ces fichiers ont été créés avec VSCode 1.102.1 (juillet 2025). Si certaines valeurs deviennent obsolètes dans des versions ultérieures de VSCode, veuillez nous contacter pour mettre à jour ce guide. + +### Exécution d'OctoBot + +#### 1. Installation des tentacles depuis un dépôt de tentacles +Maintenant que VSCode est configuré, il est nécessaire d'installer vos premiers tentacles OctoBot. + +1. Exécutez la configuration `Export OctoBot-Tentacles to zip` + +Cette configuration installera automatiquement tous les tentacles contenus dans un dossier local dans votre OctoBot, pour qu'il puisse les utiliser. OctoBot ne peut utiliser que les tentacles correctement installés dans son dossier `tentacles`. + +Cette étape est nécessaire pour utiliser le code des tentacles cloné depuis `OctoBot-Tentacles`. Si vous ne l'effectuez pas, OctoBot téléchargera les tentacles associés à sa dernière version, qui pourraient être incompatibles avec la branche `dev` sur laquelle se trouve votre code OctoBot actuel. + +<div style={{textAlign: "center"}}> +![vscode executed export tentacles to zip](/images/guides/dev_env/vscode-executed-export-tentacles-to-zip.png) +</div> + +Cette action exporte les tentacles d'OctoBot-Tentacles dans une archive zip qui peut être installée sur votre OctoBot ou partagée. + +2. Exécutez la configuration `Install tentacles zip` + +<div style={{textAlign: "center"}}> +![vscode executed install tentacles from zip](/images/guides/dev_env/vscode-executed-install-tentacles-from-zip.png) +</div> + +Cela ajoutera à votre OctoBot les tentacles contenus dans ce zip. Cette configuration peut être utilisée pour installer n'importe quel zip de tentacles. + +Votre dossier local OctoBot contient maintenant le code des tentacles cloné depuis le dépôt `OctoBot-Tentacles`. Ré-exécutez `Export OctoBot-Tentacles to zip` et `Install tentacles zip` lorsque vous voulez mettre à jour vos tentacles locaux depuis le dépôt git `OctoBot-Tentacles`. +Attention : cela écrasera toutes les modifications locales des tentacles réinstallés. Assurez-vous de sauvegarder vos changements au préalable. + +#### 2. Lancement d'OctoBot + +Cette configuration démarrera votre OctoBot local. Assurez-vous d'avoir d'abord installé les tentacles `OctoBot-Tentacles` (via les configurations `Export OctoBot-Tentacles to zip` et `Install tentacles zip`), sinon OctoBot installera ses tentacles par défaut et leur import pourrait échouer. + +Exécutez la configuration `Start OctoBot` + +<div style={{textAlign: "center"}}> +![vscode executed start octobot](/images/guides/dev_env/vscode-executed-start-octobot.png) +</div> + +#### 3. Export des modifications de vos tentacles vers leur dépôt git + +Cette configuration exportera les modifications de vos tentacles locaux OctoBot vers le dépôt de tentacles configuré. Elle prendra les fichiers liés à votre package de tentacles sélectionné. + +Exécutez la configuration `Export tentacles to repo` + + +Depuis les paramètres de `launch.json`, vous pouvez modifier : +- `OctoBot-Default-Tentacles` pour sélectionner des tentacles à exporter depuis un package différent +- `OctoBot-Tentacles` pour exporter vers un dépôt git différent + +#### 4. Exécution des tests + +Les configurations `OctoBot tests` et `OctoBot-Tentacles tests trading modes` sont des exemples pour exécuter tous les tests OctoBot ou les tests des Trading Modes des tentacles. N'hésitez pas à ajouter d'autres configurations de test. + +<div style={{textAlign: "center"}}> +![vscode executed tests](/images/guides/dev_env/vscode-executed-tests.png) +</div> + + +## Environnement OctoBot dans PyCharm + +### Création du projet et installation des dépendances +1. Ouvrez PyCharm et sélectionnez le dossier contenant les dépôts OctoBot. +2. Créez un nouvel environnement virtuel Python 3.10 pour les dépendances d'OctoBot. +<div style={{textAlign: "center"}}> +![create pycharm interpreter](/images/guides/dev_env/create-pycharm-interpreter.png) +</div> +3. Installez les dépendances Python depuis le dossier OctoBot en exécutant dans le terminal intégré de PyCharm (qui utilise par défaut votre nouvel environnement virtuel) avec la commande `python -m pip install -r OctoBot/requirements.txt -r OctoBot/dev_requirements.txt`. +<div style={{textAlign: "center"}}> +![install octobot requirements from pycharm](/images/guides/dev_env/install-octobot-requirements-from-pycharm.png) +</div> + +### Configuration des exécutions dans PyCharm + +Les étapes suivantes pour créer des configurations d'exécution PyCharm utilisant l'environnement virtuel créé (celui contenant les dépendances d'OctoBot) pour chaque type de commande Python : +- Démarrer OctoBot +- Exécuter les tests +- Gérer les tentacles + + +#### 1. Installation des tentacles depuis un dépôt git +Cette configuration installera automatiquement tous les tentacles d'un dossier local dans votre OctoBot. OctoBot ne peut utiliser que les tentacles correctement installés dans son dossier `tentacles`. + +Cette étape est nécessaire pour utiliser le code des tentacles cloné depuis `OctoBot-Tentacles`. Si vous ne l'effectuez pas, OctoBot téléchargera les tentacles de sa dernière version stable, potentiellement incompatible avec la branche `dev` utilisée. + +1. Cliquez sur `Edit Configurations` +<div style={{textAlign: "center"}}> +![edit pycharm configurations](/images/guides/dev_env/edit-pycharm-configurations.png) +</div> +2. Créez la configuration `Export OctoBot-Tentacles to zip`: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +- Script parameters: `tentacles -p ../tentacles_default_export.zip -d ../OctoBot-Tentacles` +<div style={{textAlign: "center"}}> +![create pycharm export tentacles config](/images/guides/dev_env/create-pycharm-export-tentacles-config.png) +</div> +3. Exécutez cette configuration pour exporter les tentacles dans une archive zip qui pourra alors être installée sur votre OctoBot, ou partagée. +<div style={{textAlign: "center"}}> +![execute pycharm export tentacles](/images/guides/dev_env/execute-pycharm-export-tentacles.png) +</div> +4. Créez la configuration `Install tentacles zip` pour installer ces tentacles zippées sur votre OctoBot: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +- Script parameters: `tentacles -i --all --location any_platform.zip` +<div style={{textAlign: "center"}}> +![create pycharm install tentacles config](/images/guides/dev_env/create-pycharm-install-tentacles-config.png) +</div> +5. Exécutez cette configuration pour installer les tentacles. Cette configuration peut être utilisée pour installer tout zip de tentacles. +<div style={{textAlign: "center"}}> +![execute pycharm install tentacles](/images/guides/dev_env/execute-pycharm-install-tentacles.png) +</div> + +Votre dossier local OctoBot contient maintenant le code des tentacles que vous avez cloné depuis le dépôt `OctoBot-Tentacles`. Ré-exécutez `Export OctoBot-Tentacles to zip` et `Install tentacles zip` lorsque vous souhaitez mettre à jour vos tentacles locaux depuis le dépôt git `OctoBot-Tentacles`. +Attention : cela écrasera toutes les modifications locales des tentacles réinstallés. Assurez-vous de sauvegarder vos changements au préalable. + + +#### 2. Lancement d'OctoBot +Cette configuration d'exécution démarrera votre OctoBot local. Assurez-vous d'avoir d'abord installé les tentacles `OctoBot-Tentacles` (via les configurations `Export OctoBot-Tentacles to zip` et `Install tentacles zip`), sinon OctoBot installera ses tentacles par défaut et leur import pourrait échouer. + +1. Cliquez sur `Edit Configurations` +<div style={{textAlign: "center"}}> +![edit pycharm configurations](/images/guides/dev_env/edit-pycharm-configurations.png) +</div> +2. Créez la configuration `Start OctoBot`: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +<div style={{textAlign: "center"}}> +![create pycharm start octobot run config](/images/guides/dev_env/create-pycharm-start-octobot-run-config.png) +</div> +3. Exécutez cette configuration pour démarrer votre OctoBot +<div style={{textAlign: "center"}}> +![execute pycharm start octobot](/images/guides/dev_env/execute-pycharm-start-octobot.png) +</div> + +Vous pouvez maintenant démarrer votre OctoBot depuis votre environnement de développement, effectuer des modifications locales et exécuter Python en mode debug. + +#### 3. Export des modifications de vos tentacles vers leur dépôt git +Cette configuration exportera les modifications de vos tentacles OctoBot locaux vers le dépôt de tentacles configuré. Elle sélectionnera les fichiers liés à au package de tentacles sélectionné. + +1. Cliquez sur `Edit Configurations` +<div style={{textAlign: "center"}}> +![edit pycharm configurations](/images/guides/dev_env/edit-pycharm-configurations.png) +</div> +2. Créez la configuration `Export tentacles to repo`: +- Script path: `path_to_your_octobot_repositories/OctoBot/start.py` +- Working directory: `path_to_your_octobot_repositories/OctoBot` +- Script parameters: `tentacles -e ../../OctoBot-Tentacles OctoBot-Default-Tentacles -d ../OctoBot/tentacles` +<div style={{textAlign: "center"}}> +![create pycharm export tentacles to repo config](/images/guides/dev_env/create-pycharm-export-tentacles-to-repo-config.png) +</div> +3. Exécutez cette configuration pour appliquer les modifications de votre dossier OctoBot/tentacles vers le dépôt git de ce package de tentacles. +<div style={{textAlign: "center"}}> +![execute pycharm export tentacles to repo](/images/guides/dev_env/execute-pycharm-export-tentacles-to-repo.png) +</div> + +Dans les paramètres du script, vous pouvez modifier: +- `OctoBot-Default-Tentacles` pour sélectionner des tentacles à exporter selon un package différent. Les packages sont définis dans le `metadata.json` de chaque tentacle, sous la clé `origin_package`. +- `OctoBot-Tentacles` pour exporter les tentacles vers un dépôt git différent. + + +#### 4. Exécution des tests + +Créez des configurations d'exécution `pytest` pour lancer les tests OctoBot. N'hésitez pas à ajouter d'autres configurations de test selon vos besoins. + +<div style={{textAlign: "center"}}> +![create pycharm tests config](/images/guides/dev_env/create-pycharm-tests-config.png) +</div> +<div style={{textAlign: "center"}}> +![execute pycharm tests](/images/guides/dev_env/execute-pycharm-tests.png) +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/tips.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/tips.md new file mode 100644 index 0000000000..bc056e3ca4 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/environment/tips.md @@ -0,0 +1,49 @@ +--- +title: "Astuces pour developpeurs" +description: "Profitez de nos conseils pour vous lancer rapidement en tant que développeur OctoBot. Explorez les fichiers de backtesting SQLite en utilisant un navigateur SQLite et testez vos stratégies." +sidebar_position: 9 +--- + + + +# Astuces pour les développeur + +:::info + La traduction française de cette page est en cours. +::: + +## Données de Backtesting + +[Backtesting](/guides/octobot-usage/backtesting) data files are sqlite database files. When using the regular data collector, these files contain every historical candles the requested exchange is willing to give. You can use a <a href="https://sqlitebrowser.org/" rel="nofollow">SQLite browser</a> to explore these files. + +## Testes de stratégies + +To quickly check tentacles strategy tests states or develop a new tentacles strategy test, change the following lines in **octobot/tests/functional_tests/strategy_evaluators_tests/abstract_strategy_test.py**: + +```python +def _handle_results(self, independent_backtesting, profitability): + exchange_manager_ids = get_independent_backtesting_exchange_manager_ids(independent_backtesting) + for exchange_manager in get_exchange_managers_from_exchange_ids(exchange_manager_ids): + _, run_profitability, _, market_average_profitability, _ = get_profitability_stats(exchange_manager) + actual = round(run_profitability, 3) + # uncomment this print for building tests + # print(f"results: rounded run profitability {actual} market profitability: {market_average_profitability}" + # f" expected: {profitability} [result: {actual == profitability}]") + assert actual == profitability +``` + +into + +```python +def _handle_results(self, independent_backtesting, profitability): + exchange_manager_ids = get_independent_backtesting_exchange_manager_ids(independent_backtesting) + for exchange_manager in get_exchange_managers_from_exchange_ids(exchange_manager_ids): + _, run_profitability, _, market_average_profitability, _ = get_profitability_stats(exchange_manager) + actual = round(run_profitability, 3) + # uncomment this print for building tests + print(f"results: rounded run profitability {actual} market profitability: {market_average_profitability}" + f" expected: {profitability} [result: {actual == profitability}]") + # assert actual == profitability +``` + +This will not stop tests on failure and display the current tests results as well as expected values. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/getting-started.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/getting-started.md new file mode 100644 index 0000000000..63c994a977 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/getting-started.md @@ -0,0 +1,41 @@ +--- +title: "Démarrer en tant que développeur" +description: "Vous vous demandez comment personnaliser OctoBot ? Apprenez comment vous lancer en tant que développeur OctoBot, créez vos propres tentacles et contribuez au robot de trading open source." +sidebar_position: 21 +--- + + + +# Développeurs OctoBot + +:::info + Pour les développeurs et contributeurs de l' + <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">OctoBot open source</a>. +::: + +## 1 - Apprendre les bases de Python + +Regarder une heure de cours vidéo Python pour débutants devrait suffire pour commencer. + +## 2 - Configurer votre environnement de développer OctoBot + +Une fois configuré, vous allez adorer l'[environnement de développement d'OctoBot](/guides/octobot-developers-environment/setup-your-environment). + +## 3 - Découvrir comment créer des tentacles + +Les Tentacles Octobot sont des modules/extensions pour OctoBot. + +Un "tentacle" peut être ce que vous voulez qu'il soit. Par exemple: +- Un indicateur technique +- Une stratégie de trading personnalisée en Python +- Un outil pour envoyer des notifications sur Discord +- Une amélioration de l'interface web +- Ou quelque chose de complètement différent + +Enfin, les tentacles peuvent être regroupées dans un bundle pour être partagées avec d'autres utilisateurs d'OctoBot. + +Consultez [le guide de personnalisation OctoBot](/guides/octobot-tentacles-development/customize-your-octobot) pour en savoir plus. + +## 4 - Contribuer à OctoBot + +Consultez <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/CONTRIBUTING.md" rel="nofollow">nos directives de contribution</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/create-a-tentacle-package.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/create-a-tentacle-package.md new file mode 100644 index 0000000000..0d64618d54 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/create-a-tentacle-package.md @@ -0,0 +1,206 @@ +--- +title: "Créer un tentacle package" +description: "Apprenez à regrouper vos tentacles OctoBot personnalisés dans un package de tentacles, à activer la configuration via les entrées de l'utilisateur, à y ajouter des documents et à le partager." +sidebar_position: 7 +--- + + + +# Développement tentacle package + +:::info + La traduction française de cette page est en cours. +::: + +## Tentacle packages + +This page covers tentacle package creation. A working [Octobot developer environment](/developers/environment/setup-your-environment) is required to create a tentacle. + +A tentacle package is a python module that contains one or multiple [tentacles](/guides/octobot-tentacles-development/create-a-tentacle) +of the same type. + +### Le dossier d'un tentacle package + +A tentacle package is defined by a folder located at : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/ +``` + +> TP is for tentacle package + +- **YOUR_TP_CATEGORY** can be Backtesting, Evaluator, Services or Trading + +- **YOUR_TP_SUB_CATEGORY** should be a sub category of **YOUR_TP_CATEGORY** in the existing + tentacle architecture + +- **YOUR_TENTACLE_PACKAGE_NAME** is the name of your tentacle package, shouldn't use an + existing tentacle package name + +### Le fichier de description + +A tentacle package contains metadata described in the metadata.json file. This file is +used to properly install the tentacle and should be carefully written. It's located at +the root path of the tentacle package : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/metadata.json +``` + +A tentacle package metadata.json contains : + +```javascript +{ + "version": "YOUR_TP_VERSION", + "origin_package": "YOUR_TP_ORIGIN_PACKAGE", + "tentacles": ["YOUR_TP_TENTACLE_1", "YOUR_TP_TENTACLE_2"], + "tentacles-requirements": ["YOUR_TP_TP_REQUIREMENT_1", "YOUR_TP_TP_REQUIREMENT_2"] +} +``` + +- **YOUR_TP_VERSION** is your tentacle package version +- **YOUR_TP_ORIGIN_PACKAGE** is the author or the origin repository of the tentacle package + +- **YOUR_TP_TENTACLE_1** and **YOUR_TP_TENTACLE_2** are name of the tentacle classes contained + in your tentacle package tentacles (1 or more). + +- **YOUR_TP_TP_REQUIREMENT_1** and **YOUR_TP_TP_REQUIREMENT_2** are the names of required + tentacle packages to have installed to run your tentacle package (0 or more) + +> **YOUR_TP_TENTACLE_X** should match python classes to be exposed in the tentacle + +Example _DailyTradingMode/metadata.json_ : + +```javascript +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DailyTradingMode"], + "tentacles-requirements": ["mixed_strategies_evaluator"] +} +``` + +### Modules de tentacles + +[Tentacle](/guides/octobot-tentacles-development/create-a-tentacle) python modules should be placed at the root path of the +tentacle package. There can 1 or more modules per package. + +Example with _momentum_evaluator_ : The main python module that contains multiple tentacles +is located at + +```bash +tentacles/Evaluator/TA/momentum_evaluator/momentum.py +``` + +Every tentacle classes should be imported in the package root `__init__.py` file. + +Example with _momentum_evaluator_'s `__init__.py`.py : + +```python +from .momentum import RSIMomentumEvaluator, ADXMomentumEvaluator, RSIWeightMomentumEvaluator, +BBMomentumEvaluator, MACDMomentumEvaluator, KlingerOscillatorMomentumEvaluator, +KlingerOscillatorReversalConfirmationMomentumEvaluator +``` + +### Configuration + +A tentacle package can contain tentacle configurations. +Tentacles configuration consists in 2 parts: + +1. User inputs: the description of each configuration parameter, in the `init_user_inputs` tentacle method +2. Default configuration: the default configuration values, as json, in the `config` folder + +#### User inputs + +On OctoBot's web interface, tentacle configuration settings are generated using their definition +as User inputs as well as their current values. + +For a configuration parameter to show on the configuration interface, it has to be defined as +user input. Any value contained in the json configuration file can be used by the tentacle but +only the ones associated to user inputs will be visible to the user. + +```python +def init_user_inputs(self, inputs: dict) -> None: + self.period_length = self.UI.user_input( + "period_length", enums.UserInputTypes.INT, 14, inputs, + min_val=1, title="EMA period length." + ) + self.min_trigger_value = self.UI.user_input( + "sleep_delay", enums.UserInputTypes.FLOAT, 0.5, inputs, + min_val=0.34, max_val=0.75, title="Threshold above which to trigger a signal." + ) + self.send_notification = self.UI.user_input( + "send_notification", enums.UserInputTypes.BOOLEAN, True, inputs, + title="When enabled, send telegram notification on signal." + ) +``` + +The full definition of user inputs can be found + +<a href="https://github.com/Drakkar-Software/OctoBot-Commons/blob/master/octobot_commons/configuration/user_inputs.py" rel="nofollow">here</a> +. + +If you are unsure how to use user inputs, have a look at + +<a href="https://github.com/Drakkar-Software/OctoBot-Tentacles/blob/master/Evaluator/TA/momentum_evaluator/momentum.py" rel="nofollow">the existing tentacles user inputs</a> +. + +> Tentacles configuration are displayed using the <a href="https://github.com/json-editor/json-editor" rel="nofollow">json-editor</a> +> library. User inputs are converted into json schemas that are then passed to the editor alongside +> their current configuraiton values. + +- The `editor_options` argument allows to set json-editor specific options such as the `disable_array_add` option (`editor_options={"disable_array_add": True}`). +- The `other_schema_values` argument allows to set json schema specific parameters such as the `minItems` or `uniqueItems` for arrays (`other_schema_values={"minItems": 1, "uniqueItems": True}`). + +#### Configuration par défaut + +Values for an tentacle default configuration are located in the _config/_ folder at : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/config/ +``` + +Each tentacles config file should be named with the exact case and name as the associated +tentacle class. Below an example for _MyAwesomeTentacle_ : + +```bash +tentacles/YOUR_TP_CATEGORY/YOUR_TP_SUB_CATEGORY/YOUR_TENTACLE_PACKAGE_NAME/config/MyAwesomeTentacle.json +``` + +Once a tentacle configuration has been edited, a local copy of this json configuration file +is added to your profile where local changes are saved. + +### Ressources + +Tentacle resources are located in the **resources** folder of your tentacle package. + +Each tentacles documentation should be created for in `resources/YOUR_TP_TENTACLE_1.md`, +`resources/YOUR_TP_TENTACLE_2.md` (the file name should match the tentacle class name). + +A tentacle package can also contain many resources that can be binary files, images... + +Example _DailyTradingMode/resources/DailyTradingMode.md_ : + +```text +DailyTradingMode is a **low risk versatile trading mode** that reacts only the its state changes to +a state that is different from the previous one and that is not NEUTRAL. + +When triggered for a given symbol, it will cancel previously created (and unfilled) orders +and create new ones according to its new state. + +DailyTradingMode will consider every compatible strategy and average their evaluation to create +each state. +``` + +> You can use <a href="https://www.markdownguide.org/cheat-sheet" rel="nofollow">markdown</a> to format a +> tentacle documentation. + +### Tests + +Tentacle should be tested. Tests file are usually located in the **tests** folder +of the tentacle package. + +## Installer et partager les tentacles + +Follow the [tentacles installation guide](/guides/octobot-tentacles-development/customize-your-octobot) to install or +share your custom tentacle package. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/create-a-tentacle.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/create-a-tentacle.md new file mode 100644 index 0000000000..067914f5e8 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/create-a-tentacle.md @@ -0,0 +1,106 @@ +--- +title: "Créer une tentacle" +description: "Créez vos propres tentacles OctoBot (également appelés extensions). Ajoutez des outils d'analyse technique et sociale, améliorez les interfaces web et Telegram, et corrigez les connexions avec les plateformes d'échange." +sidebar_position: 6 +--- + + + +# Développement de Tentacle + +:::info + La traduction française de cette page est en cours. +::: + +## Tentacles + +This page covers tentacle development. A working [Octobot developer environment](/developers/environment/setup-your-environment) is required to create a tentacle. + +A tentacle is part of a [tentacle package](/guides/octobot-tentacles-development/create-a-tentacle-package) +and defines a tool for OctoBot such as a way to analyse moving averages, listen +to reddit or create grid-like orders. + +OctoBot uses tentacles to handle: + +* Price technical analysis \(moving averages, RSI, MACD, ...\) +* Social analysis \(Telegram, Reddit and Google\) +* Evaluator signals interpretations \(strategies\) +* Orders creation and followup \(trading modes\) +* User interfaces and notifications \(web, telegram\) +* [Backtesting](/guides/octobot-usage/backtesting) data files reading and writing \(.data\) +* Exchanges fixes \(to handle exchange specific behaviors\) + +There is no limit to the things OctoBot can handle: everything that can be +coded can be used by OctoBot through a tentacle. It is possible to create a +new tentacle to add a new tool to OctoBot or to build on an existing one and improve it. + +## Développer une nouvelle Tentacle + +The most efficient way to create a new tentacle is to build on top of an +existing one to add features to it. It is of course also possible to create +a new completely new tentacle. + + +To create a tentacle improving an existing one, all you need to do is to use +the existing tentacle folder as a template \(to create a +[tentacle package](/guides/octobot-tentacles-development/create-a-tentacle-package)\) and extend the existing +tentacle you want to improve and re-implement the methods you want to change +in the package's python file. + +Examples: + +**RedditForumEvaluator** is a simple Reddit evaluator available by default +in `tentacles/Evaluator/Social/forum_evaluator/forum.py`. Let's say you want +to implement **SuperRedditForumEvaluator** which is a better Reddit evaluator. + +Create the `tentacles/Evaluator/Social/super_forum_evaluator/` +[tentacle package](/guides/octobot-tentacles-development/create-a-tentacle-package) based on +`tentacles/Evaluator/Social/forum_evaluator` and start coding the the python file. + +```python + + +class SuperRedditForumEvaluator(Socials.RedditForumEvaluator): + # _get_sentiment is the RedditForumEvaluator method taking an entry and + # returning a number representing the "bullishness" of the entry. + # to change this part only, just redefine this method here + def _get_sentiment(self, entry): + # your new content + sentiment = 1 + # some advanced entry analysis to set sentiment value + return sentiment +``` + +**SimpleStrategyEvaluator** is a strategy available by default in +`tentacles/Evaluator/Strategies/mixed_strategies_evaluator/mixed_strategies.py`. +Create the `tentacles/Evaluator/Social/super_simple_strategy_evaluator/` +[tentacle package](/guides/octobot-tentacles-development/create-a-tentacle-package) based on +`tentacles/Evaluator/Strategies/mixed_strategies_evaluator` and start coding the the python file. + +```python + + +class SuperSimpleStrategyEvaluator(SimpleStrategyEvaluator): + + # _trigger_evaluation is the methods called when OctoBot is + # asking for a strategy evaluation + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + exchange_name, + cryptocurrency, + symbol, + time_frame): + final_evaluation = 0 + # some advanced computations to set final_evaluation value + + # update self.eval_note to store the strategy result + self.eval_note = final_evaluation + # finally, call self.strategy_completed to notify that + # trading modes should wake up after this update + await self.strategy_completed(cryptocurrency, symbol) +``` + diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/customize-your-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/customize-your-octobot.md new file mode 100644 index 0000000000..4b3c6e41d6 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/developers/tentacles-dev/customize-your-octobot.md @@ -0,0 +1,116 @@ +--- +title: "Personnaliser votre OctoBot" +description: "Apprenez comment personnaliser votre OctoBot en installant, créant et partageant des tentacles personnalisés et des packages de tentacles." +sidebar_position: 3 +--- + + + +# OctoBot est personalisable ! + +:::info + La traduction française de cette page est en cours. +::: + +You can easily create or add existing tentacles to your OctoBot. + +Tentacles are evaluators \(using social media, trend analysis, news, ...\), strategies +\(interpretations of evaluator's evaluations\), analysis tools \(implementation of a +bollinger bands in depth analysis, reddit entries reader, ...\) and trading modes. + +![tentacles](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tentacles.jpg) + +OctoBot is available for free with <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">basic implementations of a lot of different evaluators</a> +. The very high modularity in OctoBot's architecture allows it to automatically +look for the most advanced version\(s\) of all the available tentacles and +automatically use them in its trading strategies. + +Therefore, anyone can implement its own version of any evaluator, strategy, analysis +tool and trading modes. It's even possible to use another version provided by someone else ! + +## Tentacles par défaut + +OctoBot default tentacles are automatically installed when first starting your OctoBot. + +You can re-install them anytime using the following command arguments with your OctoBot: +`tentacles --install --all` + +It is also possible to install a tentacles package using the web interface Tentacles tab. + +## Installer des tentacles + +To install tentacles, OctoBot can either install a tentacle package bundle or a single +tentacle from a local folder. + +Note: +VSCode & PyCharm run configuration examples of the following commands are described in the [Octobot developer environment](/developers/environment/setup-your-environment). + +### Créer un tentacle package bundle + +Tentacle package bundles are the prefered way to share tentacles. + +Steps to create a tentacles package bundle from a local folder: + +1. Make sure it follows the <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">OctoBot-Tentacles folders architecture</a> + to properly locate tentacles to be installed. There is no need to create empty folders + but packages with content have to be at the [appropriate path](create-a-tentacle-package#the-tentacle-package-folder).\ + Example: a trading mode should be located at **Trading/Mode/name_of_your_trading_mode** + in your bundle. +2. Call OctoBot with the following arguments: + +```bash +tentacles --pack "../tentacles_export.zip" --directory "path/to/your/local/tentacle_bundle" +``` + +> You now have a **tentacles_export.zip** file. It is a tentacle bundle containing your +> tentacles packages that you can install and share. + +### Installer un tentacle package bundle + +To install a package bundle, call OctoBot with the following arguments: + +```bash +tentacles --install --all --location "path/to/your/tentacles_export.zip" +``` + +You can also make it available from an URL and later install it via (for example) : + +```bash +tentacles --install --all --location "https://my.tentacles.com/pack_name" +``` + +> Installing a tentacle package will replace any existing source file that share the +> same name at the same path. + +### Installer un tentacle package unique + +It is also possible to install a single tentacle package from a local folder using +the following arguments: + +```bash +tentacles --single-tentacle-install "path/to/your/tentacle/to/install" Evaluator/TA +``` + +Please note that in this command, you also need to provide the type of the +tentacle (`Evaluator/TA` in this example). + +### Résolution de problèmes d'installation + +- **TentacleLoader Error when loading _your_tentacle_module_** : This means + the import of your tentacle module failed. Tentacles that can't be imported by + Python can't be used. +- **Python doesn't even see my tentacle**: Your tentacle module has to be imported + in your tentacle package `__init__.py` file. Your tentacle package has also to be imported + in the parent folder `__init__.py`. Please note that this parent `__init__.py` file is + managed by OctoBot and should already be properly filled when installing a tentacle bundle. +- **Python sees my tentacle but I can't see it on the web interface**: Your tentacle + classes have to be listed in your **user/profiles/NameOfYourProfile/tentacles_config.json**. + The web interface uses this file to list tentacles and check if they are enabled or not. + This file is also kept up to date when installing a tentacle bundle. + +In most cases, issues related to the **parent `__init__.py`** and `tentacles_config.json` +files can be fixed by calling OctoBot with the following arguments: + +```bash +tentacles --repair +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/developers.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/developers.md new file mode 100644 index 0000000000..63c994a977 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/developers.md @@ -0,0 +1,41 @@ +--- +title: "Démarrer en tant que développeur" +description: "Vous vous demandez comment personnaliser OctoBot ? Apprenez comment vous lancer en tant que développeur OctoBot, créez vos propres tentacles et contribuez au robot de trading open source." +sidebar_position: 21 +--- + + + +# Développeurs OctoBot + +:::info + Pour les développeurs et contributeurs de l' + <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">OctoBot open source</a>. +::: + +## 1 - Apprendre les bases de Python + +Regarder une heure de cours vidéo Python pour débutants devrait suffire pour commencer. + +## 2 - Configurer votre environnement de développer OctoBot + +Une fois configuré, vous allez adorer l'[environnement de développement d'OctoBot](/guides/octobot-developers-environment/setup-your-environment). + +## 3 - Découvrir comment créer des tentacles + +Les Tentacles Octobot sont des modules/extensions pour OctoBot. + +Un "tentacle" peut être ce que vous voulez qu'il soit. Par exemple: +- Un indicateur technique +- Une stratégie de trading personnalisée en Python +- Un outil pour envoyer des notifications sur Discord +- Une amélioration de l'interface web +- Ou quelque chose de complètement différent + +Enfin, les tentacles peuvent être regroupées dans un bundle pour être partagées avec d'autres utilisateurs d'OctoBot. + +Consultez [le guide de personnalisation OctoBot](/guides/octobot-tentacles-development/customize-your-octobot) pour en savoir plus. + +## 4 - Contribuer à OctoBot + +Consultez <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/CONTRIBUTING.md" rel="nofollow">nos directives de contribution</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges.md new file mode 100644 index 0000000000..7cd134cd56 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges.md @@ -0,0 +1,71 @@ +--- +title: "Supported Exchanges" +description: "Discover the many exchanges supported by OctoBot. Trade on Binance, OKX, Kucoin, Bybit, Crypto.com, HTX, Coinbase, Bitget, HollaEx, BingX, MEXC and many more." +sidebar_position: 7 +--- + +# Exchanges in OctoBot + +:::info + For users of the open source OctoBot. +::: + +## Officially supported exchanges + +- [Binance](exchanges/binance) +- [OKX](exchanges/okx) +- [Kucoin](exchanges/kucoin) +- [Coinbase](exchanges/coinbase) +- [Binance US](exchanges/binance-us) +- [Bybit](exchanges/bybit) +- [Crypto.com](exchanges/cryptocom) +- [HTX](exchanges/htx) +- [Bitget](exchanges/bitget) +- [Hyperliquid](exchanges/hyperliquid) +- [BingX](exchanges/bingx) +- [MEXC](exchanges/mexc) +- [CoinEx](exchanges/coinex) +- [BitMart](exchanges/bitmart) +- [HollaEx](exchanges/hollaex) +- [Phemex](exchanges/phemex) +- [GateIO](exchanges/gateio) +- [Ascendex](exchanges/ascendex) + +## Partner exchanges - Support OctoBot + +As the OctoBot team, **our goal is to keep providing the [open source OctoBot](/guides/octobot-installation/install-octobot-on-your-computer) for free**. However developing and maintaining the project comes at a cost. Therefore we rely on exchanges partnerships to propose the most convenient way to support OctoBot. + +By using OctoBot on real trading with our partner exchanges, you contribute to support the project and it won't cost you any money. + +## Community tested exchanges + +- [Kraken](exchanges/kraken) +- [Bitstamp](exchanges/bitstamp) +- [Bitfinex](exchanges/bitfinex) +- [Poloniex](exchanges/poloniex) + +## Exchanges support + +OctoBot uses <a href="https://github.com/ccxt/ccxt" rel="nofollow">ccxt</a> to connect to exchanges. In theory, any exchange that is <a href="https://github.com/ccxt/ccxt/wiki/Exchange-Markets" rel="nofollow">supported on ccxt</a> should work. However only partner, and officially supported exchanges are regularly tested by the OctoBot Team. + +Using an exchange that is not a partner or officially supported **is at your own risks**. + +### REST + +The REST technology is a HTTP polling based interface where exchanges have to be frequently requested to refresh OctoBot's databases. +It: + +- is **slower**: it might take a few seconds to update prices and orders +- can handle a **limited** amount of requests per seconds due to exchanges restrictions. Therefore only a limited amount of trading pairs can be handled simultaneously when using a REST interface. + +The REST technology is the default connection on any exchange. + +### Websocket + +The websocket technology allows for permanent channels between exchanges and OctoBot from which exchanges directly push updated information to OctoBot. +It: + +- is **almost instantaneous**: updates are directly pushed to OctoBot when updated on the exchange +- is **limitless** regarding the amount trading pairs that can be handled simultaneously + +The websocket technology is automatically enabled on each exchange when supported. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/ascendex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/ascendex.md new file mode 100644 index 0000000000..cdec637d1b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/ascendex.md @@ -0,0 +1,33 @@ +--- +title: "Ascendex" +description: "Tradez sur les marchés spot d'Ascendex avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 15 +--- + + + +# Trader sur Ascendex + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | 🚧 | + +### Trading de futures + +| Trading futures | 🚧 | +| :-------------- | :-- | +| Simulation | 🚧 | +| REST | 🚧 | +| Websocket | 🚧 | +| Testnet | 🚧 | + +<a href="https://ascendex.com/en-us/register?inviteCode=ULWCFWL6G" rel="nofollow">Formulaire d'inscription Ascendex</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/ascendex/account-setup) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/ascendex/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/ascendex/account-setup.md new file mode 100644 index 0000000000..b9d78d0cb3 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/ascendex/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Ascendex et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte Ascendex + +## Créer un compte + +- Remplissez ce <a href="https://ascendex.com/en-us/register?inviteCode=ULWCFWL6G" rel="nofollow">formulaire d'inscription Ascendex</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binance.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binance.md new file mode 100644 index 0000000000..bfab7fd91e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binance.md @@ -0,0 +1,50 @@ +--- +title: "Binance" +description: "Tradez les marchés spot et future de Binance avec OctoBot. Utilisez le trading virtuel, testnet ou vos fonds réels. Profitez des connexions REST et websockets." +sidebar_position: 1 +--- + + + +# Trader sur Binance + +:::info + La traduction française de cette page est en cours. +::: + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +### Trading de futures + +| Trading futures | ✅ | +| :-------------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +<a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">Formulaire d'inscription Binance</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/binance/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** + +## Détails du partneriat + +Binance has 2 requirements for an account to support OctoBot: + +- A relatively new account \(after july 1st 2021\) +- 528112221 as a referral id (or no referral id) on the account + +In case your current account is not meeting these criteria, you can support OctoBot by creating a new account on <a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">accounts.binance.com/en/register?ref=528112221</a>. + +Please note that thanks to <a href="https://www.binance.com/en-NG/support/faq/360037037312" rel="nofollow">Internal Transfers</a>, you can move your funds quickly and for free between Binance accounts. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binance/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binance/account-setup.md new file mode 100644 index 0000000000..276ebfbc35 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binance/account-setup.md @@ -0,0 +1,64 @@ +--- +title: "Configuration de compte" +description: "Découvrez comment se connecter à votre compte Binance en utilisant une clé d'API. Utilisez notre lien d'invitation pour et supportez OctoBot" +sidebar_position: 1 +--- + + + +# Configuration de compte Binance + +:::info + La traduction française de cette page est en cours. +::: + +## Créer un compte + +- Remplissez ce <a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">formulaire d'inscription Binance</a> + +## Générer ses clés d'API + +Si vous vous demandez ce qu'est une `API Key` et pourquoi OctoBot utilise cette méthode, jetez un œil à notre [présentation des API Keys de plateformes d'échange](/investing/what-is-an-exchange-api-key). + +### Générer ses clés + +- Sign into your Binance account +- Click on your profile in the top right corner +- Click on `API Management` + +![Binance-Create-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/Binance/create_api_key.png) + +### Configurer ses clés d'API + +- Give a label to your API Key. + +![Binance-Name-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/Binance/name_api_key.png) + +- Binance will ask a security confirmation to continue. +- OctoBot needs the `Enable Reading` permission to be able to pull in balances from Binance and `Enable Spot & Margin Trading` permission to create new orders. Click on `Edit restrictions` to enable `Enable Spot & Margin Trading`. + +![Binance-created-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/Binance/created.png) + +![Binance-Updated-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/Binance/allow_trade.png) + +- Click on `Save` save the permission update. + +![Binance-Final-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/Binance/final.png) + +## Ajouter ses clés d'API sur OctoBot + +### Ajouter son compte Binance + +- Start your OctoBot +- Click on `Accounts` tab +- Click on `Exchanges` on the left menu +- Click on the selector and search `Binance` +- Click on `ADD` + +### Ajouter ses clés d'API Binance + +- Copy and paste `API Key` from Binance to your OctoBot `API Key` field +- Copy and paste `Secret Key` from Binance to your OctoBot `API Secret` field +- Leave the `API Password` as is + +![OctoBot-Validate-Credentials](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/Binance/enter_binance.png) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binanceus.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binanceus.md new file mode 100644 index 0000000000..10bfeea9de --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/binanceus.md @@ -0,0 +1,19 @@ +--- +title: "Binance.us" +description: "Tradez sur les marchés spot de Binance.us avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST et websocket." +sidebar_position: 3 +--- + +# Trader sur Binance.us + +## Supporté par OctoBot + +Selectionner l'échange `binanceus` pour trader sur Binance.us avec OctoBot. + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bingx.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bingx.md new file mode 100644 index 0000000000..9f608fff7b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bingx.md @@ -0,0 +1,35 @@ +--- +title: "BingX" +description: "Tradez sur les marchés spot de Bingx avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 8 +--- + + + +# Trader sur BingX + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | 🚧 | + +### Trading de futures + +| Trading futures | 🚧 | +| :-------------- | :-- | +| Simulation | 🚧 | +| REST | 🚧 | +| Websocket | 🚧 | +| Testnet | 🚧 | + +<a href="https://bingx.com/invite/Z4UUVX" rel="nofollow">Formulaire d'inscription BingX</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/bingx/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bingx/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bingx/account-setup.md new file mode 100644 index 0000000000..56e099ac09 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bingx/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Bingx et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte BingX + +## Créer un compte + +- Remplissez ce <a href="https://bingx.com/invite/Z4UUVX" rel="nofollow">formulaire d'inscription BingX</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitfinex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitfinex.md new file mode 100644 index 0000000000..b8b496fc0e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitfinex.md @@ -0,0 +1,17 @@ +--- +title: "Bitfinex" +description: "Tradez sur les marchés spot de Bitfinex avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 8 +--- + +# Trader sur Bitfinex + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitget.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitget.md new file mode 100644 index 0000000000..caf68f0a98 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitget.md @@ -0,0 +1,26 @@ +--- +title: "Bitget" +description: "Tradez sur les marchés spot de Bitget avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 7 +--- + + + +# Trader sur Bitget + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | 🚧 | + +<a href="https://www.bitget.site/en/referral/register?from=referral&clacCode=QBBLW2XR" rel="nofollow">Formulaire d'inscription Bitget</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/bitget/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitget/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitget/account-setup.md new file mode 100644 index 0000000000..9b5997118e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitget/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Bitget et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte Bitget + +## Créer son compte sur Bitget + +- Remplissez ce <a href="https://www.bitget.site/en/referral/register?from=referral&clacCode=QBBLW2XR" rel="nofollow">formulaire d'inscription Bitget</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitmart.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitmart.md new file mode 100644 index 0000000000..59c21d47d4 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitmart.md @@ -0,0 +1,26 @@ +--- +title: "BitMart" +description: "Tradez sur les marchés spot de BitMart avec OctoBot. Utilisez le trading virtuel, testnet ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 11 +--- + + + +# Trader sur BitMart + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +<a href="https://www.bitmart.com/invite/c6eRCp/en" rel="nofollow">Formulaire d'inscription BitMart</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/bitmart/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitmart/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitmart/account-setup.md new file mode 100644 index 0000000000..6f202726ad --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitmart/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour BitMart et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte BitMart + +## Créer son compte sur BitMart + +- Remplissez ce <a href="https://www.bitmart.com/invite/c6eRCp/en" rel="nofollow">formulaire d'inscription BitMart</a>. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitstamp.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitstamp.md new file mode 100644 index 0000000000..448aad0e4d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitstamp.md @@ -0,0 +1,17 @@ +--- +title: "Bitstamp" +description: "Tradez sur les marchés spot de Bitstamp avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 9 +--- + +# Trader sur Bitstamp + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | 🚧 | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bittrex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bittrex.md new file mode 100644 index 0000000000..2968e10961 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bittrex.md @@ -0,0 +1,23 @@ +--- +title: "Bittrex" +description: "Tradez sur les marchés spot de Bittrex avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 10 +--- + + + +# Trader sur Bittrex + +:::warning + La plateforme d'échange Bittrex a cessé d'opérer. Si elle fait son retour, OctoBot devrait la supporter. +::: + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitvavo.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitvavo.md new file mode 100644 index 0000000000..2a19b87e15 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bitvavo.md @@ -0,0 +1,17 @@ +--- +title: "Bitvavo" +description: "Tradez sur les marchés spot de Bitvavo avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 11 +--- + +# Trader sur Bitvavo + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | 🚧 | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bybit.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bybit.md new file mode 100644 index 0000000000..cf27e1265a --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bybit.md @@ -0,0 +1,43 @@ +--- +title: "Bybit" +description: "Tradez les marchés spot et future de Bybit avec OctoBot. Utilisez le trading virtuel, testnet ou vos fonds réels. Profitez des connexions REST et websockets." +sidebar_position: 4 +--- + + + +# Trader sur Bybit + +## Supporté par OctoBot + +:::info + Les comptes unifiés Bybit ne sont pas encore entièrement supportés. Pour + trader sur Bybit avec OctoBot, il est nécessaire d'utiliser un compte + standard. Vous pouvez facilement passer d'un compte unifié à standard en + utilisant des sous-comptes. Transférer des fonds entre les sous-comptes est + instantané et gratuit. +::: + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +### Trading de futures + +| Trading futures | ✅ | +| :-------------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +<a href="https://www.bybit.com/en-US/invite?ref=QW6O5" rel="nofollow">Formulaire d'inscription Bybit</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/bybit/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bybit/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bybit/account-setup.md new file mode 100644 index 0000000000..88c1907a0c --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/bybit/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Bybit et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte Bybit + +## Créer un compte + +- Remplissez ce <a href="https://www.bybit.com/en-US/invite?ref=QW6O5" rel="nofollow">formulaire d'inscription Bybit</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinbase.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinbase.md new file mode 100644 index 0000000000..a564ec7824 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinbase.md @@ -0,0 +1,29 @@ +--- +title: "Coinbase" +description: "Tradez sur les marchés spot de Coinbase avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 4 +--- + + +# Trader sur Coinbase + +## Supporté par OctoBot + +Selectionner l'échange `coinbase` pour trader sur Coinbase avec OctoBot. + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ❌ | +| Testnet | ❌ | + + +## Permissions requises pour un compte Coinbase +Pour trader sur Coinbase avec OctoBot, les permissions suivantes sont nécessaires sur votre API Key. +- `View` +- `Trade` + + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinex.md new file mode 100644 index 0000000000..378127e138 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinex.md @@ -0,0 +1,26 @@ +--- +title: "CoinEx" +description: "Tradez sur les marchés spot de CoinEx avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 10 +--- + + + +# Trader sur CoinEx + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +<a href="https://www.coinex.com/register?refer_code=d6muk" rel="nofollow">Formulaire d'inscription CoinEx</a> avec bonus de bienvenue. + +[Configuration de compte](coinex/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinex/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinex/account-setup.md new file mode 100644 index 0000000000..edd5461944 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/coinex/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour CoinEx et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte CoinEx + +## Créer un compte + +- Remplissez ce <a href="https://www.coinex.com/register?refer_code=d6muk" rel="nofollow">formulaire d'inscription CoinEx</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/crypto-com.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/crypto-com.md new file mode 100644 index 0000000000..017f13427f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/crypto-com.md @@ -0,0 +1,22 @@ +--- +title: "Crypto.com" +description: "Tradez sur les marchés spot de Crypto.com avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 5 +--- + + +# Trader sur Crypto.com + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | 🚧 | + +[Configuration de compte](/guides/exchanges/crypto-com/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/crypto-com/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/crypto-com/account-setup.md new file mode 100644 index 0000000000..f362cbfa00 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/crypto-com/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Crypto.com et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte Crypto.com + +## Créer son compte sur Crypto.com + +- Remplissez ce <a href="https://auth-x.crypto.com/users/sign_up" rel="nofollow">formulaire d'inscription Crypto.com</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/gateio.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/gateio.md new file mode 100644 index 0000000000..87513881ea --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/gateio.md @@ -0,0 +1,26 @@ +--- +title: "GateIO" +description: "Tradez sur les marchés spot de GateIO avec OctoBot. Utilisez le trading virtuel, testnet ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 14 +--- + + + +# Trader sur GateIO + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +<a href="https://www.gate.io/signup/VLFGBLTDVG?ref_type=103" rel="nofollow">Formulaire d'inscription GateIO</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/gateio/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/gateio/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/gateio/account-setup.md new file mode 100644 index 0000000000..92d19c56db --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/gateio/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour GateIO et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte GateIO + +## Créer son compte sur Gate.io + +- Remplissez ce <a href="https://www.gate.io/signup/VLFGBLTDVG?ref_type=103" rel="nofollow">formulaire d'inscription Gate.io</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hitbtc.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hitbtc.md new file mode 100644 index 0000000000..2a2927ab03 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hitbtc.md @@ -0,0 +1,18 @@ +--- +title: "HitBTC" +description: "Tradez sur les marchés spot de HitBTC avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 12 +--- + +# Trader sur HitBTC + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | 🚧 | +| Testnet | ❌ | + diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hollaex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hollaex.md new file mode 100644 index 0000000000..967a36a94e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hollaex.md @@ -0,0 +1,23 @@ +--- +title: "HollaEx" +description: "Tradez sur les marchés spot des échanges HollaEx-powered avec OctoBot. Utilisez le trading virtuel ou vos fonds. Profitez des connexions REST et websockets" +sidebar_position: 12 +--- + + +# Trader sur HollaEx + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +[Configuration de compte](/guides/exchanges/hollaex/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** + diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hollaex/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hollaex/account-setup.md new file mode 100644 index 0000000000..a6c22745b6 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hollaex/account-setup.md @@ -0,0 +1,71 @@ +--- +title: "Configuration de compte" +description: "Découvrez comment vous connecter à la plateforme d'échange HollaEx de votre choix et automatiser vos stratégies avec OctoBot" +sidebar_position: 1 +--- + + + +# Configuration de compte HollaEx + +:::info + La traduction française de cette page est en cours. +::: + +> HollaEx is an open-source white label exchange: OctoBot is compatible with every HollaEx powered exchange. + +An API Key can be considered as a username that is generating to allow access to data. + +An API Secret, also referred to as API Private Key is simply a password used in combination with an API Key. + +## Créer un compte + +- Fill the on the HollaEx powered exchange you wish to trade on (or on <a href="https://hollaex.com" rel="nofollow">hollaex.com</a> to use HollaEx's demo exchange). + +## Générer ses clés d'API + +### Générer ses clés + +- Sign into your Exchange account +- Click on your profile in the top right corner. +- Click on `Security` +- Click on `API Keys` +- Click on `Generate API Key` + +![HollaEx-Create-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/HollaEx/HollaEx-My-Api.png) + +### Configurer ses clés d'API + +- Give a name to your API Key +- OctoBot needs the `Read` function to be able to pull in balances from HollaEx and `Trade` to create new orders. +- Click on Submit. + +![HollaEx-Name-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/HollaEx/HollaEx-Create-key.png) + +## Ajouter ses clés d'API sur OctoBot + +### Ajouter son compte HollaEx + +- Start your OctoBot +- Click on `Accounts` tab +- Click on `Exchanges` on the left menu +- Click on the selector and search `hollaex` +- Click on `ADD` + +### Entrer l'adresse de la plateforme d'échange HollaEx + +Optional: If you are connecting to an exchange that is based on HollaEx but is not https://www.hollaex.com/, you can enter its url on the HollaEx configuration. + +![HollaEx-open-config](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/HollaEx/HollaEx-OctoBot-open-exchange-config.png) + +![HollaEx-url-config](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/HollaEx/HollaEx-url-config.png) + +Note: after changing the url, you will need to restart OctoBot for it to take your new exchange url into account. + +### Ajouter ses clés d'API HollaEx + +- Copy and paste `API Key` from HollaEx to your OctoBot `API Key` field +- Copy and paste `Secret Key` from HollaEx to your OctoBot `API Secret` field + ![OctoBot-Validate-Credentials](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/HollaEx/HollaEx-OctoBot-Add-Exchange-Creds.png) +- Click on `Save And restart` + ![OctoBot-Validate-Credentials](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/HollaEx/HollaEx-Save-And-Restart.png) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/htx.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/htx.md new file mode 100644 index 0000000000..5df59057bf --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/htx.md @@ -0,0 +1,26 @@ +--- +title: "HTX" +description: "Tradez sur les marchés spot de HTX (Huobi) avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 6 +--- + + + +# Trader sur HTX - précédemment Huobi + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +<a href="https://www.htx.com/invite/en-us/1f?invite_code=nwcu2223" rel="nofollow">Formulaire d'inscription HTX</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/htx/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/htx/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/htx/account-setup.md new file mode 100644 index 0000000000..a2fbcb96b9 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/htx/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour HTX (précédemment Huobi) et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte HTX - précédemment Huobi + +## Créer son compte sur HTX + +- Remplissez ce <a href="https://www.htx.com/invite/en-us/1f?invite_code=nwcu2223" rel="nofollow">formulaire d'inscription HTX</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hyperliquid.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hyperliquid.md new file mode 100644 index 0000000000..dd26964ccf --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/hyperliquid.md @@ -0,0 +1,53 @@ +--- +title: "Hyperliquid" +description: "Tradez sur les marchés spot de Hyperliquid avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 5 +--- + +# Trader sur Hyperliquid + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +Veuillez noter que le trading de futures sur Hyperliquid n'est pas encore pris en charge par OctoBot. + +### Connecter OctoBot à un compte Hyperliquid + +Pour trader sur votre compte Hyperliquid avec OctoBot, vous devrez créer une clé API associée à votre compte Hyperliquid. Voici un guide étape par étape. + +1. Connectez-vous à votre compte Hyperliquid et ouvrez les paramètres d'API +<div style={{textAlign: "center"}}> + ![hyperliquid go to api settings](/images/guides/hyperliquid/hyperliquid-go-to-api-settings.png) +</div> +2. Créez une nouvelle clé API: + - Entrez un nom pour votre clé + - Cliquez sur "Générer" pour créer une adresse aléatoire +<div style={{textAlign: "center"}}> + ![hyperliquid api enter name and generate](/images/guides/hyperliquid/hyperliquid-api-enter-name-and-generate.png) +</div> + - Autorisez le portefeuille API +<div style={{textAlign: "center"}}> + ![hyperliquid api click authorize](/images/guides/hyperliquid/hyperliquid-api-click-authorize.png) +</div> +3. Définissez la durée maximale de validité en cliquant sur `MAX` +<div style={{textAlign: "center"}}> + ![hyperliquid add api days and copy private key](/images/guides/hyperliquid/hyperliquid-add-api-days-and-copy-private-key.png) +</div> +4. Copiez la clé privée dans le champ `API Secret` d'Hyperliquid dans OctoBot +5. Autorisez votre clé API (cela peut nécessiter une signature de votre portefeuille) +<div style={{textAlign: "center"}}> + ![hyperliquid api click authorize from popup](/images/guides/hyperliquid/hyperliquid-api-click-authorize-from-popup.png) +</div> +6. Copiez la clé publique de votre compte Hyperliquid dans le champ `API Key` d'Hyperliquid dans OctoBot et cliquez sur `Save` +<div style={{textAlign: "center"}}> + ![hyperliquid copy public key](/images/guides/hyperliquid/hyperliquid-copy-public-key.png) +</div> + +Votre compte Hyperliquid est maintenant connecté à votre OctoBot. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kraken.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kraken.md new file mode 100644 index 0000000000..657d791d9d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kraken.md @@ -0,0 +1,19 @@ +--- +title: "Kraken" +description: "Tradez sur les marchés spot de Kraken avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 13 +--- + +# Trader sur Kraken + +> Kraken is not providing free and used data for account balance. OctoBot wont be able to manage a real portfolio correctly. + +### Supporté par OctoBot + +### Trading spot +| Trading spot | ⚠ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ⚠ | +| Websocket | ✅ | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kucoin.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kucoin.md new file mode 100644 index 0000000000..098557a1a2 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kucoin.md @@ -0,0 +1,35 @@ +--- +title: "Kucoin" +description: "Tradez les marchés spot et future de Kucoin avec OctoBot. Utilisez le trading virtuel, testnet ou vos fonds réels. Profitez des connexions REST et websockets." +sidebar_position: 3 +--- + + + +# Trader sur Kucoin + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +### Trading de futures + +| Trading futures | ✅ | +| :-------------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +<a href="https://www.kucoin.com/ucenter/signup?rcode=rJ2Q2T3" rel="nofollow">Formulaire d'inscription Kucoin</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/kucoin/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kucoin/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kucoin/account-setup.md new file mode 100644 index 0000000000..b6e50c2647 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/kucoin/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Kucoin et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte Kucoin + +## Créer son compte sur Kucoin + +- Remplissez ce <a href="https://www.kucoin.com/ucenter/signup?rcode=rJ2Q2T3" rel="nofollow">formulaire d'inscription Kucoin</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/mexc.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/mexc.md new file mode 100644 index 0000000000..60c1c6b072 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/mexc.md @@ -0,0 +1,26 @@ +--- +title: "MEXC" +description: "Tradez sur les marchés spot de MEXC avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 9 +--- + + + +# Trader sur MEXC + +### Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +<a href="https://www.mexc.com/register?inviteCode=1fqGu" rel="nofollow">Formulaire d'inscription MEXC</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/mexc/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/mexc/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/mexc/account-setup.md new file mode 100644 index 0000000000..b9186195ed --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/mexc/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour MEXC et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte MEXC + +## Créer un compte + +- Remplissez ce <a href="https://www.mexc.com/register?inviteCode=1fqGu" rel="nofollow">formulaire d'inscription MEXC</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okcoin.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okcoin.md new file mode 100644 index 0000000000..c5e4604de2 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okcoin.md @@ -0,0 +1,24 @@ +--- +title: "Okcoin" +description: "Tradez sur les marchés spot d'Okcoin avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 16 +--- + + + +# Trader sur Okcoin + +### Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ❌ | + +<a href="https://www.okcoin.com/join?channelId=600118902" rel="nofollow">Formulaire d'inscription Okcoin</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/okcoin/account-setup) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okcoin/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okcoin/account-setup.md new file mode 100644 index 0000000000..39dc3a293d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okcoin/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Okcoin et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Configuration de compte Okcoin + +## Créer un compte + +- Remplissez ce <a href="https://www.okcoin.com/join?channelId=600118902" rel="nofollow">formulaire d'inscription Okcoin</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okx.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okx.md new file mode 100644 index 0000000000..011dee46b8 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okx.md @@ -0,0 +1,37 @@ +--- +title: "OKX" +description: "Tradez les marchés spot et future d'OKX avec OctoBot. Utilisez le trading virtuel, testnet ou vos fonds réels. Profitez des connexions REST et websockets." +sidebar_position: 2 +--- + + + +# Trader sur OKX + +### Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +### Trading de futures + +Le trading de futures sur OKX est momentanément indisponible sur OctoBot + +| Trading futures | 🚧 | +| :-------------- | :-- | +| Simulation | 🚧 | +| REST | 🚧 | +| Websocket | 🚧 | +| Testnet | 🚧 | + +<a href="https://www.okx.com/join/9403477" rel="nofollow">Formulaire d'inscription OKX</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/okx/account-setup) + +**[Start trading with OctoBot](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okx/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okx/account-setup.md new file mode 100644 index 0000000000..8080cabb27 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/okx/account-setup.md @@ -0,0 +1,66 @@ +--- +title: "Configuration de compte" +description: "Découvrez comment vous connecter à votre compte OKX avec OctoBot. Utilisez notre lien d'invitation pour OKX et supportez le projet en créant votre compte avec" +sidebar_position: 1 +--- + + + +# Configuration de compte OKX + +:::info + La traduction française de cette page est en cours. +::: + +An API Key can be considered as a username that is generating to allow access to data. + +An API Secret, also referred to as API Private Key is simply a password used in combination with an API Key. + +An API Password, also referred to as `Passphrase`, is considered an extra layer of security that is generally user generated. In this instance, you can create an API password to lock the API Key and Secret created on the OKX website. You will only be able to see your API Key and Secret by inputting the password you selected. + +## Créer son compte sur OKX + +- Remplissez ce <a href="https://www.okx.com/join/9403477" rel="nofollow">formulaire d'inscription OKX</a> + +## Générer ses clés d'API + +Si vous vous demandez ce qu'est une `API Key` et pourquoi OctoBot utilise cette méthode, jetez un œil à notre [présentation des API Keys de plateformes d'échange](/investing/what-is-an-exchange-api-key). + +### Générer ses clés + +- Sign into your OKX account +- Click on your profile in the top right corner. +- Click on the `API` +- Click on `Create V5 API Key` + +![OKX-Create-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/OKEx/OKEX-My-Api.png) + +### Configurer ses clés d'API + +- Give a name to your API Key, and set a Passphrase. + > Make sure to remember that Passphrase, as you will need to use it again in a few moments. +- OctoBot needs the `Read` function to be able to pull in balances from OKX and `Trade` to create new orders. +- Click on Confirm, then click on `View` to see your API Key and API Secret. + +![OKX-Configure-API-Key](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/OKEx/OKEX-Create-v5-key.png) + +## Ajouter ses clés d'API sur OctoBot + +### Ajouter son compte OKX + +- Start your OctoBot +- Click on `Accounts` tab +- Click on `Exchanges` on the left menu +- Click on the selector and search `okx` +- Click on `ADD` + +![OctoBot-Add-Exchange](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/OKEx/OKEx-OctoBot-Add-Exchange.png) + +### Ajouter ses clés d'API OKX + +- Copy and paste `API Key` from OKX to your OctoBot `API Key` field +- Copy and paste `Secret Key` from OKX to your OctoBot `API Secret` field +- Enter your OKX `API Password` to OctoBot `API Password` field + ![OctoBot-Validate-Credentials](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/OKEx/OKEx-OctoBot-Add-Exchange-Creds.png) +- Click on `Save And restart` + ![OctoBot-Validate-Credentials](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/docs/OKEx/OKEx-Save-And-Restart.png) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/phemex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/phemex.md new file mode 100644 index 0000000000..d5265e143d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/phemex.md @@ -0,0 +1,24 @@ +--- +title: "Phemex" +description: "Tradez sur les marchés spot de Phemex avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez des connexions REST et websockets." +sidebar_position: 13 +--- + + + +# Trader sur Phemex + +## Supporté par OctoBot + +### Trading spot + +| Trading spot | ✅ | +| :----------- | :-- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | ✅ | +| Testnet | ✅ | + +<a href="https://phemex.com/fr/register-vt1?referralCode=CK8CC5" rel="nofollow">Formulaire d'inscription Phemex</a> avec bonus de bienvenue. + +[Configuration de compte](/guides/exchanges/phemex/account-setup) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/phemex/account-setup.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/phemex/account-setup.md new file mode 100644 index 0000000000..d0cc4d8c8e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/phemex/account-setup.md @@ -0,0 +1,13 @@ +--- +title: "Configuration de compte" +description: "Utilisez notre lien d'invitation pour Phemex et supportez le projet OctoBot en créant votre compte avec ce lien." +sidebar_position: 1 +--- + + + +# Phemex Configuration de compte + +## Créer son compte sur Phemex + +- Remplissez ce <a href="https://phemex.com/fr/register-vt1?referralCode=CK8CC5" rel="nofollow">formulaire d'inscription Phemex</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/poloniex.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/poloniex.md new file mode 100644 index 0000000000..4dc1922cf3 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/poloniex.md @@ -0,0 +1,17 @@ +--- +title: "Poloniex" +description: "Tradez sur les marchés spot de Poloniex avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 14 +--- + +# Trader sur Poloniex + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | 🚧 | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/wavesexchange.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/wavesexchange.md new file mode 100644 index 0000000000..e69d43adba --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/exchanges/wavesexchange.md @@ -0,0 +1,17 @@ +--- +title: "Wavesexchange" +description: "Tradez sur les marchés spot de Wavesexchange avec OctoBot. Utilisez le trading virtuel ou vos fonds sur échange. Profitez de la connexion REST." +sidebar_position: 15 +--- + +# Trader sur Wavesexchange + +## Supporté par OctoBot + +### Trading spot +| Trading spot | ✅ | +| :--- | :--- | +| Simulation | ✅ | +| REST | ✅ | +| Websocket | 🚧 | +| Testnet | ❌ | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/beta-program.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/beta-program.md new file mode 100644 index 0000000000..249e4ae8ab --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/beta-program.md @@ -0,0 +1,42 @@ +--- +title: "Programme de bêta" +description: "Rejoignez le programme de test bêta d'OctoBot et obtenez un accès anticipé aux nouvelles fonctionnalités." +sidebar_position: 1 +--- + + + +# Programme de bêta testeurs OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +The OctoBot Beta Tester program gives you early access to most of the new features that are added to OctoBot. + +It's important to understand that beta testing is not the same as using a finished product. You may encounter +bugs or other issues that have not yet been resolved, and you may be asked to provide feedback on your experience +with the application. +This feedback can be used to help improve the final version of the feature being tested, so it's important to be thorough and honest in your testing. + +## Pourquoi rejoindre le programme ? + +The goal of this program is to test those new features as well as experimenting with improvements on existing ones. + +By joining the OctoBot Beta Tester, you will have early access to new features and improvements on OctoBot cloud by using [app-beta.octobot.cloud](https://app-beta.octobot.cloud/) + +## Qui peut le rejoindre ? + +Registrations are currently open. We are looking for different types of user with different background. No need to be a developer or an advanced trader. + +If you are interested in helping OctoBot project to get better and are willing to try out our new features, we will be glad to have you in the program ! + +## Rejoindre le Programme de bêta testeurs + +The Beta Tester program requires an account on [app-beta.octobot.cloud](https://app-beta.octobot.cloud/). This is a separate account from the one you might have on [octobot.cloud](https://www.octobot.cloud/). + +1. Create an OctoBot cloud [beta account](https://app-beta.octobot.cloud/). +2. Activate `Connect to the beta environment` in your OctoBot `About` tab and restart your OctoBot. +3. You can now login using your [app-beta.octobot.cloud](https://app-beta.octobot.cloud/) account in your OctoBot's `Community` tab. + +> Please note that for as long as `Connect to the beta environment` is enabled, your OctoBot will only be able to interact with the beta ecosystem. Therefore if you want to use features or your account from the public [octobot.cloud](https://www.octobot.cloud/), you will have to disable `Connect to the beta environment` from the `About` tab. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/tentacle-manager.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/tentacle-manager.md new file mode 100644 index 0000000000..3ca630d590 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/tentacle-manager.md @@ -0,0 +1,42 @@ +--- +title: "Tentacles manager" +description: "Apprenez comment installer de nouveaux tentacles (extensions) sur votre OctoBot en utilisant le gestionnaire de tentacles." +sidebar_position: 2 +--- + + + +# Tentacle Manager + +:::info + La traduction française de cette page est en cours. +::: + +OctBot is fully modular, so you can install any modules you want ! + +![tentacles](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tentacles.jpg) + +You can find the + +<a href="https://github.com/Drakkar-Software/OctoBot-Tentacles" rel="nofollow">OctoBot-Tentacles</a> repository all default tentacles (modules) you can create +to custom your own cryptocurrencies trader bot. + +And all the default tentacles package from this repository will be +installed automatically. + +If you want to modify or disable some of them see the developers [customize your OctoBot section](/guides/octobot-tentacles-development/customize-your-octobot). + +## Ajouter de nouveaux packages de tentacles à votre OctoBot + +### Avec l'interface web + +![tentacles_packages](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tentacles_packages.jpg) + +Got to the **Tentacles** tab on the navigation bar (in the advanced part), then go to +**INSTALL TENTACLES PACKAGES** and register the address (local or url) +of the wanted tentacles packages. This will automatically install the +package in your OctoBot. + +## Installer une tentacle spécifique + +Please checkout the [dedicated section on our developers guides](/guides/octobot-tentacles-development/customize-your-octobot#installing-the-tentacle-package-bundle). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/using-octobot-with-a-proxy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/using-octobot-with-a-proxy.md new file mode 100644 index 0000000000..d58d0fb0ae --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-advanced-usage/using-octobot-with-a-proxy.md @@ -0,0 +1,33 @@ +--- +title: "Utiliser un proxy" +description: "Utilisez un proxy HTTP ou HTTPS pour que votre OctoBot se connecte à votre compte d'échange de crypto depuis une adresse IP ou un emplacement prédéfini." +sidebar_position: 3 +--- + + + +# Utiliser OctoBot avec un proxy + +## Pourquoi utiliser un proxy avec votre OctoBot + +Lorsque vous utilisez OctoBot pour automatiser vos stratégies d'investissement ou de trading sur votre plateforme d'échange, vous pouvez vouloir utiliser un <a href="https://fr.wikipedia.org/wiki/Proxy" rel="nofollow">proxy</a> pour émettre des requêtes vers votre échange à partir d'une adresse IP ou d'un emplacement différent de celui où vous vous trouvez actuellement. + +Cela peut être pertinent dans les cas suivants: + +- Vous souhaitez activer le whitelisting des adresses IP et vous aimeriez être certain d'utiliser toujours la même adresse IP pour votre OctoBot, même si votre localisation ou le serveur de votre OctoBot change. +- Vous êtes en déplacement et vous aimeriez continuer à utiliser la même adresse IP pour OctoBot qui s'exécute sur votre ordinateur. + +## Comment utiliser OctoBot avec un proxy HTTP ou HTTPS + +Pour configurer votre OctoBot pour qu'il fasse ses requêtes vers les échanges depuis votre proxy, configurez les variables d'environnement suivantes avant de démarrer votre [OctoBot open source](../octobot): + +- Pour un proxy HTTP (requêtes REST): `EXCHANGE_HTTP_PROXY_AUTHENTICATED_URL` +- Pour un proxy HTTPS (requêtes REST): `EXCHANGE_HTTPS_PROXY_AUTHENTICATED_URL` +- Pour un proxy SOCKS (connexions websocket): `EXCHANGE_SOCKS_PROXY_AUTHENTICATED_URL` + +Ces variables doivent être configurées avec votre URL de proxy complète et OctoBot l'utilisera pour chacune de ses requêtes vers les échanges. + +Exemple avec un proxy HTTPS: +`EXCHANGE_HTTPS_PROXY_AUTHENTICATED_URL=https://username:password@your_proxy.com:8002` + +Veuillez noter que seul l'une des variables `EXCHANGE_HTTP_PROXY_AUTHENTICATED_URL` ou `EXCHANGE_HTTPS_PROXY_AUTHENTICATED_URL` doit être définie pour appliquer un proxy à vos requêtes REST. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/accounts.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/accounts.md new file mode 100644 index 0000000000..908c6681d2 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/accounts.md @@ -0,0 +1,70 @@ +--- +title: "Comptes" +description: "Découvrez comment OctoBot gère vos comptes d'échange, configure ses interfaces web et Telegram, et gère les notifications." +sidebar_position: 4 +--- + + + +# Comptes + +:::info + La traduction française de cette page est en cours. +::: + +The accounts configuration page allows global (cross profile) configuration. It contains exchange API keys, interfaces credentials or keys and notification configuration. + +## Plateformes d'échanges + +![exchange accounts configuration in octobot](/images/guides/configuration/exchange-accounts-configuration-in-octobot.png) + +You can save as many accounts as you want and only trade on those enabled in your profile. + +[Here is the guide helping to setup an exchange for OctoBot](/guides/exchanges) + +## Interfaces + + +![interfaces configuration in octobot](/images/guides/configuration/interfaces-configuration-in-octobot.png) + +Interfaces are ways to connect your OctoBot to other services. + +Here are different page explaining interfaces configuration : + +- [Web](/guides/octobot-interfaces/web) +- [Reddit](/guides/octobot-interfaces/reddit) +- [Telegram](/guides/octobot-interfaces/telegram) +- [Chat GPT](/guides/octobot-interfaces/chatgpt) +- [TradingView](/guides/octobot-interfaces/tradingview) +- [Reddit](/guides/octobot-interfaces/reddit) + +## Notifications + + +![notifications configuration in octobot](/images/guides/configuration/notifications-configuration-in-octobot.png) + +When notifications are enabled, OctoBot will create notifications on all the given medias. These notifications contain the current evaluations of monitored markets as well as created, filled and cancelled orders. + +Different types of notifications are available, it is possible to use any of them, or even all of them. + +### Types de notifications + +- **Global-Info**: General notifications like a startup message or a shutdown message. +- **Price-Alerts**: A price movement is detected and is triggering a new market state. +- **Trades**: An order is created, filled or cancelled. +- **Trading-Script-Alerts**: Any notification related to a scripted trading mode. +- **Other**: Other type of notifications. + +Enable notifications types to tell which types of notifications OctoBot should use. + +### Notifications Telegram + +When selected, notifications will be sent to you on Telegram. + +Telegram notifications use the Telegram service. [See Telegram configuration guide](/guides/octobot-interfaces/telegram) + +### Notifications Web + +When selected, notifications will be sent to you on the web interface. + +Web notifications use the Web service. [See Web interface configuration guide](/guides/octobot-interfaces/web) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/cloud-strategy-in-open-source-and-pro.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/cloud-strategy-in-open-source-and-pro.md new file mode 100644 index 0000000000..a8152f0364 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/cloud-strategy-in-open-source-and-pro.md @@ -0,0 +1,27 @@ +--- +title: "Les stratégies OctoBot cloud" +description: "Apprenez comment utiliser, tester et personnaliser les stratégies de trading OctoBot cloud directement depuis votre OctoBot cloud ou auto-hébergé." +sidebar_position: 3 +--- + +# Utilisez les stratégies OctoBot cloud depuis votre OctoBot + +Depuis OctoBot 1.0.4, vous pouvez tirer profit des stratégies d'OctoBot cloud directement depuis votre [trading bot OctoBot](https://www.octobot.cloud/trading-bot). + +Rendez-vous simple sur l'onglet **Community** de votre OctoBot pour utiliser les stratégies d'OctoBot cloud sur votre robot. + +![télécharger les stratégies d'OctoBot cloud dans votre bot open source](/images/guides/download-octobot-cloud-strategies-in-open-source-bot.png) + +## Personnalisez les stratégies d'OctoBot cloud +Pouvoir utiliser les stratégies d'OctoBot cloud directement depuis votre OctoBot signifie que vous pouvez les explorer et les utiliser avec des fonds simulés ou réels, autant que vous le souhaitez. + +Vous pouvez aussi configurer ces stratégies pour trader différemment, c'est à dire: +- Utiliser d'autres plateformes d'échange +- Trader d'autres cryptomonnaies +- Configurer différemment les indicateurs + +Comme toutes les stratégies, stratégies d'OctoBot cloud utilisent les [trading modes](/guides/octobot-trading-modes/trading-modes) qui peuvent être personalisés. + +## Backtester des stratégies OctoBot cloud personnalisées + +Backtestez les stratégies d'OctoBot cloud en utilisant le moteur de [backtesting](/guides/octobot-usage/backtesting) d'Octobot ou le [Strategy Designer](/guides/octobot-usage/strategy-designer) disponible dans les [trading bot OctoBot](https://www.octobot.cloud/trading-bot). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/custom-profile.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/custom-profile.md new file mode 100644 index 0000000000..364a614597 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/custom-profile.md @@ -0,0 +1,91 @@ +--- +title: "Profil personnalisé" +description: "Apprenez comment créer des profils de configuration personnalisés sur votre OctoBot." +sidebar_position: 5 +--- + + + +# Profil personnalisé + +:::info + La traduction française de cette page est en cours. +::: + +A custom profile allow to customize [strategy and trading mode](/guides/octobot-trading-modes/trading-modes). + +To create a custom profile : + +1. Open an existing profile page +2. Click on duplicate button + +![duplicate octobot profile](/images/guides/configuration/duplicate-octobot-profile.png) + +## Configuration des évaluateurs et trading + + +![custom profile trading modes selector](/images/guides/configuration/custom-profile-trading-modes-selector.png) + +**user/profiles/profile_name/tentacles_config.json** is a configuration file +telling OctoBot which evaluators, strategies and trading modes to use. It is +kept up to date after each [Tentacle Manager](/guides/octobot-advanced-usage/tentacle-manager) +usage (when new elements become available). + +An example of **user/profiles/profile_name/tentacles_config.json** is available <a href="https://github.com/Drakkar-Software/OctoBot-Tentacles/blob/master/profiles/daily_trading/tentacles_config.json" rel="nofollow">as default_tentacles_config.json on github</a>. + +![custom profile evaluator selector](/images/guides/configuration/custom-profile-evaluator-selector.png) + +## Comprendre les fichiers de configuration + +Enabled [evaluators and trading modes](/guides/octobot-trading-modes/trading-modes) are stored in configuration files. You will probably never need to touch them but here is how they work. + +Example of **tentacles_config.json**: + +```json +{ + "RSIMomentumEvaluator": true, + "DoubleMovingAverageTrendEvaluator": true, + "BBMomentumEvaluator": true, + "MACDMomentumEvaluator": true, + "CandlePatternMomentumEvaluator": false, + "ADXMomentumEvaluator": true, + + "InstantFluctuationsEvaluator": true, + + "RedditForumEvaluator": false, + "GoogleTrendStatsEvaluator": true, + + "TempFullMixedStrategiesEvaluator": true, + "InstantSocialReactionMixedStrategiesEvaluator": false +} +``` + +- Here, the first part is about technical analysis evaluators: they are all + activated except for the **CandlePatternMomentumEvaluator**. This means that + any technical evaluator of these types (except **CandlePatternMomentumEvaluator**) + will be used by OctoBot. +- Second part contains only **InstantFluctuationsEvaluator**, OctoBot will + then take real time market moves into account using **InstantFluctuationsEvaluator** only. +- Third part is the social evaluation. Here OctoBot will look at Google + stats using **GoogleTrendStatsEvaluator**. However, OctoBot will not look + a reddit (`"RedditForumEvaluator": false`), therefore + a [Reddit interface](/guides/octobot-interfaces/reddit) configuration is not necessary. +- Last part are the strategies to use. Here only one strategy out of + two is to be used by OctoBot: **TempFullMixedStrategiesEvaluator**. + +### Details pour les développeurs + +Any setting also applies to subclasses of these evaluators. For example +if you add an evaluator extending **ADXMomentumEvaluator**, `"ADXMomentumEvaluator": true` +will tell OctoBot to use the **most advanced ADXMomentumEvaluator** available: if you evaluator +extends **ADXMomentumEvaluator**, your evaluator will be considered more advanced than the **basic +ADXMomentumEvaluator** and OctoBot will use it. See the +developers [customize your OctoBot](/guides/octobot-tentacles-development/customize-your-octobot) +to learn how to add elements to your OctoBot. + +This is valid for any evaluator and strategy. + +Please note that any evaluator or strategy that doesn't extend +an element in **tentacles_config.json** has to be added to this file otherwise will +be ignored by OctoBot. When using the [Tentacle Manager](/guides/octobot-advanced-usage/tentacle-manager.md) +to install tentacles, this is done automatically. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/exchanges.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/exchanges.md new file mode 100644 index 0000000000..a777dbf4f5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/exchanges.md @@ -0,0 +1,82 @@ +--- +title: "Plateformes d'échanges" +description: "Apprenez comment configurer vos comptes d'échange pour que votre OctoBot puisse trader en utilisant vos fonds d'échange réels ou de l'argent simulé." +sidebar_position: 7 +--- + + + +# Plateformes d'échange + +:::info + La traduction française de cette page est en cours. +::: + + +To know more about an exchange support in OctoBot, please have a look at [the exchange summary](/guides/exchanges). + +## Configuration depuis l'interface web + + +Octobot reads trading data (prices, volumes, trades, etc) from exchanges. At least one exchange +is required for OctoBot to perform trades. In [simulation mode](/guides/octobot-usage/simulator), +exchange API keys configuration is not necessary. + +![exchange accounts configuration in octobot](/images/guides/configuration/exchange-accounts-configuration-in-octobot.png) + +You can configure OctoBot's exchanges using the [web interface](/guides/octobot-interfaces/web) +**configuration** tab. + +## Configuration manuelle + + +In **user/config.json**, find this lines: + +``` json +"exchanges": { + +} +``` + +Edit this lines and add the exchange(s) you want to use. + +In OctoBot configuration, exchange connection info are encrypted. To manually add exchange configuration, you can add your info directly into your **user/config.json** file, OctoBot will then take care of the encryption for you. + +If you want to encrypt your exchange keys before starting OctoBot, you +can use the following instructions: + +Start the OctoBot with option **--encrypter** like below : + +``` bash +python start.py --encrypter +``` + +And copy and paste your api-key and api-secret to your configuration file (see example below). + +Example with Binance and Coinbase: + +``` json +"exchanges": { + "binance": { + "api-key": "YOUR_BINANCE_API_KEY_ENCRYPTED", + "api-secret": "YOUR_BINANCE_API_SECRET_ENCRYPTED", + "sandboxed": false + }, + "coinbasepro": { + "api-key": "YOUR_EXCHANGE_API_KEY_ENCRYPTED", + "api-secret": "YOUR_EXCHANGE_API_SECRET_ENCRYPTED", + "api-password": "YOUR_EXCHANGE_API_SECRET_ENCRYPTED", + "sandboxed": true + } +} +``` + +- **api-key** is your exchange account API key +- **api-secret** is your exchange account API secret +- **api-password** is your exchange account API password if this exchange is requiring a password. Leave empty otherwise +- **sandboxed** if your exchange is supporting a sandbox(or testnet) mode, allows to trade on this version of the exchange + +## Trading simulé + + +To use the Simulated exchange feature of the Octobot, you have to specify a [trader simulator](/guides/octobot-usage/simulator) configuration. To use an exchange in simulation only, you also have to specify its configuration as described above. For most exchanges, API credentials are not required in simulation mode, adding the exchange with default values is enough. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/premium-octobot-extension.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/premium-octobot-extension.mdx new file mode 100644 index 0000000000..925424d859 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/premium-octobot-extension.mdx @@ -0,0 +1,64 @@ +--- +title: "Extension Premium" +description: "Améliorez votre OctoBot avec l'extension Premium d'OctoBot. Accédez au Strategy Designer, aux webhooks TradingView sécurisés et aux paniers de Crypto." +sidebar_position: 8 +--- + + + +# L'extension premium d'OctoBot + +L'extension Premium d'OctoBot améliore votre OctoBot de façon permanente et simplifie son utilisation. + +## Contenu de L'extension premium d'OctoBot + +### Optimizer ses stratégies avec le Strategy Designer +Utilisez le [Strategy Designer](../octobot-usage/strategy-designer), l'interface d'optimisation et de backtesting de stratégie la plus avancée d'OctoBot. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="V4Z1xUhqWz8" title="Le Strategy Designer d" /> + +Le Strategy Designer est le meilleur outil pour créer vos stratégies de trading OctoBot, il vous permet notamment de : +- Comparer les résultats de backtesting de vos stratégies +- Visualiser le comportement de vos stratégies dans le temps +- Optimiser votre stratégie tout en utilisant un profil en direct différent + +![octobot strategy designer résultats sur doge btc shib](/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png) + +### Connecter ses stratégies TradingView simplement + +Dans la version par défaut d'OctoBot, des [connaissances techniques avancées ou un fournisseur de webhooks externe payant tel que Ngrok](../octobot-interfaces/tradingview/using-a-webhook) sont nécessaires pour se connecter à vos alertes TradingView. + +![tradingview ema stratégie illustration avec 2 acahts and 2 ventes](/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +Utiliser OctoBot avec l'extension Premium d'OctoBot rend l'[automatisation des stratégies TradingView](../octobot-interfaces/tradingview) : +- Simple : Utilisez les webhooks OctoBot cloud - aucune souscription à Ngrok n'est nécessaire pour les webhooks. +- Facile : Vous obtenez une URL de webhook dédiée +- Sécurisée : Bénéficiez du système de webhook sécurisé d'OctoBot cloud + +### Utiliser les paniers de crypto OctoBot cloud + +Les [paniers de crypto](https://www.octobot.cloud/features/crypto-basket) OctoBot cloud sont des stratégies particulières qui facilitent l'investissement dans les meilleures cryptos du marché ou dans des thèmes spécifiques. + +<div style={{textAlign: "center"}}> + ![octobot open source utilisant les paniers de crypto avec extension premium octobot](/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png) +</div> + +Avec l'extension Premium d'OctoBot, vous pouvez suivre tous les paniers de crypto OctoBot cloud directement depuis votre OctoBot open source : votre OctoBot les maintiendra à jour. + +## Comment obtenir l'extension premium d'OctoBot ? +L'extension Premium d'OctoBot peut être achetée directement depuis votre OctoBot open source en utilisant votre compte OctoBot. Après l'achat, elle sera liée à votre compte OctoBot. Cela signifie que la mise à jour ou la réinstallation de votre OctoBot installera automatiquement votre extension tant que vous serez connecté à votre compte OctoBot. + +<div style={{textAlign: "center"}}> +![acheter l'extension premium octobot](/images/guides/premium-octobot-extension/premium-octobot-extension-buy-section.png) +</div> + +L'extension est un achat unique, il n'y a pas d'abonnement mensuel. Une fois que vous l'avez achetée, vous l'avez définitivement. Et cela inclut les améliorations et fonctionnalités futures que nous ajouterons par la suite. + +## L'extension premium d'OctoBot est-elle obligatoire ? +Non, l'extension Premium d'OctoBot est complètement optionnelle. Votre OctoBot fonctionnera très bien avec ou sans elle. + +L'extension ajoute des fonctionnalités pour améliorer certains aspects de votre OctoBot tels que la création d'une stratégie, le suivi des stratégies TradingView ou l'utilisation des paniers de cryptos OctoBot cloud. + +Cependant, si vous préférez utiliser OctoBot sans ces améliorations, vous pouvez tout à fait utiliser la version de base d'OctoBot aussi longtemps que vous le souhaitez. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/profile-configuration.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/profile-configuration.md new file mode 100644 index 0000000000..2bd579ce49 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/profile-configuration.md @@ -0,0 +1,160 @@ +--- +title: "Configuration du profil" +description: "Apprenez comment configurer un profil OctoBot en sélectionnant sa stratégie de trading, ses évaluateurs, les cryptomonnaies, les plateformes d'échange et les paramètres de trading." +sidebar_position: 2 +--- + + + +# Configuration du profile + +:::info + La traduction française de cette page est en cours. +::: + +## Stratégies + +Most [evaluators and trading modes](/guides/octobot-trading-modes/trading-modes) can be configured. + +![octobot trading mode details from profiles](/images/guides/configuration/octobot-trading-mode-details-from-profiles.png) + +To open the configuration interface, click on your strategy configuration icon. + +![trading mode configuration from profiles](/images/guides/configuration/trading-mode-configuration-from-profiles.png) + + +This edition interface is generated according to the user inputs of the evaluator or trading mode to configure. You will find the technical details on the developers section, in [the tentacles configuration docs](/guides/octobot-tentacles-development/create-a-tentacle-package#configuration). + +It is also possible to manually edit each configuration file using a +text editor for JSON. When configurable, each evaluator or trading mode +has a **NameOfTheRelatedClass.json** file in +**user/profiles/profile_name/specific_config**. Note: this file created in your profile after any change in the default configuration of the tentacle. + +### Custom profiles + +When using default profiles, trading modes and strategies configurations can be edited, but not switch to others. +To use other trading modes, strategies or evaluators, you can duplicate a default profile to create a [custom profile](custom-profile) +which can be more deeply configured. +![custom profile trading modes selector](/images/guides/configuration/custom-profile-trading-modes-selector.png) + + +## Monnaies + +![octobot trading pairs settings from profiles](/images/guides/configuration/octobot-trading-pairs-settings-from-profiles.png) + +OctoBot will trade all the cryptocurrencies listed in its configuration. +To tell which cryptocurrencies to trade, add the currency in the +**crypto-currencies** section in +**user/profiles/profile_name/profile.json**. + +In order to keep OctoBot working at its full potential, we recommend to +trade **between 1 and 5** different assets **not to use more than 10 to +15** different assets at the same time, depending on the size of your +available funds. + +### Wildcard + +To tell OctoBot to trade all BTC trading pairs (with BTC as a quote +asset), use the wildcard "\*" instead of a list for "pairs", directly in your profile's profile.json file: + +```json +"crypto-currencies":{ + "Bitcoin": { + "pairs": ["*"], + "quote": "BTC" + } +} +``` + +A "quote" is required to specify the name of the currency to trade +with. + +## Plateformes d'échange + +![octobot exchanges settings from profiles](/images/guides/configuration/octobot-exchanges-settings-from-profiles.png) +For each profile, you can enable the exchanges you want to trade on. + +It is also where you can select if you want to use **spot** or **future** trading on those exchanges. + +## Trading + +![octobot trading settings from profiles](/images/guides/configuration/octobot-trading-settings-from-profiles.png) + +OctoBot can process two types of trading: + +- Real trading using your exchanges' portfolio. +- Simulated trading using any imaginary portfolio. + +### Marché de référence + +The **Reference-market** parameter defines which currency OctBot should +use as a reference, this reference is used to compute profitability and +the portfolio total value + +### Risque + +Any type of trading has its risk parameter. It is a parameter defining +the behavior of the trader, similarly to a real human trader. + +The **Risk** parameter defines the behaviour of OctoBot in an optimism +manner. + +It is a value between 0 and 1: + +- A low risk (closer to 0) will make OctoBot a very safe trader with + few bold moves and mostly small trades. A 0 risk bot is very + pessimistic (regarding its orders creation) and does not expect big + market moves. +- A high risk (closer to 1) will make OctoBot a very active and heavy + trader. A 1 risk bot is very optimistic (regarding its orders + creation) and is expecting significant market moves. + +### Trader + +When the **Enabled** parameter of the **Trader** section is set at **true**, OctoBot will trade +using your real funds from your exchange's accounts. When **false** +OctoBot will never any create a real trade. + +### Télécharger l'historique des trades + +When the **load-trade-history** parameter is set at **true**, OctoBot +will load the account's recent trades for the enabled traded pairs at +startup. This allows to have a view on your account's trade history. +When **false**, OctoBot will only historize trades that happen after the +bot startup. + +### Trader simulé + +Additionally to the real trading system, a [trading simulator](/guides/octobot-usage/simulator) is +available in OctoBot. + +[Here is the guide describing the simulator feature of OctoBot](/guides/octobot-usage/simulator) + +## Fichiers de configuration + +OctoBot configuration is located in the **user** folder: + +- **user/config.json** is the global configuration file, mostly used + to setup the bot exchanges credentials, interfaces and notification + settings. +- **user/profiles/** contains all the [profiles](/guides/octobot-configuration/profiles) + created and imported in your OctoBot. + +OctoBot's web interface allows to easily edit the configuration, +however, it is also possible to manually edit configuration files. +Please be careful when manually editing them or OctoBot won't be able +to read them and wont start. Json file are readable and editable using +any text editor. + +```bash +ERROR root <class 'Exception'>: Error when load config +``` + +This will appear when a configuration file is not a json valid file. + +**user/config.json** is the technical configuration file of OctoBot, an +example is available <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/octobot/config/default_config.json" rel="nofollow">on +github</a>. + +When starting OctoBot, if the **user** folder is missing or incomplete, +it will automatically be created or completed with default values. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/profiles.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/profiles.md new file mode 100644 index 0000000000..c71637a15f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/profiles.md @@ -0,0 +1,33 @@ +--- +title: "Profils" +description: "Un profil OctoBot est une configuration spécifique pour une stratégie de trading particulière qui est enregistrée et peut être partagée. OctoBot est configuré en utilisant ces profils, ce qui permet de sauvegarder et de réutiliser des configurations pour différentes stratégies de trading." +sidebar_position: 1 +--- + + + +# Profils + +:::info + La traduction française de cette page est en cours. +::: + +OctoBot's trading configuration is using profiles (located into +user/profiles). This allows for quick switches between previously set +configurations. Each profile defines a [Trading Mode](/guides/octobot-trading-modes/trading-modes) configuration as well as other settings. + +![octobot trading mode details from profiles](/images/guides/configuration/octobot-trading-mode-details-from-profiles.png) + +Profiles include: + +- Tentacles activations +- Tentacles configurations +- Traded pairs +- Enabled exchanges +- Trading configuration +- Automation configuration + +Login related data (exchange api keys, telegram settings, ...) are not stored in profiles. + + +Profiles can also be [shared and imported](sharing-and-importing-octobot-profiles) between OctoBot's users. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/sharing-and-importing-octobot-profiles.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/sharing-and-importing-octobot-profiles.md new file mode 100644 index 0000000000..c2e1fbc1a0 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-configuration/sharing-and-importing-octobot-profiles.md @@ -0,0 +1,24 @@ +--- +title: "Partager un profil" +description: "Partagez votre configuration d'OctoBot en partageant son profil. Importez le profil de vos amis et utilisez-le depuis votre OctoBot." +sidebar_position: 6 +--- + +# Partager et importer vos profils OctoBot + +## Comment partager un profil OctoBot + +Pour partager un profil OctoBot, ouvrez la configuration de votre profil, cliquez sur "Edit profiles", accédez à la vue d'ensemble de votre profil et cliquez sur "Share". + + +![share octobot profile](/images/guides/configuration/share-octobot-profile.png) + +Votre profil sera téléchargé sous forme de dossier compressé (zip), prêt à être importé par un autre OctoBot. + +## Comment importer un profil OctoBot + +Pour importer un profil OctoBot, ouvrez la configuration de votre profil, cliquez sur "Edit profiles", accédez à la vue d'ensemble d'un profil et cliquez sur "Import". OctoBot vous invitera alors à sélectionner le profil à importer et l'importera. + +![import octobot profile](/images/guides/configuration/import-octobot-profile.png) + +Votre profil importé sera ensuite disponible à côté de vos autres profils, prêt à être utilisé et modifié. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/cloud-install-octobot-on-digitalocean.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/cloud-install-octobot-on-digitalocean.md new file mode 100644 index 0000000000..a7a25e08ed --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/cloud-install-octobot-on-digitalocean.md @@ -0,0 +1,69 @@ +--- +title: "Installation en cloud" +description: "Installez votre OctoBot dans le cloud avec DigitalOcean en quelques minutes et bénéficiez de vos stratégies OctoBot 24h/24" +sidebar_position: 2 +--- + + + +# Installer OctoBot dans le cloud avec DigitalOcean + +## Créer un compte DigitalOcean + +- Créez un compte sur DigitalOcean en suivant ce lien : <a href="https://digitalocean.pxf.io/octobot-app" rel="nofollow">DigitalOcean</a> (ou connectez-vous si vous en avez déjà un). + +- Validez votre compte en ajoutant un moyen de paiement. + +## Démarrer l'application OctoBot + +- Ouvrez la page de l'application <a href="https://digitalocean.pxf.io/octobot-app" rel="nofollow">OctoBot</a> sur la marketplace de Digital Ocean. + +- Cliquez sur "Créer un Droplet OctoBot". + +![DigitalOcean Create Droplet Button](/images/guides/installation/digitalocean/digital-ocean-octobot-app-page.png) + +- Choisissez une région proche de vous. + +![Choix de région du Droplet de DigitalOcean](/images/guides/installation/digitalocean/choose-droplet-location.png) + +- Laissez l'image de l'application OctoBot sélectionnée. + +![DigitalOcean Droplet choose region](/images/guides/installation/digitalocean/digital-ocean-octobot-image.png) + +- Sélectionnez la puissance du serveur que vous voulez. Le minimum requis est l'offre à $6 / mois. + +![DigitalOcean Droplet choose pricing](/images/guides/installation/digitalocean/digital-ocean-droplet-pricing.png) + +- Entrez un mot de passe sécurisé ou une clé SSH. + +![DigitalOcean Droplet choose pricing](/images/guides/installation/digitalocean/digital-ocean-droplet-access.png) + +- Cliquez en bas sur "Create droplet". + +- Attendez que le Droplet démarre. + +![Attente du démarrage du Droplet de DigitalOcean](/images/guides/installation/digitalocean/wait-for-droplet-start.png) + +## Accéder à OctoBot + +- Sur la page du Droplet DigitalOcean, récupérez l'IP du Droplet. Par exemple, dans cet exemple, c'est l'IP `143.198.96.188`. + +![Adresse IP du Droplet DigitalOcean](/images/guides/installation/digitalocean/get-droplet-ip.png) + +- Copiez cette adresse. +- Dans votre navigateur, ouvrez un nouvel onglet et tapez http://$DROPLET_IP. Dans cet exemple il faudrait taper `http://143.198.96.188`. + +<div style={{textAlign: "center"}}> + ![ouvrir l'interface web d'OctoBot avec l'IP du + droplet](/images/guides/installation/digitalocean/open-octobot-with-droplet-ip.png) +</div> + +- Si votre navigateur indique que la connexion n'est pas sécurisée (ce qui est normal car elle n'est pas en HTTPS), acceptez en cliquant sur "continuer vers le site". + +- Après quelques secondes, l'interface web de votre OctoBot devrait apparaître. + +:::warning + Attention : Comme n'importe qui connaissant l'IP de votre OctoBot peut ouvrir + cette interface, il est fortement recommandé d'ajouter un [mot de passe de + protection](/fr/guides/octobot-interfaces/web#prot%C3%A9ger-votre-interface-web). +::: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-on-raspberry-pi.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-on-raspberry-pi.md new file mode 100644 index 0000000000..65cad5e8d3 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-on-raspberry-pi.md @@ -0,0 +1,63 @@ +--- +title: "Avec Raspberry Pi" +description: "Apprenez comment facilement installer et lancer votre OctoBot sur Raspberry Pi en utilisant la version exécutable du bot." +sidebar_position: 5 +--- + + + +# Installer OctoBot sur Raspberry Pi + +:::info + La traduction française de cette page est en cours. +::: + +## 1. Preparing the Raspberry Pi + +1. Install Rapberry OS and configure it. +2. Enable `ssh` as it will be essential for accessing the Raspberry remotely from a local network +3. Create a new user or use the default one and change the password to use a strong password. + +## 2. Install OctoBot + +1. On the Octobot latest release page, download the `OctoBot_linux_arm64` + file: this is the Raspberry Pi x64 compatible version of OctoBot. + +<div style={{textAlign: "center"}}> + **[Get the latest release](https://github.com/Drakkar-Software/OctoBot/releases/latest)** +</div> + +2. Copy the file to the `/home/pi/` folder + Note: here `pi` it is the folder of the `pi` user (default user). + +3. To facilitate this process (when using Windows), you can use <a href="https://winscp.net/eng/index.php" rel="nofollow">WinSCP</a>: it has a graphical interface and works like the Windows "file explorer". It will also be easier to later edit your Raspberry Pi files. + +4. Connect to Raspberry through a terminal using the following command: `ssh pi@192.168.1.XX` replace `pi` by your Raspberry username and `192.168.1.XX` by your Raspberry IP address and enter the password you created in setp 1. + +5. After logging on to the Raspeberry it is necessary to make the file "OctoBot_linux_arm64" into an executable. To do this, still from the terminal, type this command: `sudo chmod +x OctoBot_linux_arm64` + +6. Done. Nothing else is needed! + +## 3. Run OctoBot + +1. To run OctoBot, use the terminal from the previous step or open a new one and go to the folden containing the OctoBot executable and type in `./OctoBot_linux_arm64` + OctoBot starts and creates the necessary folders the first time it runs. + +2. In the Web browser you already have access to your OctoBot through the Raspberry Pi's local IP at the following address: `http://192.168.1.XX:5001` where `192.168.1.XX` is the IP address of your Rapberry Pi. It is the same as the one you use to connect to your Rapberry Pi. + +3. Press `Ctrl-A` then `Ctrl-D`. This will detach your screen session but leave your OctoBot process running. You can now close the terminal. + +## 4. Starting OctoBot automatically + +You might want OctoBot to start automatically when starting your Raspberry Pi. + +To start OctoBot automatically after restarting Raspberry Pi, proceed as follows. +Still from a terminal: + +1. Type in the following command: `crontab -e` +2. Add the following line at the end: `@reboot /home/pi/OctoBot_linux_arm64` where `pi` is your username +3. Save + +In the event of a power outage, your Raspberry Pi will automatically restart your OctoBot and continue executing its configured strategies. + +Also, every time your Raspberry Pi starts up, it will run Octobot and you will be able to access it from your browser. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-on-your-computer.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-on-your-computer.mdx new file mode 100644 index 0000000000..d7d1367efb --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-on-your-computer.mdx @@ -0,0 +1,69 @@ +--- +title: "Installation locale" +description: "Apprenez comment installer OctoBot sur votre propre ordinateur ou serveur en utilisant Docker, l'exécutable ou Python, ainsi que le code open source disponible sur GitHub ou via PIP." +sidebar_position: 1 +--- + + + +# Installer OctoBot sur votre ordinateur + +## Option 1: Avec l'exécutable + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="unbkFUAWXWM" title="Installer OctoBot avec l" /> + +1. Téléchargez la dernière version pour votre système en utilisant <a href="https://github.com/Drakkar-Software/OctoBot/releases/latest" rel="nofollow">la release la plus récente sur GitHub</a>. Il y a un exécutable OctoBot pour: + - [Windows](with-binary) + - [Linux](with-binary) + - [MacOS](with-binary) + - [Raspberry Pi (arm64)](install-octobot-on-raspberry-pi) + +<div style={{textAlign: "center"}}> + **[Télécharger la dernière release](https://github.com/Drakkar-Software/OctoBot/releases/latest)** +</div> +2. Démarrer l'exécutable lancera OctoBot qui ouvrira un terminal et installera +son environnement de configuration. L'interface d'OctoBot s'ouvrira +automatiquement. + +> Sur Windows, il suffit de double-cliquer sur l'exécutable. +> Sur Linux, tapez d'abord `chmod +x OctoBot_linux_x64`, puis `./OctoBot_linux_x64` dans un terminal ouvert à côté de votre exécutable téléchargé pour démarrer OctoBot. + +## Option 2: Avec Docker + + +<YouTube id="JL7ef3bK8SY" title="Installer OctoBot avec Docker" /> + +1. Si vous n'avez pas <a href="https://www.docker.com/" rel="nofollow">Docker</a>, installez le sur votre système. Voici la documentation d'installation docker pour <a href="https://docs.docker.com/engine/install/ubuntu/" rel="nofollow">Ubuntu</a>, <a href="https://docs.docker.com/engine/install/debian/" rel="nofollow">Debian</a> et <a href="https://phoenixnap.com/kb/docker-on-raspberry-pi/" rel="nofollow">Raspberry pie</a>. +2. Téléchargez l'image OctoBot avec la commande suivante:\ + `docker pull drakkarsoftware/octobot:stable` +3. Démarrez votre OctoBot avec cette commande:\ + `docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable` + +L'interface de votre OctoBot est disponible sur le port 5001 de votre système. http://localhost:5001/ ou http://ip-address:5001/ + +Vous trouverez tous les détails concernant l'installation via docker sur [notre guide d'OctoBot avec docker](/guides/octobot-installation/install-octobot-with-docker-video) + +## Option 3: Depuis le code source + +Déployez votre OctoBot depuis le code source afin de pouvoir utiliser un environnement Python local, éditer ou examiner le code source. + +1. Si vous n'avez pas déjà Python 3.10, installez <a href="https://www.python.org/downloads/release/python-31011/" rel="nofollow">Python en version 3.10</a>. +2. Clonez le dépôt OctoBot\ + `git clone https://github.com/Drakkar-Software/OctoBot` +3. Installez les dépendences Python\ + `cd OctoBot`\ + `python3 -m pip install -Ur requirements.txt` +4. Lancez votre OctoBot avec la commande:\ + `python3 start.py` + +L'interface de votre OctoBot est disponible sur le port 5001 de votre système. http://localhost:5001/ ou http://ip-address:5001/ + +Retrouvez tous les détails d'installation d'OctoBot depuis le code source sur notre [guide d'installation avec python et git](/guides/octobot-installation/install-octobot-with-python-and-git). + +## Autre option: utiliser un cloud + +Si vous ne souhaitez pas héberger vous même votre OctoBot, voici le [guide d'installation d'OctoBot sur le cloud DigitalOcean](cloud-install-octobot-on-digitalocean). + +Installer OctoBot sur un serveur permet de simplement laisser fonctionner son OctoBot 24h/24. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-docker-video.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-docker-video.mdx new file mode 100644 index 0000000000..8053c1d36a --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-docker-video.mdx @@ -0,0 +1,160 @@ +--- +title: "Avec Docker" +description: "Apprenez comment installer et démarrer votre OctoBot sur votre propre ordinateur ou serveur (Windows, Mac ou Linux) en utilisant la version Docker du bot." +sidebar_position: 4 +--- + + + +# Installer OctoBot avec Docker en vidéo + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="JL7ef3bK8SY" title="Installer OctoBot avec Docker" /> + +> Pour les systèmes d'exploitation UNIX uniquement + +## Installation rapide + +- Dans ce terminal, entrez la commande pour installer les paquets nécessaires : + +```bash +sudo apt install git build-essential ca-certificates curl gnupg lsb-release -y +``` + +- Ensuite, entrez la commande pour installer Docker : + +```bash +curl -fsSL https://get.docker.com -o get-docker.sh +sh ./get-docker.sh +``` + +- Téléchargez ensuite le fichier docker-compose d'OctoBot : + +```bash +curl -fs https://raw.githubusercontent.com/Drakkar-Software/OctoBot/master/docker-compose.yml -o docker-compose.yml +``` + +- Démarrez OctoBot avec le fichier docker-compose précédemment téléchargé : + +```bash +docker compose up -d +``` + +- Une fois la commande terminée, lancer la commande suivante. + +```bash +sudo usermod -aG docker $USER +``` + +## Installation manuelle + +### Utiliser l'image stable: + +1. Download OctoBot + +```bash +docker pull drakkarsoftware/octobot:stable +``` + +2. Start OctoBot (for linux x64/x86 and raspberry linux arm64/arm32) + +```bash +docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable +``` + +### Utiliser l'image "latest" (instabilité possible): + +1. Download OctoBot latest + +```bash +docker pull drakkarsoftware/octobot:latest +``` + +2. Start OctoBot (for linux x64/x86 and raspberry linux arm64/arm32) + +```bash +docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:latest +``` + +### Comment obtenir les logs console d'OctoBot ? + +```bash +docker logs OctoBot -f +``` + +### Comment arrêter OctoBot ? + +```bash +docker stop OctoBot +``` + +### Comment relancer OctoBot ? + +```bash +docker restart OctoBot +``` + +### Comment mettre à jour OctoBot ? + +```bash +docker pull drakkarsoftware/octobot:stable +docker stop OctoBot +docker rm OctoBot +docker run -itd --name OctoBot -p 80:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable +``` + +### Exécution avec docker-compose + +A simple way to run a docker image is to use docker-compose : + +- Install <a href="https://docs.docker.com/compose/install/" rel="nofollow">docker-compose</a> +- Download the <a href="https://github.com/Drakkar-Software/OctoBot/blob/master/docker-compose.yml" rel="nofollow">docker-compose.yml file</a> +- Create a `.env` file in the current folder +- Add `HOST=YOUR_IP_ADDRESS` in the newly created `.env` file. (where YOUR_IP_ADDRESS is the ip address of the computer, can be replaced by `localhost` if it's a local computer) +- Start OctoBot with docker-compose (with the previous file `docker-compose.yml` in the current folder): + + ```bash + docker-compose up -d + ``` + +You can now open the OctoBot web interface at https://YOUR_IP_ADDRESS. + +### Exécuter plusieurs OctoBots avec docker + +To run a second OctoBot on the same computer : + +1. Create a new directory and enter it +2. Start OctoBot's web interface on a new port by changing "-p" option + +```bash +docker run -itd --name OctoBot -p 8000:5001 -v $(pwd)/user:/octobot/user -v $(pwd)/tentacles:/octobot/tentacles -v $(pwd)/logs:/octobot/logs drakkarsoftware/octobot:stable +``` + +In this example, the second OctoBot's web interface is accessible at http://127.0.0.1:8000. + +Any port can be used except those already used by another OctoBot or any software on your system. + +### Exécuter OctoBot avec des fichiers managés par Docker + +> Warning: It's easier to use but it will not be possible to update it without deleting its files. + +-v arguments can be removed from previous start commands but OctoBot's local files will then be managed by docker (and not directly visible). + +```bash +docker run -itd --name OctoBot -p 80:5001 drakkarsoftware/octobot:stable +``` + +Local OctoBot files path are located in /var/lib/docker and can be listed with the following command + +```bash +docker inspect -f '{{ .Mounts }}' OctoBot +``` + +To copy files of a directory outside the OctoBot container, for example for logs files : + +```bash +docker cp OctoBot:/octobot/logs/. . +``` + +Wherer "OctoBot" is your container name diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-executable-video.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-executable-video.mdx new file mode 100644 index 0000000000..c98200030f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-executable-video.mdx @@ -0,0 +1,28 @@ +--- +title: "Avec l'exécutable" +description: "Apprenez comment installer et démarrer votre OctoBot sur votre propre ordinateur ou serveur (Windows, Mac ou Linux) en utilisant la version exécutable du bot." +sidebar_position: 3 +--- + + + +# Installer OctoBot avec l'exécutable en vidéo + +## Installation + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="unbkFUAWXWM" title="Installer OctoBot avec l" /> + +Rendez-vous sur <a href="https://github.com/Drakkar-Software/OctoBot/releases/latest" rel="nofollow">La dernière release officielle d'OctoBot</a> et téléchargez la dernière version pour votre ordinateur: Windows, Linux or MacOS. Vous pouvez même utiliser OctoBot avec ARM64 sur Raspberry PI. + +<div style={{textAlign: "center"}}> + **[Télécharger la dernière release](https://github.com/Drakkar-Software/OctoBot/releases/latest)** +</div> + +## Utilisation + +Démarrer l'exécutable lancera OctoBot qui ouvrira un terminal et installera son environnement de configuration. L'interface d'OctoBot s'ouvrira automatiquement. + +> Sur Windows, il suffit de double-cliquer sur l'exécutable. +> Sur Linux, tapez d'abord `chmod +x OctoBot_linux_x64`, puis `./OctoBot_linux_x64` dans un terminal ouvert à côté de votre exécutable téléchargé pour démarrer OctoBot. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-pip.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-pip.md new file mode 100644 index 0000000000..93677ecd45 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-pip.md @@ -0,0 +1,76 @@ +--- +title: "Avec PIP" +description: "Apprenez comment installer et démarrer votre OctoBot sur votre propre ordinateur ou serveur (Windows, Mac ou Linux) en utilisant la version PIP (Python Index Package) du bot." +sidebar_position: 7 +--- + + + +# Installer OctoBot avec Python index package (pip) + +:::info + La traduction française de cette page est en cours. +::: + +## Prérequis + +- Python 3.10 (<a href="https://www.python.org/downloads/" rel="nofollow">download</a>) +- Add python to your PATH (<a href="https://superuser.com/questions/143119/how-do-i-add-python-to-the-windows-path" rel="nofollow">tutorial windows</a>) + +## Installation + +In a command line (with python in your PATH) type the following command: + +```bash +python3.10 -m pip install OctoBot +``` + +You can change **python3.10** to the name of the python binary you added to your PATH (for example on linux you may use **python3** or even **python** if the **python --version** commands outputs a python 3.10 version) + +## Utilisation + +```bash +OctoBot +``` + +## Mise à jour + +Exécuter la commande suivante va mettre à jour votre OctoBot Python en utilisant la dernière version et installer les dépendances associées. + +```bash +python3.10 -m pip install -U OctoBot +``` + +Le prochain redémarrage mettra automatiquement à jour les tentacles de votre OctoBot. + +## Lancer plusieurs OctoBots + +To run a second OctoBot on the same computer : + +1. Create a new directory and enter it +2. Start OctoBot and stop it after 1-2min to let it create default files +3. Open user/config.json file +4. Change web config lines + + FROM + + ```json + "web": { + "auto-open-in-web-browser": true + } + ``` + + TO + + ```json + "web": { + "auto-open-in-web-browser": true, + "port": 8000 + } + ``` + + In this example, the second OctoBot's web interface is accessible at http://127.0.0.1:8000. + + Any port can be used except those already used by another OctoBot or any software on your system. + +5. Start the new OctoBot diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-python-and-git.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-python-and-git.md new file mode 100644 index 0000000000..05902a7268 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/install-octobot-with-python-and-git.md @@ -0,0 +1,125 @@ +--- +title: "Avec Python et Git" +description: "Apprenez comment installer et démarrer votre OctoBot sur votre propre ordinateur ou serveur (Windows, Mac ou Linux) en utilisant le code Python open source directement depuis GitHub." +sidebar_position: 6 +--- + + + +# Installer OctoBot avec Python and Git + +:::info + La traduction française de cette page est en cours. +::: + +## Prérequis + +- Packages installed : Python3.10.X, Python3.10.X-dev, Python3.10.X-pip, git + +## Installation + +**First, make sure you have python3.10 and python3.10-dev and python3.10-pip installed on your computer.** + +### Avec la version stable actuelle (branche master) + +**This is the recommended python installation.** + +Clone the OctoBot repository + +``` bash +git clone https://github.com/Drakkar-Software/OctoBot +``` + +Install python packages : + +``` bash +cd OctoBot +python3 -m pip install -Ur requirements.txt +``` + + +> On some setup like 32-bit ARM architectures, you might get a `rust` related error while running `python3 -m pip install -Ur requirements.txt` when installing `cryptography`. +If this happens, you need to install the `rust compiler`: `cryptography` is coded in `rust`. +``` bash +sudo apt-get install -y rustc +``` +You can then restart `python3 -m pip install -Ur requirements.txt`. + +### Avec la version la plus récente (branche dev) + +**This is installation allows to use the most up-to-date version of OctoBot but might broken depending on the moment it is being done (modules updates might be in progress in this branch).** + +Clone the OctoBot repository using the **dev** branch + +``` bash +git clone https://github.com/Drakkar-Software/OctoBot -b dev +``` + +*Or if you already have an OctoBot repository* + +``` bash +git checkout dev +git pull +``` + +### Installer les dernières tentacles : +> Warning: using the latest tentacles might break your OctoBot + +#### Sur Unix +``` bash +cd OctoBot +python3 -m pip install -Ur requirements.txt +export TENTACLES_URL_TAG="latest" +python3 start.py tentacles --install --all +``` +#### Sur Windows +``` bash +cd OctoBot +python3 -m pip install -Ur requirements.txt +SET TENTACLES_URL_TAG=latest +python3 start.py tentacles --install --all +``` + +## Utilisation + +The following command replaces *OctoBot Launcher*: + +``` bash +python3 start.py +``` + +## Mise à jour + +Exécuter la commande suivante va mettre à jour votre OctoBot Python en utilisant la dernière version de la branche sélectionnée (`master` ou `dev`) et installer les dépendances associées. +``` bash +git pull +cd OctoBot +python3 -m pip install -Ur requirements.txt +``` +Le prochain redémarrage mettra automatiquement à jour les tentacles de votre OctoBot. + +## Python3 + +There **python3** is refering to your **Python3.10.X** installation, just adapt the commands to match your setup if any different (might be python, python3, python3.10, etc: it depends on your environment). + +## Lancer OctoBot en tâche de fond + +> For unix distribution only + +With the Linux screen command, you can push running terminal applications to the background and pull them forward when you want to see them. + +``` bash +sudo apt-get install -y screen +screen python3 start.py +``` + +You need the number from the start of the window name to reattach it. If you forget it, you can always use the -ls (list) option, as shown below, to get a list of the detached windows: + +``` bash +screen -ls +screen -r 23167 +``` + +(23167 is an example value) + +OctoBot has been working away in the background is now brought back to your terminal window as if it had never left. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/octobot-troubleshoot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/octobot-troubleshoot.md new file mode 100644 index 0000000000..7f8aeaeb94 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-installation/octobot-troubleshoot.md @@ -0,0 +1,123 @@ +--- +title: "Résoudre les problèmes" +description: "Des questions lors de l'installation d'OctoBot ? Consultez les problèmes d'installation les plus courants dans notre guide de dépannage." +sidebar_position: 8 +--- + + + +# Résoudre les problèmes d'OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +## Conserver votre configuration et historique après une mise à jour + +Sur OctoBot, le dossier `user`, localisé dans le dossier dans lequel vous exécutez OctoBot contient : + +- Votre configuration actuelle +- Vos profils +- Votre historique de portefeuille +- Votre historique de trades et PNL + +Afin de conserver la même configuration et le même historique après une mise à jour, vous pouvez soit : + +- Copier le dossier `user` de votre OctoBot précédent dans le dossier de votre nouvel OctoBot +- Ou exécuter votre nouvel OctoBot dans le même dossier que votre précédent bot. Attention : le dossier `tentacles` sera remplacé par sa nouvelle version. + +## Conserver vos fichiers de backtesting après une mise à jour + +Lors de la mise à jour, vous pouvez vouloir conserver vos fichiers de backtesting précédents. + +Pour que votre nouvel OctoBot ai accès à vos précédents fichiers de backtesting, copiez le dossier `backtesting` (localisé dans le dossier dans lequel vous exécutiez votre OctoBot précédent) dans le dossier de votre nouvel OctoBot. + +## Windows + +### Synchronization temporelle + +This issue happens when error messages such as `'recvWindow' must be less than ...` appear. + +Open an administrator terminal (`Win + X` then `A`) and type: + +```bash +net stop w32time +net start w32time +w32tm /resync +w32tm /query /status +``` + +Code from <a href="https://serverfault.com/questions/294787/how-do-i-force-sync-the-time-on-windows-workstation-or-server" rel="nofollow">serverfault.com</a> + +Another solution found by @alpi on discord channel: [timesynctool.com](http://www.timesynctool.com) + +### OctoBot est bloqué + +When running OctoBot on Windows, clicking into the OctoBot terminal (Powershell or Cmd) can freeze the log output and therefore freeze OctoBot execution (OctoBot will be waiting for the log to be published to continue). + +To fix this issue, untick the "QuickEdit Mode" in your terminal properties and restart it. + +![Powershell](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/powerShellEditMode.jpg) + +![Cmd](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/cmdQuickEdit.jpg) +To open the properties menu, right click on the terminal window header and select "properties". + +## CentOS + +### Installer OctoBot sur CentOS + +Requirements + +```bash +yum -y update +yum install -y git wget sqlite-devel screen +yum -y groupinstall "Development Tools" +yum -y install openssl-devel bzip2-devel libffi-devel +yum install -y screen +cd /root +wget https://www.python.org/ftp/python/3.10.11/Python-3.10.11.tgz +tar xvf Python-3.10.11.tgz +cd Python-3.10*/ +./configure --enable-loadable-sqlite-extensions && make && sudo make install +``` + +OctoBot + +```bash +git clone https://github.com/Drakkar-Software/OctoBot.git +cd OctoBot/ +python3.10 -m pip install virtualenv +virtualenv venv +source venv/bin/activate +pip install -Ur requirements.txt +python start.py +``` + +## Linux + +### Synchronization temporelle + +This issue happens when error messages such as `'recvWindow' must be less than ...` appear. + +On Debian or Ubuntu, open a terminal and type: + +```bash +sudo service ntp stop +sudo ntpd -gq +sudo service ntp start +``` + +Requires `ntp` package installation `sudo apt-get install ntp`. + +Code from + +<a href="https://askubuntu.com/questions/254826/how-to-force-a-clock-update-using-ntp#256004" rel="nofollow">askubuntu.com</a> +. + +### Installation + +During pip install if you have SSL problems, open a terminal and type + +```bash +pip3 install service_identity --force --upgrade +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/chatgpt.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/chatgpt.md new file mode 100644 index 0000000000..adef8a034e --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/chatgpt.md @@ -0,0 +1,66 @@ +--- +title: "ChatGPT" +description: "Apprenez comment configurer votre OctoBot pour trader en utilisant l'IA et ChatGPT ou d'un autre LLM. Comprenez les coûts d'une configuration locale." +sidebar_position: 3 +--- + + + +# Trader avec ChatGPT + +:::info + La traduction française de cette page est en cours. +::: + +Seamlessly [Integrate ChatGPT within your trading strategies](/guides/octobot-trading-modes/chatgpt-trading) and profit from the power of AI trading. + +<div style={{textAlign: "center"}}> + ![octobot collaborating with chatgpt + light](/images/guides/interfaces/octobot-collaborating-with-chatgpt-light.png) +</div> + +Checkout the [ChatGPT trading guide](/guides/octobot-trading-modes/chatgpt-trading) to learn more about how to trade with ChatGPT using OctoBot + +OctoBot uses the ChatGPT interface to interact with ChatGPT. + +## Configuration du service ChatGPT + +To use ChatGPT on an open source OctoBot, the only configuration you need is to enter your OpenAI API key into the GPT Interface + +1. Create or login to your <a href="https://platform.openai.com/" rel="nofollow">OpenAI</a> account +2. Create a new API key on <a href="https://platform.openai.com/account/api-keys" rel="nofollow">your account settings</a> +3. In the Accounts tab of the web interface, add the `GPT` interface if missing +4. Copy your API key into the `openai-secret-key` GPT configuration + +![octobot chatgpt configuration openai key and custom base url](/images/guides/chatgpt/octobot-chatgpt-configuration-openai-key-and-custom-base-url.png) + +## Prédictions avec un LLM personnalisé + +OctoBot can connect to any LLM using the **LLM custom base url** configuration parameter. This is useful to use other AI models than the default OpenAI ones. + +In this case, the **Secret key** parameter, will be used to authenticate to this other LLM server when necessary. It will be ignored otherwise. + +## Trader avec des prédictions Ollama + +To connect to a local Ollama LLM model, configure the **LLM custom base url** of your OctoBot to your Ollama server address followed by `/v1`. + +Using the default Ollama address (`localhost:11434`), your **LLM custom base url** would then be: **`http://localhost:11434/v1`**. + +## Sélectionner votre modèle de LLM + +Selection of the LLM model to use is configured in your GPTEvaluator. When your GPT interface as configured and your `GPTEvaluator` is enabled (when using a ChatGPT-based profile or a custom profile using the `GPTEvaluator`), you can select the LLM model to use from your GPTEvaluator configuration. + +The `GPTEvaluator` configuration interface can be accessed from your profile or directly from the `/config_tentacle?name=GPTEvaluator` path of your OctoBot web interface. + +## Coûts + +Using ChatGPT from automated API calls is a paid service from OpenAI. Each call to ChatGPT will consume +a few OpenAI tokens. + +Each call to ChatGPT is recrating a request which usually consumes around 90 OpenAI tokens. +You can get the current price of OpenAI token from <a href="https://openai.com/pricing" rel="nofollow">the OpenAI pricing page</a>. + +You can estimate the cost of using ChatGPT related features by estimating the amount of requests per day. + +> Running a strategy on 4h for 2 trading pairs on 1 exchange: the GPT evaluator will be called every +> 4 hours for each trading pair for each exchange. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/reddit.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/reddit.md new file mode 100644 index 0000000000..62380e2170 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/reddit.md @@ -0,0 +1,88 @@ +--- +title: "Reddit" +description: "Apprenez comment configurer votre OctoBot pour trader en utilisant Reddit et surveiller des sous-forums (subreddits) afin de trader en fonction des publications sur Reddit." +sidebar_position: 5 +--- + + + +# Trader avec les posts Reddit + +:::info + La traduction française de cette page est en cours. +::: + +<div style={{textAlign: "center"}}> + ![reddit trading automation illustrated by reddit + logo](/images/guides/interfaces/reddit-connection-to-octobot-illustrated-by-reddit-logo.png) +</div> + +OctoBot can connect to <a href="https://www.reddit.com" rel="nofollow">Reddit</a> to monitor Reddit posts from +subreddits. + +When the **RedditForumEvaluator** is enabled, OctoBot will the use <a href="https://github.com/cjhutto/vaderSentiment" rel="nofollow">VADER Sentiment Analysis's AI</a> to analyse the sentiment of each post and make a summary of each coin to be used by the [Daily Trading Mode](/guides/octobot-trading-modes/daily-trading-mode). + +## Configuration du RedditForumEvaluator + +In the Accounts tab of the web interface, add the `Reddit` interface if missing. + +![RedditForumEvaluator configuration to select subreddits to follow](/images/guides/interfaces/RedditForumEvaluator-configuration-to-select-subreddits-to-follow.png) + +Configure the **RedditForumEvaluator** to specify the subreddits to follow for each traded Cryptocurrency. + +## Configuration de la connexion à Reddit + +To connect to Reddit, OctoBot needs a Reddit script app, which you can create from your Reddit account, or a new account dedicated to OctoBot. + +<div style={{textAlign: "center"}}> + ![reddit octobot config](/images/guides/interfaces/reddit-octobot-config.png) +</div> + +1. Login on your Reddit account if you already have one +2. Go to your account's <a href="https://www.reddit.com/prefs/apps/" rel="nofollow">Applications preferences</a>. +3. Create a new `script` app + <div style={{textAlign: "center"}}> + ![reddit create app](/images/guides/interfaces/reddit-create-app.png) + </div> + - `Name` and `description` can be set as you wish + - Leave `About URL` empty + - `Redirect URI` won't be used, enter `https://www.reddit.com/` (or any other valid url) + - Create your app +3. **Client-Id** is the list of characters under your App name, next to its icon +4. **Client-Secret** is the **secret** identifier of the App +<div style={{textAlign: "center"}}> + ![reddit created app](/images/guides/interfaces/reddit-created-app.png) +</div> + +Copy and paste your new Reddit app details into your OctoBot configuration. +<div style={{textAlign: "center"}}> + ![reddit octobot config](/images/guides/interfaces/reddit-octobot-config.png) +</div> + +### Configuration depuis user/config.json + +Add in **user/config.json** in the services key : + +```json +"reddit": { + "client-id": "YOUR_CLIENT_ID", + "client-secret": "YOUR_CLIENT_SECRET" + } +``` + +**Exemple:** + +```json +"services": { + "a service": { + + }, + "reddit": { + "client-id": "YOUR_CLIENT_ID", + "client-secret": "YOUR_CLIENT_SECRET" + }, + "another service": { + + } +} +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/telegram.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/telegram.mdx new file mode 100644 index 0000000000..b8eeeefbda --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/telegram.mdx @@ -0,0 +1,133 @@ +--- +title: "Telegram" +description: "Apprenez comment configurer votre OctoBot pour qu'il soit accessible directement depuis Telegram. Commandez votre OctoBot de n'importe où directement depuis Telegram." +sidebar_position: 2 +--- + + + +# Votre OctoBot sur Telegram + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="E3nShLEFA90" title="Octobot sur Telegram" /> + +OctoBot can use <a href="https://telegram.org" rel="nofollow">Telegram</a> to communicate. With this interface, OctoBot can: + +- Show for how long OctoBot is working +- Display the current portfolio +- Display the current open orders +- Display the profitability since OctoBot started +- Display OctoBot's understanding of the market and its risk parameter +- Changes OctoBot's current risk +- Stop OctoBot +- Trigger emergency trades + +And much more. + +To know the full command list, use the **/help** command + +## Configuration du service Telegram + +### Créer votre bot + +<div style={{textAlign: "center"}}> + ![telegram connection to octobot illustrated by telegram + logo](/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png) +</div> + +First, you need to create a Telegram bot, it will be telegram equivalent of your OctoBot. +See tutorial on the <a href="https://core.telegram.org/bots#6-botfather" rel="nofollow">Telegram +website</a> to create one. + +### Configuration + +In the Accounts tab of the web interface, add the `Telegram` interface if missing. + +![telegram config](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/telegram_config.png) + +### Token + +Get the token in the chat with **botfather** and add it to services config + +### Chat id + +Send a message to your bot and go to this url https://api.telegram.org/botXXX:YYYY/getUpdates with XXX:YYYY replaced by your bot's token. + +Warning: To get your chat id from this url, your telegram bot must have a pending message (the one you just sent). This means that your OctoBot must not be on or you will just receive this message `{"ok":true,"result":[]}` from api.telegram.org. + +Search for: + +```bash +chat: + id: XXXXXXXXX +``` + +Add this id to the telegram service configuration. + +## Allow your bot to listen to telegram groups and channels + +OctoBot can receive messages (trade signals for example) from Telegram groups. + +When invited in a Telegram group, OctoBot will never talk in this group but will +listen to the chat. Using this feature, it is possible to process **telegram signals** in OctoBot. + +In order to be able to read group messages, your telegram bot must have +its **privacy mode** **disabled**. To disable it: + +- say `/setprivacy` to **botfather** +- **botfather** replies: _Choose a bot to change group messages settings._ +- enter the name of your bot +- **botfather** gives information about privacy mode and your bot's privacy setting +- enter `Disable` + +Your OctoBot is now able to read any group message. + +## user/config.json configuration + +Add in **user/config.json** in the services key : + +```json +"telegram": { + "chat-id": "YOUR_CHAT_ID", + "token": "YOUR_BOT_TOKEN" + } +``` + +**Exemple:** + +```json +"services": { + "a service": { + + }, + "telegram": { + "chat-id": "YOUR_CHAT_ID", + "token": "YOUR_BOT_TOKEN" + }, + "another service": { + + } +} +``` + +## Résolution de problèmes + +### Chat not found + +If OctoBot is producing this you get this error, it means that your +[chat-id](#chat-id) is not set correctly. With an incorrect chat-id, +OctoBot is able to read and reply commands but can't push messages by itself. + +### TelegramSignalEvaluator is not receiving telegram messages + +To use the default Telegram signal evaluator, make sure: + +1. Your telegram group / channel is referenced in the TelegramSignalEvaluator configuration +2. Your telegram bot is setup according to [Allow your bot to listen to telegram groups and channels](#allow-your-bot-to-listen-to-telegram-groups-and-channels) +3. Your telegram bot is in the telegram channel / group +4. The telegram notifications you want your bot to process are matching the notification pattern defined in the TelegramSignalEvaluator documentation +5. The telegram signal trading pairs also are traded pairs in your current OctoBot configuration and are supported by the connected exchange(s) +6. Your TelegramSignalEvaluator is activated + +When a telegram message is ignored, a debug log (in terminal and OctoBot.log) is produced explaining the reason why each notifications has be ignored. Please first refer to this log as it will likely show what is wrong with the current setup. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/telegram/telegram-api.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/telegram/telegram-api.md new file mode 100644 index 0000000000..1391b2f713 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/telegram/telegram-api.md @@ -0,0 +1,51 @@ +--- +title: "API Telegram" +description: "Apprenez comment configurer votre OctoBot pour qu'il trade en fonction des signaux provenant de chaînes Telegram." +sidebar_position: 1 +--- + + + +# API Telegram + +:::info + La traduction française de cette page est en cours. +::: + +Telegram API allows your OctoBot to listen to telegram **public groups**. + +:::info + The Telegram API configuration is not necessary if your goal is to command your OctoBot from Telegram or to have your OctoBot listen to a private group. In those cases, the [initial Telegram configuration](.) is enough. +::: + +## Créer votre App + +Before working with Telegram’s API, you need to get your own API ID and hash: + +In order to obtain an API id and develop your own application using the Telegram API you need to do the following: + +- Sign up for Telegram using any application. +- Log in to your Telegram core: https://my.telegram.org. +- Go to 'API development tools' and fill out the form. +- You will get basic addresses as well as the **api_id** and **api_hash** parameters required for user authorization. + + +## Configuration + + +Add in **user/config.json** in the services key : + +``` json +"telegram-api": { + "telegram-api": "YOUR_API_ID", + "telegram-api-hash": "YOUR_API_HASH", + "telegram-phone": "YOUR_TELEGRAM_ACCOUNT_PHONE_NUMBER" +} +``` + +### Code de sécurité + +At the first OctoBot start with a new `telegram-api` configuration a 2-factor authentication code will be sent to your account. +Just enter it the code in your OctoBot console and press enter. + +> If you are asked a password and Telegram didn't send it to you, try to provide the mobile phone number without "+". diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview.md new file mode 100644 index 0000000000..36f65de901 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview.md @@ -0,0 +1,70 @@ +--- +title: "TradingView" +description: "Apprenez comment automatiser vos stratégies TradingView avec OctoBot. Envoyez des alertes depuis les webhooks TradingView et tradez avec OctoBot." +sidebar_position: 4 +--- + + + +# Automatiser le trading depuis TradingView + +<div style="text-align: center"> + +![automatisation de trading tradingview illustrée par le logo tradingview](/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png) + +</div> + +Avec OctoBot, vous pouvez écouter les alertes <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> et automatiser vos trades en vous basant sur vos indicateurs ou stratégies TradingView. + +De cette façon, quand une alerte TradingView est émise, vous pouvez instantanément créer des ordres sur la plateforme d'échange de votre choix. + +Cela fonctionne avec tout type d'alerte, que ce soit: + +- Un seuil de prix que vous avez défini +- Une valeur particulière atteinte par un indicateur +- Une stratégie de trading que vous utilisez sur TradingView + +:::info +Ces guides concernent l'utilisation de TradingView dans le cadre des [robots de trading OctoBot](/fr/trading-bot) et <a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">OctoBot auto-hébergé</a>. Utilisez le [guide d'investisseur trading automatisé avec TradingView](/fr/investing/tradingview-automated-trading) si vous automatisez vos stratégies TradingView avec un [OctoBot TradingView](/fr/investing/tradingview-trading-tutorial) depuis [www.octobot.cloud](https://www.octobot.cloud/fr). +::: + +Pour en apprendre plus sur l'automatisation de stratégies TradingView dans OctoBot, rendez-vous sur le [guide du Trading Mode TradingView](/guides/octobot-trading-modes/tradingview-trading-mode) + +## Alertes basées sur un indicateur + +Votre OctoBot peut trader en se basant sur des indicateurs TradingView ou des évènements de prix. Suivez le [guide des alertes d'indicateur](tradingview/automating-trading-from-an-indicator) pour en savoir plus. + +## Alertes basées sur une stratégie + +Vous pouvez aussi faire en sorte que votre Octobot trade selon une stratégie Trading écrite en Pine Script. Pour cela, suivez le [guide des alertes de stratégies](tradingview/automating-trading-from-a-pine-script-strategy) pour synchroniser votre OctoBot avec votre stratégie TradingView. + +## Configuration d'OctoBot + +Ajoutez simplement l'interface `Trading-view` à la configuration "Accounts" de votre OctoBot et configurez le [service de webhook](tradingview/using-a-webhook). + +## Compte TradingView + +Tout d'abord, créez un compte <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> si vous n'en avez pas déjà un. + +Ensuite, pour pouvoir automatiser votre stratégie TradingView, vous aurez besoin d'utiliser des [webhooks](tradingview/using-a-webhook), ce qui nécessite un compte TradingView pro. Si vous n'en avez pas, vous pouvez utiliser l'essai gratuit de 30 jours. + +<div style="text-align: center"> + +<a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">![bouton plan pro tradingview](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-go-pro-trial-button.png)</a> + +</div> + +<div style="text-align: center"> + +![tradingview démarrer essai gratuit](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-start-trial-button.png) + +</div> + + +Votre compte est maintenant prêt à être utilisé avec OctoBot ! + +## Format des alerte + +Vous pouvez envoyer des commandes à votre OctoBot en utilisant des alertes TradingView, y compris la création d'ordres au marché ou aux limite, la prise de bénéfices, l'annulation d'ordres et bien plus encore. + +Consultez le [guide de format d'alerte](tradingview/alert-format) pour en savoir plus. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/alert-format.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/alert-format.md new file mode 100644 index 0000000000..a3f1bc2b64 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/alert-format.md @@ -0,0 +1,143 @@ +--- +title: "Format d'une alerte" +description: "Trouvez toutes les informations dont vous avez besoin pour formater vos alertes TradingView et automatiser les transactions sur votre OctoBot. Achetez ou vendez avec des ordres au marché ou à seuil, définissez des objectifs de profit et des stops." +sidebar_position: 3 +--- + + + +# Format des alertes TradingView + +:::info + La traduction française de cette page est en cours. +::: + +:::info + The following guide describes how to format TradingView alerts to trade using the [open source version of OctoBot](../../octobot). +::: + +## Créer des ordres + +### Contenu minimal d'une alerte + + +The alert format is designed to be easily used from TradingView. Minimal alerts contain the exchange name, the alert symbol (BTCUSDT for BTC/USDT and BTC/USDT:USDT) and the side of the order to create. +Example: + +``` bash +EXCHANGE={{exchange}} +SYMBOL={{ticker}} +SIGNAL=BUY +``` + +![alert-message](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-alert-message.png) + +For a buy signal. + + +``` bash +EXCHANGE={{exchange}} +SYMBOL={{ticker}} +SIGNAL=SELL +``` + +For a sell signal. + +Parameters can be separated using a new line or a `;` character. + +### Parametres d'alertes additionels + +Additional order details can be added to the alert. These are optional: + +``` bash +ORDER_TYPE=LIMIT +VOLUME=0.01 +PRICE=42000 +STOP_PRICE=38000 +TAKE_PROFIT_PRICE=55000 +REDUCE_ONLY=true +``` + +- `ORDER_TYPE` is the type of order, it can be `MARKET`, `LIMIT` or `STOP` +- `VOLUME` is the volume of the order in base asset (BTC for BTC/USDT), it supports the [orders amount syntax](/guides/octobot-trading-modes/order-amount-syntax). +- `PRICE` is the price of the limit order in quote asset (USDT for BTC/USDT) +- `STOP_PRICE` is the price of the stop order to create. When increasing the position or buying in spot trading, the stop loss will automatically be created once the initial order is filled. When decreasing the position (or selling in spot) using a LIMIT `ORDER_TYPE`, the stop loss will be created instantly. _Required when `ORDER_TYPE=STOP`_. +- `TAKE_PROFIT_PRICE` is the price of the take profit order to create. When increasing the position or buying in spot trading, the take profit will automatically be created once the initial order is filled. When decreasing the position (or selling in spot) using a LIMIT `ORDER_TYPE`, the take profit will be created instantly. The [orders price syntax](/guides/octobot-trading-modes/order-price-syntax) is supported. Multiple take profit prices can be used from `TAKE_PROFIT_PRICE_1`, `TAKE_PROFIT_PRICE_2`, ... When using multiple take profits, the initial entry amount will be evenly split between take profits unless a `TAKE_PROFIT_VOLUME_RATIO` is set for each take profit. +- `TAKE_PROFIT_VOLUME_RATIO` is the ratio of the entry order volume to include in this take profit. Used when multiple take profits are set. Specify multiple values using `TAKE_PROFIT_VOLUME_RATIO_1`, `TAKE_PROFIT_VOLUME_RATIO_2`, …. When used, a `TAKE_PROFIT_VOLUME_RATIO` is required for each take profit. +Exemple: `TAKE_PROFIT_PRICE=1234;TAKE_PROFIT_PRICE_1=1456;TAKE_PROFIT_VOLUME_RATIO_1=1;TAKE_PROFIT_VOLUME_RATIO_2=2` will split 33% of entry amount in TP 1 and 67% in TP 2. +- `REDUCE_ONLY` when true, only reduce the current position (avoid accidental short position opening when reducing a long position). **Only used in futures trading**. Default is false +- `TAG` is an identifier to associate to the order(s) to create. Any value can be used as tag. Tags can later be used to cancel specific orders. +- `LEVERAGE` the updated leverage value to use. **Only used in futures trading**. + +### Exemples + +#### Un ordre d'achat limité de 0.01 BTC à 30000 USDT avec un take profit +``` bash +EXCHANGE=binance +SYMBOL=BTCUSDT +VOLUME=0.01 +PRICE=30000 +TAKE_PROFIT_PRICE=35000 +SIGNAL=BUY +ORDER_TYPE=LIMIT +``` + +#### Un ordre de vente limité de 0.01 ETH à 0.1 BTC +``` bash +EXCHANGE=binance +SYMBOL=ETHBTC +VOLUME=0.01 +PRICE=0.1 +SIGNAL=SELL +ORDER_TYPE=LIMIT +``` + +#### Un ordre de vente en stop loss de 10 ATOM à 5 USDT avec un tag "exit1" +``` bash +EXCHANGE=binance +SYMBOL=ATOMUSDT +VOLUME=10 +STOP_PRICE=5 +SIGNAL=SELL +ORDER_TYPE=STOP +TAG=exit1 +``` + + +## Annuler des ordres + +Use `SIGNAL=CANCEL` to cancel orders identified buy their `SYMBOL` and `EXCHANGE` + +### Annuler tous les ordres d'ETH/BTC sur Binance +``` bash +EXCHANGE=binance +SYMBOL=ETHBTC +SIGNAL=CANCEL +``` + +### Annuler tous les ordres vente d'ATOM/USDT avec le tag "exit1" sur Binance +``` bash +EXCHANGE=binance +SYMBOL=ATOMUSDT +SIGNAL=CANCEL +PARAM_SIDE=SELL +TAG=exit1 +``` + +Additional cancel parameters are available: +- `PARAM_SIDE` is the side of the orders to cancel, it can be `buy` or `sell` to only cancel buy or sell orders. +- `TAG` is the tag to select orders to cancel with. When provided, only orders created with the given tag and symbols will be canceled. + +## Sécurité des alertes + +You can use a token to add a security layer on your webhook alerts using +an identification token, this token is randomly generated by your +OctoBot and can be found on the configuration interface and in execution +logs. + +To add your token on the tradingview.com signal: add the following line +to the alert message: + +``` bash +TOKEN=YOUR_TOKEN +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy.md new file mode 100644 index 0000000000..1577f59c61 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy.md @@ -0,0 +1,45 @@ +--- +title: "Trader depuis une stratégie" +description: "Apprenez comment faire en sorte qu'OctoBot trade en fonction des stratégies TradingView Pine Script. Envoyez des signaux depuis des stratégies TradingView Pine Script et faites en sorte qu'OctoBot effectue instantanément des transactions sur votre plateforme d'échange." +sidebar_position: 2 +--- + + + +# Automatiser vos stratégies Pine Script de TradingView + +:::info + La traduction française de cette page est en cours. +::: + +With OctoBot, you can listen to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> <a href="https://www.tradingview.com/pine-script-docs/en/v5/index.html#" rel="nofollow">Pine Script</a> strategies signals +to automate your trades. + +## Créer une alerte de strategie + +To send alerts from a strategy, use the <a href="https://www.tradingview.com/pine-script-docs/en/v5/concepts/Alerts.html?highlight=alert_message#order-fill-events" rel="nofollow">`alert_message`</a> parameter from Pine Script strategy functions which can generate order. + +1. Define the content of your alert before any `strategy.entry`, `strategy.exit` or `strategy.close` call: + - example: `messageBuy = "EXCHANGE=binance;SYMBOL=SOLUSDT;VOLUME=100a%;SIGNAL=BUY"` + > Note: when defining your alert, remember to add `;` between each parameter. +2. In the strategy section, add `alert_message=messageBuy` to your strategy `entry`, `exit` or `close` calls: + - example: `strategy.entry("Buy", strategy.long, comment="Buy Signal Triggered", alert_message=messageBuy)` +3. When creating a new alert (_right-click on the strategy / add new alert_) make sure that you: - Select the name of your strategy as the condition - Name the alert (the name can be whatever you want) - Replace **ALL** the message content with exactly `{{strategy.order.alert_message}}` + ![adding a TradingView strategy alert](/images/guides/adding-a-tradingview-strategy-alert.png) + +- _Et voilà !_ This alert will automatically notify your OctoBot each time your strategy executes `entry`, `exit` or `close` calls. + +Tips: + +- For multi-coin, simply edit the strategy and modify the SYMBOL entry in the messageBuy definition. You can thus vary the parameters according to the assets. +- It can be easier to define multiple messages such as `messageBuy`, `messageBuyWithATakeProfit`, `messageSell`or even `messageCancel` and use the appropriate message later on (with the `alert_message` parameter) when calling `entry`, `exit` or `close`. + +_Special thanks to @KidCharlemagne for creating the basis of this guide !_ + +## Format de l'alerte + +Learn more about how to create your TradingView alerts on [the TradingView alert format guide](/guides/octobot-interfaces/tradingview/alert-format). + +## Configuration de TradingView + +Wondering how to make your OctoBot listen to TradingView signals ? Checkout [our TradingView integration guide](/guides/octobot-interfaces/tradingview). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator.md new file mode 100644 index 0000000000..00993e48d7 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator.md @@ -0,0 +1,51 @@ +--- +title: "Trader depuis un indicateur" +description: "Apprenez comment faire en sorte qu'OctoBot trade en fonction des indicateurs TradingView. Envoyez des signaux depuis les indicateurs TradingView Pine Script et faites en sorte qu'OctoBot effectue instantanément des transactions sur votre plateforme d'échange." +sidebar_position: 1 +--- + + + +# Automatiser le trading d'un indicateur TradingView + +:::info + La traduction française de cette page est en cours. +::: + +With OctoBot, you can listen to <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> indicator signals +to automate your trades. + +## Créer une alerte d'indicateur + +- Go to the right menu and click on the alert button + + ![alert-menu-button](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-alert-menu.png) + +- Create a new alert with ![create-alert-button](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-add-alert-button.png) +- Choose the condition : an indicator cross, a price drop, whatever + you want +- Add your OctoBot [webhook](using-a-webhook) as the following screenshot. + + ![set-webhook-url](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/tradingview-alert-webhook-url.png) + + You will find OctoBot's alert webhook URL on your OctoBot's configuration + page or in OctoBot's starting logs. It should be an url like `https://webhook.octobot.cloud/tradingview/xxxx` or `http://XXXXXXXX.ngrok.io/webhook/trading_view`. + + WARNING: To improve performances, webhooks are started only when + required, this means that **you need to activate a webhook related + tentacle to get the webhook url** (a tentacle such as the **trading + view signals trading mode**) + + ![octobot open source configured tradingview alert and webhook config](/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-and-webhook-config.png) + + ![webhook log](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/webhook_log.jpg) + +- Set the alert message + +## Format de l'alerte + +Learn more about how to create your TradingView alerts on [the TradingView alert format guide](/guides/octobot-interfaces/tradingview/alert-format). + +## Configuration de TradingView + +Wondering how to make your OctoBot listen to TradingView signals? Checkout [our TradingView integration guide](/guides/octobot-interfaces/tradingview). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-tradingview-free-email-alerts-with-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-tradingview-free-email-alerts-with-octobot.md new file mode 100644 index 0000000000..ea9e8be155 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/automating-tradingview-free-email-alerts-with-octobot.md @@ -0,0 +1,88 @@ +--- +title: "Utiliser des alerts par email" +description: "Configurez votre OctoBot pour trader avec les alertes email gratuites de TradingView et automatiser vos stratégies TradingView avec un compte gratuit." +sidebar_position: 4 +--- + + + +# Automatiser des stratégies TradingView par email + +:::info + Attention: L'automatisation d'alertes TradingView par email n'est malheureusement plus disponible suite à une récente restriction de la part de TradingView. +::: + +Lors de la création d'une alerte TradingView, vous pouvez choisir d'être notifié par email. Ce service est disponible avec un **compte TradingView gratuit** et peut être utilisé pour automatiser des trades en utilisant OctoBot. + +<div style={{textAlign: "center"}}> + <div> + ![formulaire d'alerte par email tradingview + complété](/images/guides/trading-view/tradingview-alert-email-form-completed.png) + </div> +</div> + +:::info + L'[extension premium + d'OctoBot](../../octobot-configuration/premium-octobot-extension) est + nécessaire pour trader avec OctoBot en utilisant les alertes email de + TradingView. +::: + +## Configuration des alertes TradingView par email + +Pour automatiser vos trades basés sur les alertes TradingView envoyées par email, il vous suffit d'aller dans votre interface de configuration `Accounts`, sélectionner l'onglet `Interfaces` et ajouter l'interface `TradingView`. + +Si vous n'êtes pas encore authentifié sur votre compte OctoBot, n'avez pas encore configuré votre adresse email (ou n'avez pas encore acheté l'extension Premium d'OctoBot), cliquez sur `GENERATE EMAIL` pour vous connecter à votre compte et configurer votre adresse email d'alerte sur TradingView. + +<div style={{textAlign: "center"}}> + ![octobot open source bouton de configuration de l'adresse email d'alertes + tradingview](/images/guides/trading-view/octobot-open-source-configuring-tradingview-alert-email-address.png) +</div> + +La prochaine étape consiste à configurer votre compte TradingView pour envoyer des alertes à cette adresse email. + +<div style={{textAlign: "center"}}> + <div> + ![octobot open source début de la configuration du mail + tradingview](/images/guides/trading-view/open-source-octobot-start-tradingview-email-config.png) + </div> +</div> + +## Enregistrer votre adresse email d'alerte sur TradingView + +Maintenant que vous avez reçu votre adresse email personnelle d'alerte TradingView, suivez les étapes de configuration depuis votre OctoBot pour ajouter cette adresse email à vos alertes TradingView. + +L'aide de configuration vous guidera à travers les étapes suivantes : + +1. Ajouter votre adresse email à vos alertes TradingView + <div style={{textAlign: "center"}}> + <div> + ![octobot open source ajout d'une alerte + tradingview](/images/guides/trading-view/octobot-open-source-add-tradingview-alert-illustration.png) + </div> + </div> +2. Réception de votre code de confirmation + <div style={{textAlign: "center"}}> + <div> + ![octobot open source attente du code de vérification + tradingview](/images/guides/trading-view/octobot-open-source-waiting-tradingview-verification-code.png) + </div> + </div> +3. Démarrage de votre OctoBot pour suivre vos alertes TradingView + <div style={{textAlign: "center"}}> + <div> + ![octobot open source dernière étape de configuration des mails d'alerte + tradingview](/images/guides/trading-view/octobot-open-source-tradingview-email-configuration-last-step.png) + </div> + </div> + +## Commencer à trader avec votre stratégie TradingView + +Votre OctoBot est maintenant prêt à trader avec les alertes TradingView. You pouvez tous les marchés sur les plateformes d'échange de votre choix en suivant le [format d'alerte d'OctoBot open source](alert-format). + +Une fois configurée, votre adresse email personnelle d'alerte TradingView sera affichée à coté de votre configuration TradingView. Si vous avez besoin de retourner sur l'interface de configuration, par exemple pour revalider votre adresse email, vous pouvez simplement cliquer sur l’icône de configuration : + +<div style={{textAlign: "center"}}> + ![octobot open source bouton de configuration des alertes email + tradingview](/images/guides/trading-view/octobot-open-source-configure-button-tradingview-alert-email-address.png) +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/using-a-webhook.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/using-a-webhook.md new file mode 100644 index 0000000000..ae444e3f94 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/tradingview/using-a-webhook.md @@ -0,0 +1,85 @@ +--- +title: "Utiliser un webhook" +description: "Configurez vos webhooks OctoBot pour investissez à partir de signaux TradingView. Utilisez OctoBot cloud, Ngrok ou votre configuration personnelle." +sidebar_position: 5 +--- + + + +# Utiliser un webhook avec OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +There are many ways to wake your OctoBot up and make it do something, +one of them is using a webhook. With a webhook, you can automatically +send messages to your OctoBot from any website supporting this system. + +<a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> is one of them. + +In order to be able to receive TradingView webhook's message, you need to make your OctoBot reachable from TradingView. For this, there are 3 options: + +- Use the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension) and simply connect your OctoBot through the OctoBot cloud secure server. +- Use <a href="https://ngrok.com/" rel="nofollow">Ngrok</a> to act as a secure intermediary between the internet and your OctoBot. +- Or setup your own public IP and port configuration + +## Configurer le webhook de votre OctoBot + +1. In your OctoBot configuration, from the `Accounts` tab, in `Interfaces`, add the webhook service. +2. Set up your webhook configuration using one of the following options: + + - Option 1: Using [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension): just select the `Enable-Octobot-Webhook` + - Option 2: Using Ngrok : + + 1. Select `Enable-Ngrok`, uncheck `Enable-Octobot-Webhook` + 2. Create an account on <a href="https://ngrok.com/" rel="nofollow">ngrok</a> + 3. Copy your Ngrok token from https://dashboard.ngrok.com/get-started/your-authtoken + 4. Enter your Ngrok token into your OctoBot's webhook service configuration. + + - Option 3: Manual configration: if you are familiar with webhook setups and your OctoBot is exposed to the Internet, you can disable both `Enable-Ngrok` and `Enable-Octobot-Webhook` and configure the listening port and IP for the webhook yourself. + _Note: With this manual configuration, when using docker, you also need to add `-p 9000:9000` after `docker run`_. + +3. Activate a tentacle using a webhook service (like the TradingView signals trading mode). +4. Restart your OctoBot. +5. The webhook address will be displayed on your OctoBot configuration, on to the TradingView inteface and printed in your logs. + +:::info + **Your Webhook URL is missing?** For your webhook URL to be displayed, a + TradingView-related profile has to be active. If you don't see the URL in your + TradingView configuration, select a TradingView profile in your profile + configuation and restart your OctoBot. +::: + +Follow [this guide](/guides/octobot-interfaces/tradingview) to know more on how to send TradingView signals to your OctoBot. + +## Examples de configuration + +### Option de configuration 1: Utiliser l'extension premium OctoBot + +**TradingView** and **Webhook** configuration in the Accounts tab +![octobot open source premium extension webhook configuration](/images/guides/trading-view/octobot-open-source-premium-extension-webhook-configuration.png) + +The Webhook URL is also printed in logs +![octobot open source premium extension webhook log](/images/guides/trading-view/octobot-open-source-premium-extension-webhook-log.png) + +### Option de configuration 1: Utiliser Ngrok + +TradingView and Webhook configuration in the Accounts tab +![octobot open source ngrok webhook configuration](/images/guides/trading-view/octobot-open-source-ngrok-webhook-configuration.png) + +The Webhook URL is also printed in logs +![octobot open source ngrok webhook log](/images/guides/trading-view/octobot-open-source-ngrok-webhook-log.png) + +Activate a tentacle using a webhook service (like the TradingView signals trading mode) + +## À propos de ngrok.com + +You can use Ngrok with a free account, the only drawback of having a +free version is that your webhook address will change at every OctoBot +restart, you will have to update it on your message sender <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a>. + +To avoid having to re-enter your IP each time, you can either: + +- Use the [Premium OctoBot Extension](/guides/octobot-configuration/premium-octobot-extension): in this case you only pay once and always have your OctoBot secure webhook ready to receive your TradingView alerts. +- Pay a Ngrok monthly subscription diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/web.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/web.md new file mode 100644 index 0000000000..7fffc7655d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-interfaces/web.md @@ -0,0 +1,124 @@ +--- +title: "Interface web" +description: "Apprenez comment configurer l'interface web de votre OctoBot. Sécurisez-la avec une authentification par mot de passe, configurez-la pour avoir plusieurs OctoBots sur le même ordinateur." +sidebar_position: 1 +--- + + + +# Interface web + +:::info + La traduction française de cette page est en cours. +::: + +![home](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/home.jpg) + +OctoBot comes with a web interface allowing you to: + +- Follow OctoBot's status and moves +- Interact with OctoBot +- Configure OctoBot and the [Trading Modes](/guides/octobot-trading-modes/trading-modes) to use +- Use [Backtesting](/guides/octobot-usage/backtesting) to optimize your strategies + +## Configuration + +In the Accounts tab of the web interface, add the `Web` interface if missing. + +![web config](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/web_config.png) + +- **port** is the port you want the web interface to be accessible from. Changing it allows you to [have multiple OctoBots running on the same computer](../octobot-usage/having-multiple-octobots-on-one-computer). +- **auto open in web browser** is whether starting your OctoBot should open a new tab on your browser to display the web interface +- **requires password** is whether the web interface of your OctoBot should be protected by a password + +## Protéger votre interface web + +### Utiliser mot de passe + +You can set a password to protect your web interface. This way you can secure the access to your OctoBot when hosting it on a cloud or just add a security layer to your setup. + +**By default no password is required.** + +You can activate the password authentication from the web interface configuration, it is also where you can set and change your password. + +Any IP will be automatically **blocked after 10 authentication failures in a row**. IPs will remain blocked until your OctoBot restarts. If you accidentally block your IP, you can just restart your OctoBot. + +### Comment le mettre en place ? + +- Go to "Accounts" page +- Select "Interfaces" on the left menu +- Click on "**\*\*\*\***" next to "Password: " +- Override the "\*\*\*\*" with your password +- Click on validate +- Click on "SAVE AND RESTART" red button on the left menu + +### Mot de passe oublié + +If you forgot your password, go to your **user/config.json** file and change: + +```json +"require-password": true, +``` + +into: + +```json +"require-password": false, +``` + +Then restart your OctoBot. This way you will be able to access your OctoBot without a password and then change it. + +### À propos de l'authentification par interface web + +- OctoBot's web interface authentication works on the assumption that you are the only person being able to access your OctoBot's file system and the associated processes. This authentication can be deactivated by anyone being able to edit your **user/config.json** and restart your OctoBot process. +- Only a SHA256 hash of your password will be stored in you **user/config.json** file. This is making it impossible to go back to the original password you entered. + +### Bloquer les requests provenant d'autres sites (CSRF) + +You can set the `CORS_ALLOWED_ORIGINS` environment variable before starting your OctoBot, this way only requests from the specified origin(s) will be answered to. + +Examples: + +- `CORS_ALLOWED_ORIGINS=https://mybot.com` +- `CORS_ALLOWED_ORIGINS=http://localhost:5001` +- `CORS_ALLOWED_ORIGINS=https://mybot.com,https://myotherwebsite.com` + +Requests from other origins will be refused with a 400 error and the web interface will behave as if OctoBot was constantly disconnected. + +By default, no request filter is set (equivalent to CORS_ALLOWED_ORIGINS=\*) which might make your bot vulnerable to <a href="https://owasp.org/www-community/attacks/csrf" rel="nofollow">Cross Site Request Forgery attacks</a>. + +### Configuration avec user/config.json + +Add in **user/config.json** in the services key : + +```json +"web": { + "auto-open-in-web-browser": false, + "ip": "0.0.0.0", + "password": "", + "port": 5001, + "require-password": false +} +``` + +You can also change the IP your web interface is binding to from **user/config.json**. + +**Example:** + +```json +"services": { + "a service": { + + }, + "web": { + "auto-open-in-web-browser": false, + "ip": "0.0.0.0", + "password": "", + "port": 5001, + "require-password": false + }, + "another service": { + + } +} +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-script.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-script.md new file mode 100644 index 0000000000..78ed5fde7b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-script.md @@ -0,0 +1,108 @@ +--- +title: "Commencer le scripting" +description: "Exploitez la puissance du framework OctoBot au sein de vos propres stratégies de trading scriptées en Python tout en gardant la simplicité d'un Pine Script TradingView." +sidebar_position: 17 +--- + + + +# OctoBot Script + +:::note + Pour les utilisateurs d' + <a href="https://github.com/Drakkar-Software/OctoBot-script" rel="nofollow">OctoBot Script</a> + . +::: + +:::info + La traduction française de cette page est en cours. +::: + +## Le framework de trading par script basé sur OctoBot + +> OctoBot Script est dans une version alpha + +OctoBot Script vous permet d'exploiter la puissance du framework OctoBot tout en gardant la simplicité d'un Pine Script TradingView. + +With OctoBot Script, automatisez vos stratégies de trading en utilisant vos scripts hautement optimisés + +- Que ce soit à partir de vos idées de stratégies scriptées, comme sur le Pine Script de <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> +- Ou en utilisant une stratégie avancée basée sur l'IA + +## Installer OctoBot Script depuis pip + +> OctoBot Script nécessite **Python 3.10** + +```{.sourceCode .bash} +python3 -m pip install OctoBot wheel appdirs==1.4.4 +python3 -m pip install octobot-script +``` + +## Exemple de script: une strategie RSI + +Dans cet exemple, OctoBot script permet de créer rapidement une stratégie de trading basée sur le <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">RSI</a> comprenant: + +- une prise de profit à 25% de gains +- un stop loss à 15% de perte + +```python + + +async def rsi_test(): + async def strategy(ctx): + # Will be called at each candle. + if run_data["entries"] is None: + # Compute entries only once per backtest. + closes = await obs.Close(ctx, max_history=True) + times = await obs.Time(ctx, max_history=True, use_close_time=True) + rsi_v = tulipy.rsi(closes, period=ctx.tentacle.trading_config["period"]) + delta = len(closes) - len(rsi_v) + # Populate entries with timestamps of candles where RSI is + # below the "rsi_value_buy_threshold" configuration. + run_data["entries"] = { + times[index + delta] + for index, rsi_val in enumerate(rsi_v) + if rsi_val < ctx.tentacle.trading_config["rsi_value_buy_threshold"] + } + await obs.plot_indicator(ctx, "RSI", times[delta:], rsi_v, run_data["entries"]) + if obs.current_live_time(ctx) in run_data["entries"]: + # Uses pre-computed entries times to enter positions when relevant. + # Also, instantly set take profits and stop losses. + # Position exists could also be set separately. + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") + + # Configuration that will be passed to each run. + # It will be accessible under "ctx.tentacle.trading_config". + config = { + "period": 10, + "rsi_value_buy_threshold": 28, + } + + # Read and cache candle data to make subsequent backtesting runs faster. + data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) + run_data = { + "entries": None, + } + # Run a backtest using the above data, strategy and configuration. + res = await obs.run(data, strategy, config) + print(res.describe()) + # Generate and open report including indicators plots + await res.plot(show=True) + # Stop data to release local databases. + await data.stop() + + +# Call the execution of the script inside "asyncio.run" as +# OctoBot script runs using the python asyncio framework. +asyncio.run(rsi_test()) +``` + +## Rapport généré + +![rapport octobot pro avec btc usdt avec graphiques de trades et portfolio et rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +## Rejoignez la communauté + +Nous avons récemment créé un canal Telegram dédié au script OctoBot. + +<a href="https://t.me/+366CLLZ2NC0xMjFk" rel="nofollow">Telegram News</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/chatgpt-trading.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/chatgpt-trading.mdx new file mode 100644 index 0000000000..07592ffc98 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/chatgpt-trading.mdx @@ -0,0 +1,74 @@ +--- +title: "Trader avec ChatGPT" +description: "Apprenez à utiliser les prédictions de ChatGPT sur OctoBot pour améliorer vos stratégies de trading crypto avec des prédictions du marché basées sur l'IA." +sidebar_position: 4 +--- + + + +# Trader avec ChatGPT + +Avec OctoBot, il est possible de trader avec des intelligences artificielles telles que [ChatGPT](/guides/octobot-interfaces/chatgpt) en utilisant le [DCA trading mode](dca-trading-mode) et le [Daily trading mode](daily-trading-mode). + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="BV4ZHQrIpRQ" title="Stratégie de trading crypto avec ChatGPT sur OctoBot" /> + +Dans cette vidéo, nous montrons comment l'une des stratégies de trading ChatGPT d'[OctoBot cloud](/) fonctionne. + +Dans OctoBot, ChatGPT est utilisé comme un indicateur de trading et non comme un moyen de passer des ordres. Cela signifie que plusieurs [trading modes](trading-modes) peuvent utiliser ChatGPT. + +## Utiliser ChatGPT dans vos trading modes + +Lorsque vous utilisez le [DCA trading mode](dca-trading-mode) intégré et [Daily trading mode](daily-trading-mode), vous pouvez choisir d'activer le GPTEvaluator. En le faisant, vos trading modes prendront en compte les prédictions de ChatGPT lors de leur trading. + +<div style={{textAlign: "center"}}> + ![ai trading illustrated by octobot head with chatgpt logo trading bitcoin + ethereum litecoin usd + logos](/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png) +</div> + +Vous pouvez donc choisir de trader uniquement avec les prédictions ChatGPT en activant l'évaluateur ChatGPT uniquement ou les consolider avec d'autres évaluateurs. + +## Fonctionnement de l'évaluateur ChatGPT + +L'évaluateur ChatGPT fonctionne en 3 étapes : + +1. Collecte des données du marché : Selon la configuration choisie pour votre évaluateur ChatGPT, les données du marché sont traitées puis envoyées à ChatGPT pour lui demander une prédiction. +2. Demande la prédiction : Octobot demande à ChatGPT une prédiction concernant le marché basée sur les données fournies. Cette prédiction consiste à demander à ChatGPT si le marché est plus susceptible de monter ou de descendre dans un avenir proche avec un certain pourcentage de confiance. +3. Analyse de la prédiction ChatGPT : Selon la réponse donnée par ChatGPT, l'évaluateur ChatGPT émet une évaluation. Le côté de l'évaluation dépendra de la réponse "UP" ou "DOWN" de l'IA, et son poids est basée sur le niveau de confiance donné par ChatGPT sur sa prédiction. + +## Configurations de l'évaluateur ChatGPT + +L'évaluateur ChatGPT peut être configuré de différentes manières pour personnaliser la façon dont vous souhaitez que ChatGPT fasse des prédictions. + +![configuration du trading avec chatgpt dans octobot avec le GPTEvaluator](/images/guides/trading-with-chatgpt-in-octobot-GPTEvaluator-configuration.png) + +- `Indicator` définit la manière dont vous souhaitez prétraiter les données du marché avant de les envoyer à ChatGPT. Vous pouvez choisir d'envoyer les bougies brutes (sans prétraitement), une moyenne mobile ou d'autres types de valeurs transformées. +- `Source` est l'entrée à donner à l'`Indicator` lorsqu'il est sélectionné +- `Period` est le réglage de la période de l'`Indicator` lorsqu'il est sélectionné +- `Minimum confidence threshold` est une valeur en % à partir de laquelle il envoyer `1` ou `-1` au lieu d'une valeur **comprise entre** -1 et 1. Ce paramètre est particulièrement utile lorsque vous utilisez le [DCA trading mode](dca-trading-mode) avec un DCA basé sur les évaluateurs, qui nécessitent une évaluation à `1` ou `-1` +- `GPT Model` vous permet de sélectionner le <a href="https://platform.openai.com/docs/models" rel="nofollow">modèle GPT OpenAI</a> à utiliser pour vos évaluations. Notez que certains modèles peuvent avoir un tarif différent. D'autres modèles peuvent aussi être utilisés en se connectant à un [autre serveur de LLM](../octobot-interfaces/chatgpt#prédictions-avec-un-llm-personnalisé). +- `Allow Reevaluation` est utilisée lorsque vous utilisez l'évaluateur ChatGPT en même temps que d'autres évaluateurs tels que des évaluateurs en temps réel qui peuvent nécessiter des réévaluations. Comme les réévaluations peuvent se produire à une fréquence élevée, la désactivation de ces réévaluations est une mesure de sécurité pour éviter d'utiliser trop de [tokens OpenAI](/guides/octobot-interfaces/chatgpt#costs) au cas où cela se produirait +- `OpenAI token limit` est le montant maximum de jetons OpenAI qui peut être dépensé dans une journée. Vous pouvez utiliser ce paramètre pour définir une limite d'utilisation de jetons si vous n'êtes pas certains du coût total de votre stratégie. + +:::info + Lorsque vous utilisez OctoBot en open source, l'évaluateur ChatGPT nécessite + la configuration de l'[interface + ChatGPT](/fr/guides/octobot-interfaces/chatgpt) pour pouvoir fonctionner. +::: + +## Backtesting avec l'évaluateur ChatGPT + +Il n'est pas vraiment possible de demander efficacement à ChatGPT les milliers de prédictions requises lors d'un [backtesting](/guides/octobot-usage/backtesting). Cela prendrait des heures et coûterait énormément en termes de jetons OpenAI + +Cependant, chez OctoBot cloud, nous avons décidé de prendre en charge ce coût pour vous sur les paires de trading utilisées dans les [stratégies OctoBot cloud](/). Cela signifie que le backtesting avec l'évaluateur ChatGPT est possible sur les paires de trading utilisées par les stratégies OctoBot cloud. Lorsque vous sélectionnez ces paires, votre OctoBot récupérera automatiquement les prédictions historiques de ChatGPT et exécutera votre backtesting en avec cet historique. + +## Allez plus loin avec le trading ChatGPT + +Plus d'info sur les stratégies de trading basées sur ChatGPT sur nos articles de blog dédiés:: + +- [Trader avec ChatGPT](/blog/trading-using-chat-gpt) +- [Making of de la stratégie de trading ChatGPT](/blog/chatgpt-strategy-deep-dive) +- [Trading avec les signaux ChatGPT + ](/fr/blog/introducing-chatgpt-trading-tool) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/daily-trading-mode.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/daily-trading-mode.mdx new file mode 100644 index 0000000000..439fe71a50 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/daily-trading-mode.mdx @@ -0,0 +1,62 @@ +--- +title: "Daily trading mode" +description: "En utilisant le Daily Trading Mode, vous utilisez la stratégie de trading la plus flexible sur OctoBot. Tradez avec des évaluateur techniques, sociaux, ou d'IA." +sidebar_position: 5 +--- + + + +# Daily Trading Mode + +Le Daily Trading Mode (ou DailyTradingMode) est conçu pour être le mode de trading le plus flexible. En utilisant le Daily Trading Mode, vous pouvez trader en utilisant n'importe quel évaluateur technique, social, d'IA ou en temps réel sur les marchés SPOT et futures. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="e-GqmTfrchY" title="Le Daily Trading Mode d" /> + +Il vous permet de créer des stratégies de trading hautement personnalisées. + +En utilisant le Daily Trading mode, le type des ordres créés (au marché ou aux limites) sera défini par la force du signal reçu des évaluateurs: +- Un signal plus proche de -1 déclenchera un ordre d'achat au marché. +- Un signal plus proche de -0.5 déclenchera un ordre d'achat aux limites. +- Un signal plus proche de 0.5 déclenchera un ordre de vente aux limites. +- Un signal plus proche de 1 déclenchera un ordre de vente au marché. + + +## Le Daily Trading Mode peut + +- Acheter ou Vendre lorsque les évaluateurs signalent une opportunité à la hausse (long) ou à la baisse (short) +- Créer automatiquement un ordre de take profit après chaque entrée +- Créer automatiquement un ordre de stop loss après chaque entrée +- Acheter ou Vendre uniquement si configuré pour +- Être utilisé pour trader sur les marchés SPOT et futures + +## Mode par défaut - Target profits mode désactivé +Lorsque le `Target profits mode` est désactivé, le Daily Trading Mode achètera lorsqu'un signal à la hausse est reçu et vendra lorsqu'un signal à la baisse est reçu. + +:::info + En utilisant le mode par défaut, le Daily Trading Mode ne prendra pas en compte les prix d'entrée lors de la création des ordres de sortie. Cela signifie que si un signal à la baisse (short) est reçu, un ordre de vente sera créé, même si l'actif a été acheté à un prix plus élevé. Soyez prudent lors de la négociation d'actifs en tendance baissière dans cette configuration. Dans certains cas, désactiver les ordres de vente pourrait aider à éviter ce problème. +::: + +## Target profits mode +Lorsque le `Target profits mode` est activé, de même qu'avec le mode par défaut, le Daily Trading Mode achètera lorsqu'un signal à la hausse est reçu et vendra lorsqu'un signal à la baisse est reçu. + +Cependant, en Target profits mode, les signaux ne sont utilisés que pour acheter (ou vendre en short) mais jamais pour vendre (ou racheter en short). Ce mode évite le problème décrit ci-dessus de "vente à perte" du mode par défaut. Les take profit et les stops loss doivent être configurés directement à partir de la configuration du Target profits mode et seront créés dès que n'importe quel ordre d'entrée est exécuté. + + +## Configurer les ordres +- Le Daily Trading mode peut créer des ordres au marché ou aux limite en fonction de la force du signal. Lors de l'utilisation d'ordres aux limites, la `Fixed limit prices difference` permet de définir la différence de % pour calculer le prix de l'ordre. +- All in buy / sell order: peut être activé pour trader en utilisant le montant maximal du portefeuille à chaque fois +- Les ordres d'achat ou de vente peuvent être désactivés pour ne considérer que les signaux d'achat ou de vente +- Les montants d'ordre peuvent être configurés en utilisant la [syntaxe des montants d'ordre](order-amount-syntax). +- Le `Maximum currency percent` peut être utilisé pour réduire le % maximal alloué à une cryptomonnaie donnée. Cela peut être utile pour éviter plusieurs achats du même actif pendant des conditions de marché inhabituelles. Attention : cela n'est utilisé que lorsque les montants d'ordre sont automatiquement calculés. Cette valeur est ignorée lorsque les montants d'ordre sont définis dans la configuration. + + +## La création des ordres +L'objectif du Daily Trading Mode est d'avoir un état (long, neutre ou court) et de créer des ordres lorsque cet état change. Cela signifie que tant que l'état du mode de trading reste le même, les ordres resteront en ligne. Cependant, si l'état change (de long à court, par exemple), tous les ordres de l'état précédents qui sont toujours ouverts seront annulés et de nouveaux ordres correspondant à l'état actuel seront créés. + +En cas de va-et-vient entre neutre et le même état, les ordres ouverts seront simplement remplacés. + +Par exemple, un état long créera un ordre d'achat aux limites. Si l'état du Daily Trading Mode change plus tard à neutre, les ordres ouverts ne seront pas annulés (le neutre n'annule pas les ordres ouverts). Maintenant, si l'état revient à long, l'ordre d'achat précédemment ouvert sera annulé et remplacé par un nouveau. Si l'ordre d'achat précédemment créé a été exécuté, un nouvel ordre d'achat sera ouvert. + +Les profits proviennent de l'achat lorsque l'état est long, puis de la vente lorsque l'état est court (ou vice versa lors de la vente à découvert dans le trading de futures). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/dca-trading-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/dca-trading-mode.md new file mode 100644 index 0000000000..1e66b3e3bb --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/dca-trading-mode.md @@ -0,0 +1,61 @@ +--- +title: "DCA trading mode" +description: "Optimisez votre stratégie d'investissement en utilisant le trading mode Dollar Cost Averaging avec des achats programmés ou basés sur des indicateurs" +sidebar_position: 2 +--- + + + +# Trading mode DCA + +Le trading mode DCA (ou DCATradingMode) est conçu pour acheter et vendre selon une stratégie de [Smart Dollar cost averaging](/blog/smart-dca-making-of). + +<div style={{textAlign: "center"}}> + ![dca trading illustrated by a man watering a plant growing money](/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png) +</div> + +Il vous permet d'optimiser vos prix d'entrée et de sortie en fonction de votre configuration. + +## Le trading mode DCA peut +- Acheter régulièrement +- Acheter lorsque les évaluateurs signalent une opportunité d'achat +- Créer plusieurs ordres d'achat à des prix différents +- Créer automatiquement un ou plusieurs ordres de take profit après chaque achat +- Créer automatiquement un ou plusieurs ordres stop loss après chaque achat +- Être utilisé pour trader sur les marchés SPOT et Futures + +## DCA basé sur le temps +En utilisant le Trigger mode `Time based`, le trading mode DCA créera des ordres d'entrée (achat) régulièrement en fonction de la `Trigger period` configurée. + +## DCA basé sur les évaluateurs +En utilisant le trigger mode `Maximum evaluators signals based`, le trading mode DCA créera des ordres d'entrée (achat) à chaque fois qu'une nouvelle valeur maximale d'évaluateur est reçue. Une valeur maximale d'évaluateur est une valeur de `-1` ou `1`. +Avec ce trigger mode, vous pouvez déclencher des ordres DCA basés sur les signaux des évaluateurs techniques, les signaux provenant de Telegram, ChatGPT ou tout autre indicateur que vous activez. Veuillez noter qu'une valeur d'évaluateur `-1` ou `1` est requise ; toute autre valeur sera ignorée. + +## Configurer les ordres +- Le trading mode DCA peut créer des ordres d'entrée (achat) sous forme d'ordres au marché ou à cours limité. Lors de l'utilisation d'ordres à cours limité, le paramètre `Limit entry percent difference` permet de définir la différence de prix en % pour calculer le prix de l'ordre d'achat. +- Des ordres secondaires peuvent également être activés. Il peut y en avoir autant que configurés et ils peuvent avoir un prix et un montant différents des ordres initiaux. +- Des ordres de take profit (vente) peuvent être activés pour créer automatiquement des ordres de vente lorsque les ordres d'entrée sont remplis. +- Des ordres stop loss peuvent être activés pour créer automatiquement des ordres stop loss lorsque les ordres d'entrée sont remplis. +- De la même manière que les ordres secondaires, les vente (take profit et stop loss) peuvent également être séparées en plusieurs ordres utilisant différents prix. Lorsqu'ils sont activés, le montant initial sera réparti uniformément entre les ordres de vente. +- Chaque montant d'ordre d'entrée et de sortie peut être configuré en utilisant la syntaxe des [montants des ordres](order-amount-syntax). +Cycle des ordres d'entrées : lorsque `Cancel open orders on each entry` est activé, un seul ordre d'entrée (et ses ordres secondaires si ils existent) est autorisé pour chaque paire tradée. Cela signifie que la réception d'un nouveau signal lorsqu'il existe déjà des ordres d'entrée non executés annulera d'abord les ordres d'entrée ouverts avant de créer des ordres associées à ce nouveau signal. En revanche, lorsque cette option est désactivée, plusieurs ordres d'entrée provenant de signaux différents peuvent coexister car le trading mode ne les annulera pas automatiquement. +- `Enable initialization entry orders`: Ce paramètre active ou désactive la création systématique d'ordres d'entrées lorsque bot démarre, et ce indépendamment des conditions de marché. +- La part maximum du portefeuille allouée à une crypto en particulier peut etre limitée avec le paramètre `Max asset holding`. Par exemple, un "Max asset holding" de 30% signifie que le trading mode DCA n'achetera plus de BTC si la part de BTC du portefeuille dépasse 30% de sa valeur totale. + +:::info + Pour le moment, lors de l'utilisation du trading de futures, le trading mode DCA ne prend en charge que les positions longues. Il ne créera pas de positions courtes. +::: + +## Health check +Activer le Health check dans le trading mode DCA garantira qu'il n'y a pas d'actifs au sein des paires de trading qui restent sans ordres de vente. + +Cela est utile pour s'assurer que la stratégie DCA reste cohérente même lors du redémarrage du bot ou si votre OctoBot a été hors ligne pendant un certain temps. + +Par exemple, en tradant BTC/USDT et ETH/USDT, si le bot constate que de l'ETH se trouve dans le portefeuille et n'est pas inclus dans un ordre de vente, alors il considérera que cet ETH doit être vendu et le vendra contre des USDT avec un ordre au marché. + +## Exemples d'utilisation du trading mode DCA +De nombreuses stratégies OctoBot cloud sont construites en utilisant le trading mode DCA. + +- Dans notre [Smart DCA making of](/blog/smart-dca-making-of), nous expliquons le processus de conception de certaines des stratégies OctoBot cloud. + +- En [tradant avec ChatGPT](chatgpt-trading), vous pouvez également utiliser le trading mode DCA pour la gestion des ordres. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/dip-analyser-trading-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/dip-analyser-trading-mode.md new file mode 100644 index 0000000000..83917f46db --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/dip-analyser-trading-mode.md @@ -0,0 +1,24 @@ +--- +title: "Dip Analyser trading mode" +description: "Profitez des creux locaux et de multiples prises de profits avec le Dip Analyser Trading Mode sur OctoBot pour trader sur les marchés SPOT ou futures." +sidebar_position: 6 +--- + +# Dip Analyser Trading Mode + +Le Dip Analyser Trading Mode (ou DipAnalyserTradingMode) est conçu pour acheter sur les creux locaux et vendre les actifs achetés avec plusieurs prises de profits. Il peut être comparé à un mode de trading DCA avancé basé sur plusieurs évaluateurs [DCA trading mode](dca-trading-mode). + +## Le Dip Analyser Trading Mode peut + +- Diviser les take profits en plusieurs ordres de vente pour maximiser les profits +- Utiliser des ordres d'entrée au marché aux limites +- Utiliser des ordres de stop loss +- Personnaliser les prix de take profit en fonction de la force du signal du creux local +- Trader sur les marchés SPOT et futures + +## Configurer les ordres +- Le Dip Analyser Trading mode peut diviser les take profits en autant d'ordres que défini dans la configuration. +- Les montants d'entrée utilisent à la fois les montants par défaut ou configurés et le multiplicateur de volume du signal d'entrée. +- Les prix des ordres de take profit sont répartis linéairement entre le prix d'entrée et le multiplicateur de prix du signal d'entrée. +- La définition d'un multiplicateur de prix de stop loss activera la création d'ordres de stop loss aux côtés des ordres de take profit. +- Les montants des ordres d'entrée peuvent être configurés en utilisant la [syntaxe des montants d'ordre](order-amount-syntax). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/grid-trading-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/grid-trading-mode.md new file mode 100644 index 0000000000..cdb9247cfa --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/grid-trading-mode.md @@ -0,0 +1,32 @@ +--- +title: "Grid trading mode" +description: "Profitez facilement des marchés stables en maintenant un ensemble d'ordres d'achat et de vente selon une grille grâce au mode Grid Trading Mode." +sidebar_position: 7 +--- + + + +# Grid Trading Mode + +Le Grid Trading Mode (ou GridTradingMode) est conçu pour tirer profit des marchés stables en maintenant un ensemble d'ordres d'achat et de vente semblable à une grille. Réalisez des bénéfices réguliers sur chaque petite variation du marché avec des risques minimisés grâce au trading par grille. + +<div style={{textAlign: "center"}}> + ![grid trading illustrated by a man stepping up on green stairs grabbing coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) +</div> + +Le Grid Trading Mode est une version simplifiée du [Staggered Orders Trading Mode](staggered-orders-trading-mode). + +## Le Grid Trading Mode peut + +- Utilisez une configuration par défaut +- Être configuré pour chaque paire de trading indépendamment +- Maintenir une grille d'ordres d'achat et de vente en utilisant l'écart et l'incrément configuré en valeurs statiques +- S'adapter à la hausse ou à la baisse (via trailing) lorsque le prix du marché dépasse les ordres la grille +- Utiliser une quantité limitée de fonds +- Utiliser le montant configuré pour chaque ordre +- Dispatcher automatiquement les nouveaux fonds déposés +- Inclure un délai lors de la création d'ordres opposés lorsque qu'un achat ou une vente est exécutée +- Initialiser la grille en fonction d'un prix personnalisé +- Tradez sur les marchés SPOT +- Optimiser automatiquement le portefeuille afin de créer la grille parfaite à l'aide de la commande `Optimize Initial Portfolio` +- Mettre en pause le maintient des ordres à l'aide dela commande `Pause Orders Mirroring` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/index-trading-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/index-trading-mode.md new file mode 100644 index 0000000000..695d7504ce --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/index-trading-mode.md @@ -0,0 +1,66 @@ +--- +title: "Index trading mode" +description: "Investissez dans plusieurs cryptomonnaies en même temps et créez votre propre indice de crypto en utilisant le mode de trading Index." +sidebar_position: 3 +--- + + + +# Index Trading Mode + +Le Trading Mode Index (ou IndexTradingMode) est conçu pour maintenir votre portefeuille en utilisant une configuration prédéfinie de cryptomonnaies. + +<div style={{textAlign: "center"}}> + <div> + ![index trading illustraté par un panier de crypto](/images/guides/crypto-basket.png) + </div> +</div> + +Tout comme les [paniers de cryptos d'OctoBot cloud](https://www.octobot.cloud/features/crypto-basket), Le Trading Mode Index vous permet d'investir facilement dans des ensembles de cryptomonnaies. + +## L'Index Trading Mode permet de + +- Répartir équitablement vos fonds en marché de référence entre les différentes crypto de votre configuration. +- Vérifier et adapter votre portefeuille si une crypto : + - Prend une plus grande part que prévue dans votre portefeuille + - Prend une plus petite part que prévue dans votre portefeuille + - Est absente de votre portefeuille +- Vérifier et si besoin répartir votre portfolio à votre guise à chaque fois que vous démarrez OctoBot ou à intervalle régulier. + +## Répartition des fonds +Lorsque vous démarrez un OctoBot avec le trading mode Index, celui-ci va : +1. Évaluer tous les actifs configurés dans les paires échangées de votre profil et calculer leurs ratios dans votre portefeuille +2. Si une crypto des paires échangées est absente ou présente avec un ratio incorrect, un rééquilibrage est déclenché. +3. Si un rééquilibrage est déclenché, alors vos fonds sont convertis sur le marché de référence puis répartis entre les crypto configurées. + + +## Utiliser les paniers de crypto OctoBot cloud +En utilisant l'[extension premium d'OctoBot](/guides/octobot-configuration/premium-octobot-extension), vous pouvez utiliser chaque panier de cryptos disponible sur OctoBot cloud directement depuis votre OctoBot open source. + +<div style={{textAlign: "center"}}> + <div> + ![index trading illustraté par un panier de crypto](/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png) + </div> +</div> + +De cette manière, lorsque un panier de cryptos OctoBot cloud est mis à jour, par exemple si le top 20 du marché crypto change ou si une nouvelle crypto rejoint le panier de crypto d'intelligence artificielle, alors votre OctoBot open source se mettra également à jour automatiquement. + +## Configurer les rééquilibrages +### Période de rééquilibrage +Votre OctoBot peut vérifier le contenu de votre portefeuille régulièrement pour s'assurer qu'il reste représentatif de l'indice configuré. + + +La `Trigger period` est le nombre de jours pendant lesquels votre OctoBot attend avant de revérifier le contenu de votre portefeuille par rapport au contenu idéal de l'indice. + +### Seuil de rééquilibrage +Lors de la vérification du contenu de votre portefeuille, le contenu idéal de l'indice ne sera jamais rigoureusement conforme. Étant donné que les prix des cryptomonnaies changent constamment, il y aura toujours des petites différences entre vos fonds et la répartition théorique de votre indice. + +Le `Rebalance cap` définit une valeur en `%` à partir de laquelle considérer qu'un ratio de fonds est désynchronisé du le ratio cible d'un indice. + + +**Exemple avec un indice composé de 4 cryptomonnaies : BTC, ETH, SOL et AVAX:** + +Idéalement, le portefeuille contiendrait exactement 25% de chaque crypto. +Cependant, si le prix d'AVAX augmente de 10%, il pourrait alors représenter 28% du portefeuille au lieu des 25% idéaux. Dans ce cas, lors de la prochaine vérification de rééquilibrage du portefeuille, deux résultats sont possibles: +- A. Le `Rebalance cap` est inférieur ou égal à 3% : Le ratio AVAX détenu est supérieur de 3% aux 25% idéal, un rééquilibrage est déclenché afin que les gains d'AVAX soient redistribués entre BTC, ETH et SOL. +- B. Le `Rebalance cap` est supérieur à 3% : Le ratio d'AVAX détenu reste dans la plage idéale plus ou moins la marge autorisée par le `Rebalance cap`: aucun rééquilibrage n'est nécessaire et rien ne se produit. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/order-amount-syntax.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/order-amount-syntax.md new file mode 100644 index 0000000000..72a586b9a4 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/order-amount-syntax.md @@ -0,0 +1,83 @@ +--- +title: "Syntaxe des montants d''ordres" +description: "Configurez la taille de vos ordres OctoBot en fonction d'un pourcentage de votre portefeuille, d'une évolution ou même d'un montant fixe." +sidebar_position: 10 +--- + + +# La syntaxe des montants d'ordres + + +Avec OctoBot, vous pouvez dimensionner vos ordres en fonction de différents facteurs tels que la valeur de votre portefeuille, utiliser des montants constants ou qui évoluent en fonction de la croissance de vos gains. + +Les montants d'ordres peuvent être configurées dans la configuration du mode de trading, dans les paramètres du profil. + +Note : vous pouvez également laisser la configuration du montant de l'ordre vide et les modes de trading utiliseront un pourcentage de votre portefeuille (calculé en fonction de votre niveau de risque) lorsque aucune valeur n'est configurée. + +:::info + Dans la syntaxe des montants d'ordres, `%X` est toujours équivalent à `X%`. Par conséquent, l'utilisation de `%s` ou `s%` est strictement identique. Cela est vrai pour chaque identifiant à 2 caractères. +::: + +## Montants constants +Montants qui restent toujours constants. + +### Montant fixes en devise de base +Un montant statique à utiliser dans chaque ordre, en devise de base. + +> Utiliser `0.1` pour trader 0.1 BTC à chaque ordre BTC/USD. + +### Montant fixes en devise de quotation: q +Un montant statique à utiliser dans chaque ordre, en devise de quotation. + +> Utiliser `100q` pour trader l'équivalent de 100 USD de BTC à chaque ordre BTC/USD. + +## Montants évolutifs +Des montants qui varient en fonction de la valeur totale du portefeuille. Les montants évolutifs sont utiles pour réinvestir les bénéfices. + + +### Pourcentage d'actifs du symbole tradé: s% +Un pourcentage de la valeur combinée des actifs associés au symbole tradé. + +> Utiliser `12s%` pour trader 12 % de la valeur cumulée des avoirs en BTC et USDT lors du trading en BTC/USDT. + +Note : contrairement à `t%`, `s%` ignore les avoirs d'autres paires de trading. + + +### Pourcentage total d'actifs tradés: t% +Un pourcentage des avoirs combinés associés à chaque paire de trading configurée. + +> Utiliser `12t%` pour trader 12 % de la valeur des avoirs disponibles en BTC, ETH, SOL et USDT lors du trading de BTC/USDT tout en tradant avec ETH et SOL dans d'autres paires de trading. + +`t%` ignore les actifs détenus qui ne sont pas associés aux paires de trading actuellement en cours. + +:::info + Le pourcentage total d'actifs tradés est particulièrement utile pour maintenir une taille d'ordre évolutif dans le temps indépendamment des autres paires de trading. Cela permet d'ignorer les autres actifs qui peuvent se trouver dans le portefeuille mais qui ne doivent pas être tradés. +::: + +## Montants variables +Des montants qui changent après chaque ordre d'achat ou vente. Les montants variables peuvent être utiles pour acheter moins lorsque les fonds disponibles sont réduits par exemple. + +### Total des avoirs en actif: % +Un pourcentage du total des avoirs du portefeuille concernant l'actif tradé + +> Utiliser `2%` pour trader 2% du total des avoirs portefeuille liés à l'actif tradé. + +Ici, les avoirs totaux du portefeuille désignent votre détention de l'actif à acheter ou à vendre. Il s'agirait d'USDT dans le cas des ordres d'achat BTC/USDT. + +:::info + Lorsque vous utilisez le total des avoirs en actif, une fois qu'un ordre est exécuté et que le total des avoirs en actif tradé est réduit, le même pourcentage créera des ordres suivants plus petits. +::: + +### Avoirs disponibles de l'actif: a% +Un pourcentage des avoirs disponibles de l'actif tradé. + +> Utiliser `12a%` pour trader 12% des avoirs disponibles du portefeuille liés à l'actif tradé. + +De manière similaire à `%`, ici les avoirs désignent votre détention de l'actif utilisé pour acheter ou vendre. La différence est que `a%` ne comptera que les fonds disponibles, c'est-à-dire les fonds qui ne sont pas déjà bloqués dans des ordres en cours. + +### Pourcentage de position: p% +Un pourcentage de la position courante du symbol donné. + +> Utiliser `20p%` pour trader avec 20% de la valeur totale de la position ouverte. + +_Disponible uniquement en trading de futures._ diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/order-price-syntax.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/order-price-syntax.md new file mode 100644 index 0000000000..7db27c8f49 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/order-price-syntax.md @@ -0,0 +1,32 @@ +--- +title: "Syntaxe des prix d''ordres" +description: "Configurez les prix de vos ordres OctoBot en utilisant un pourcentage du prix actuel, un prix fixe ou une différence par rapport au prix actuel." +sidebar_position: 11 +--- + + +# Syntaxe des prix d'ordres + +En utilisant OctoBot, vous pouvez fixer le prix vos ordres de différentes manières en utilisant une valeur fixe ou une valeur relative au prix actuel d'une crypto. + +Les prix des ordres peuvent être configurés dans la configuration de votre trading mode, dans les paramètres du profil. + + +## Price constant +Un prix qui reste toujours constant. + +> Utilisez `50000` pour définir le prix de votre ordre exactement à "50000" USDT lors du trading BTC/USDT par exemple. + +## Delta de prix: d +Une valeur qui augmente ou diminue le cours actuel d'une valeur prédéfinie. + +> Utilisez `100d` pour définir le prix de votre ordre à 100 de plus que le cours actuel. Par exemple, si le cours actuel est "50000", alors le prix de l'ordre serait de "50100". + +> Utilisez `-400d` pour définir le prix de votre ordre à 400 de moins que le cours actuel. Par exemple, si le cours actuel est "50000", alors le prix de l'ordre serait de "49600". + +## Pourcentage de prix: % +Pourcentage d'augmentation ou réduction par rapport au cours actuel. + +> Utilisez `10%` pour définir le prix de votre ordre à 10% de plus que le cours actuel. Par exemple, si le cours actuel est "50000", alors le prix de l'ordre serait "55000". + +Utilisez `-25%` pour définir le prix de votre ordre à 25% de moins que le cours actuel. Par exemple, si le cours actuel est "50000", alors le prix de l'ordre serait "37500". diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/staggered-orders-trading-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/staggered-orders-trading-mode.md new file mode 100644 index 0000000000..0c623b32c1 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/staggered-orders-trading-mode.md @@ -0,0 +1,31 @@ +--- +title: "Staggered Orders trading mode" +description: "Profitez des marchés stables avec un ensemble d'ordres d'achat et vente de type grille et une configuration avancée grâce au Staggered Orders Trading Mode." +sidebar_position: 8 +--- + + + +# Staggered Orders Trading Mode + +Le Staggered Orders Trading Mode (ou StaggeredOrdersTradingMode) est conçu pour tirer profit des marchés stables en maintenant un ensemble d'ordres d'achat et de vente semblable à une grille. Réalisez de petits bénéfices réguliers sur chaque petite variation du marché avec des risques minimisés grâce aux ordres en grille. + +<div style={{textAlign: "center"}}> + ![grid trading illustrated by a man stepping up on green stairs grabbing coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) +</div> + +Le Staggered Orders Trading Mode est une version plus complexe et flexible du [Grid Trading Mode](grid-trading-mode). Dans la plupart des situations, le [Grid Trading Mode](grid-trading-mode) est un meilleur choix. + +Alors que le Grid Trading Mode est principalement défini autour du nombre d'ordre que vous souhaitez maintenir, le Staggered Orders Trading Mode se concentre sur la plage de prix que vous souhaitez couvrir. En configurant les bornes supérieure et inférieure, l'écart et l'incrément, le Staggered Orders Trading Mode déterminera combien d'ordre sont nécessaires, utilisera les fonds maximum disponibles et maintiendra les ordres pertinents sur la plateforme d'échange. + +## Le Staggered Orders Trading Mode peut + +- Être configuré pour chaque paire de trading indépendamment +- Spécifier la manière dont les fonds sont dispatchés dans les ordres d'achat et vente +- Maintenez une grille d'ordres d'achat et vente en utilisant l'écart et l'incrément configurés en %. +- Calculer automatiquement le nombre d'ordres d'achat et de vente nécessaires en fonction des bornes supérieure et inférieure configurées, ainsi que de l'écart et de l'incrément +- Maintenez un nombre limité d'ordres sur la plateforme d'échange (les plateformes imposent généralement une limite sur les ordres ouverts simultanés). Cette limite est définie par le paramètre `Operational depth` parameter. Les autres ordres seront marqués comme "virtuels": ils ne seront créés sur la plateforme d'échange que si néccessaire. +- Inclure un délai dans la +- Inclure un délai lors de la création d'ordres opposés lorsque qu'un achat ou une vente est exécutée +- Tradez sur les marchés SPOT +- Optimiser automatiquement le portefeuille afin de créer la grille parfaite à l'aide de la commande `Optimize Initial Portfolio` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/trading-modes.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/trading-modes.mdx new file mode 100644 index 0000000000..10a721d355 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/trading-modes.mdx @@ -0,0 +1,95 @@ +--- +title: "Les Trading modes" +description: "Découvrez comment fonctionnent les stratégies de trading dans OctoBot et les principaux trading modes basés sur le DCA, le grid trading, l'IA et TradingView." +sidebar_position: 1 +--- + + + +# Les trading modes d'OctoBot + +## Concepts clés + +Dans OctoBot, les trading modes définissent la manière de créer, maintenir et annuler les ordres. Ils sont un composant clé de toute stratégie de trading et sont compatibles avec chaque [plateforme d'échange supportée](/guides/exchanges). + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="Exdl94cmMDQ" title="Configuration de Trading dans OctoBot" /> + +Un trading mode peut également s'appuyer sur des évaluateurs pour signaler les opportunités à la hausse (long) ou à la baisse (short). + +### Les responsabilités des trading modes + +Dans OctoBot, les trading modes définissent: + +- Le type d'ordre à créer +- La quantité d'argent à investir dans chaque ordre et à quel prix +- Comment prendre des bénéfices ou fixer des stop-loss +- Quand annuler les ordres si nécessaire + +### Les responsabilités des evaluators + +Alors que les trading modes sont responsables des ordres au sein d'une stratégie de trading, certaines stratégies qui reposent sur des évaluateurs techniques, sociaux, IA ou en temps-réel tels que [les trading modes statistiques](#les-trading-modes-statistiques) utilisent également des `evaluators` et `strategies` pour être notifiées **when** ils doivent créer des ordres. + +Cela signifie qu'en utilisant des [trading modes statistiques](#les-trading-modes-statistiques), il est possible qu'il soit nécessaire d'également activer: + +- Un ou plusieurs `evaluators` qui seront chargés d'analyser les nouvelles candles et nouveaux prix ou réseaux sociaux afin fournir un signal haussier (long) ou baissier (short) lorsque cela est nécessaire. +- Une `strategy`, qui rassemble tous les signaux des `evaluators` et les résume en un seul signal donné au trading mode. + +<div style={{textAlign: "center"}}> + ![ai trading illustrated by octobot head with chatgpt logo trading bitcoin + ethereum litecoin usd + logos](/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png) +</div> + +Un `evaluator` peut être un évaluateur technique tel qu'une évaluation <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">RSI</a> des 30 dernières candles, [demander l'opinion de ChatGPT sur le marché](chatgpt-trading), [surveiller reddit](/guides/octobot-interfaces/reddit) ou bien d'autres choses encore. Il n'y a aucune limite à ce que les `evaluators` peuvent faire pour créer les bons signaux pour vos stratégies de trading. + +:::info + Lors de l'utilisation des `evaluators`, les time frames à utiliser pour vos + évaluations sont définis dans la configuration de la `strategy`. +::: + +## Les trading modes + +OctoBot est livré avec des modes de trading intégrés. Tous peuvent être configurés et testés en profondeur grâce au [backtesting](/guides/octobot-usage/backtesting). + +### Les trading modes statistiques + +Les entrées (et éventuellement les sorties) sont calculées à l'aide des statistiques. Cela peut provenir d'évaluateurs techniques, d'IA, des réseaux sociaux, d'événements de prix ou bien plus encore. + +<div style={{textAlign: "center"}}> + ![dca trading illustrated by a man watering a plant growing + money](/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png) +</div> + +- [DCA trading mode](dca-trading-mode): Utilisez une stratégie de Dollar cost Averaging avancée qui peut (mais pas nécessairement) inclure plusieurs ordres d'achat et de vente, des stop loss, des périodes d'investissement ainsi que des `evaluators` techniques ou basés sur l'intelligence artificielle. +- [Daily Trading Mode](daily-trading-mode): Créez des ordres d'achat et de vente en fonction de vos `evaluators` techniques, sociaux ou basés sur l'intelligence artificielle. +- [Dip Analyser trading mode](dip-analyser-trading-mode): Tradez les minimum locaux et optimisez vos profits en utilisant plusieurs niveaux de take profits. + +### Les trading modes basés sur une grille + +Les ordres d'achat et de vente sont créés de manière déterministe selon la configuration du trading mode. Il n'y a pas de probabilité dans ces algorithmes. + +<div style={{textAlign: "center"}}> + ![grid trading illustrated by a man stepping up on green stairs grabbing + coins](/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png) +</div> + +- [Grid trading mode](grid-trading-mode): Profitez des marchés stables en utilisant une stratégie simple basée sur la grille. +- [Staggered Orders trading mode](staggered-orders-trading-mode): Profitez des marchés stables en utilisant une stratégie avancée basée sur la grille. + +### Automatiser une stratégie TradingView + +<div style={{textAlign: "center"}}> + ![tradingview automation illustrated by tradingview + logo](/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +Les entrées et sorties sont créées en fonction des signaux provenant de votre compte TradingView. Dans ce mode de trading, le cœur de votre stratégie repose sur TradingView et OctoBot agit en automatisation pour synchroniser votre stratégie avec n'importe quel compte de plateforme d'échange. + +- [TradingView trading mode](tradingview-trading-mode): Automatisez facilement les ordres à partir de vos indicateurs ou stratégies TradingView sur n'importe quelle plateforme d'échange supportée par OctoBot. + +:::info + OctoBot étant open-source, si vous avez quelques connaissances en + programmation Python, vous pouvez également créer votre propre trading mode. +::: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/tradingview-trading-mode.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/tradingview-trading-mode.md new file mode 100644 index 0000000000..735a77bea0 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-trading-modes/tradingview-trading-mode.md @@ -0,0 +1,46 @@ +--- +title: "TradingView trading mode" +description: "Automatisez facilement vos stratégies et indicateurs de TradingView en passant des ordres sur une plateforme d'échange grâce au mode de TradingView Trading Mode" +sidebar_position: 9 +--- + + + +# TradingView Trading Mode + +Le TradingView Trading Mode (ou TradingViewTradingMode) est conçu pour automatiser la création d'ordres sur les plateformes d'échange en se basant sur les signaux de <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a>. + +<div style={{textAlign: "center"}}> + ![tradingview automation illustrated by tradingview + logo](/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +Il vous suffit d'émettre des alertes à partir de vos indicateurs ou stratégies TradingView pour trader sur n'importe quelle plateforme d'échange. Apprenez-en davantage sur la configuration de votre OctoBot pour le trading avec TradingView et la [syntaxe des alertes](/guides/octobot-interfaces/tradingview/alert-format) dans le [guide TradingView](/guides/octobot-interfaces/tradingview). + +:::info + Ces guides concernent l'utilisation de TradingView dans le cadre des [robots + de trading OctoBot](/fr/trading-bot). Utilisez le [guide d'investisseur + trading automatisé avec + TradingView](/fr/investing/tradingview-automated-trading) si vous automatisez + vos stratégies TradingView avec un [OctoBot + TradingView](/fr/investing/tradingview-trading-tutorial) depuis + [www.octobot.cloud](https://www.octobot.cloud/fr). +::: + +## Le TradingView Trading Mode peut + +- [Automatisez les signaux des indicateurs TradingView](/guides/octobot-interfaces/tradingview/automating-trading-from-an-indicator) +- [Automatisez les signaux des stratégies Pine Script de TradingView](/guides/octobot-interfaces/tradingview/automating-trading-from-a-pine-script-strategy) +- Utilisez des ordres au marché aux limites +- Créez ou annulez simplement des ordres d'achat, de vente ou des stop-loss +- Créez des ordres d'entrée avec un take profit prédéfini +- Créez des ordres d'entrée avec un stop loss prédéfini +- Créez des ordres de stop loss +- Tradez sur les marchés SPOT et Futures + +## Configurer les ordres + +- Chaque signal provenant de TradingView contient les détails concernant l'ordre à créer. +- La fonctionnalité `Cancel previous orders` peut être activée pour ne maintenir qu'un seul ordre par paire de trading. +- Le montant de chaque ordre peut être configuré en utilisant la [syntaxe des montants d'ordre](order-amount-syntax). +- Le prix de chaque ordre peut être configuré en utilisant la [syntaxe des prix d'ordre](order-price-syntax). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/backtesting.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/backtesting.md new file mode 100644 index 0000000000..5421d6688b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/backtesting.md @@ -0,0 +1,174 @@ +--- +title: "Backtesting" +description: "Utilisez le backtesting pour tester et optimiser sans risque vos stratégies de trading OctoBot. Evaluez leurs performances sur plusieurs jours, semaines ou mois" +sidebar_position: 3 +--- + + + +# Backtesting + +Le backtesting est le processus permettant de mesurer les performances d'un système sur des données historiques. Il utilise des données enregistrées des marchés de cryptomonnaies ou d'actions. Apprenez-en davantage sur le backtesting sur <a href="https://www.investopedia.com/terms/b/backtesting.asp" rel="nofollow">investopedia</a>. + +![résumé des résultats de backtesting octobot](/images/guides/backtesting/octobot-backtesting-result-summary.png) + +Dans OctoBot, le backtesting est un outil essentiel qui vous permet de tester et d'optimiser rapidement vos stratégies dans un environnement sans risque, en exécutant votre stratégie sur des scénarios passés pour identifier les meilleurs paramètres pour vos marchés tradés. + +## Backtester une stratégie de trading dans OctoBot + +OctoBot comprend un moteur de backtesting qui peut rapidement exécuter les stratégies de trading OctoBot sur des données historiques. Pour effectuer un backtest d'une stratégie, il vous suffit de : + +1. Sélectionner le profil à tester dans le sélecteur de profils. +2. Utiliser le collecteur de données pour télécharger des données historiques +3. Démarrer un backtesting +4. Analyser les résultats + +### Sélection du profil à exécuter lors du backtesting + +Accédez au sélecteur de profils sur votre OctoBot et sélectionnez le profil que vous souhaitez tester. + +![sélecteur de profil pour backtesting octobot](/images/guides/backtesting/octobot-backtesting-profile-selector.png) + +#### Trading modes, stratégies et évaluateurs + +Lors du backtesting, OctoBot utilise la version la plus récente du trading mode sélectionné, des stratégies et évaluateurs choisis, ainsi que leur plus récente configuration. + +Cela signifie que vous pouvez sélectionner différents trading modes & évaluateurs et relancer des backtestings sans avoir à redémarrer OctoBot : votre prochain backtesting prendra en compte vos dernières modifications. +Ceci est utile pour rapidement essayer différentes valeurs d'un indicateur ou de tout autre paramètre de configuration. + +Remarque : lors du backtesting d'une stratégie, il est préférable de sélectionner un profil utilisant le `trading simulé` (utiliser le [Simulateur](simulator)), ainsi toute modification que vous apporterez au profile n'affectera pas vos profiles de trading réel. + +#### Portfolio initial + +De la même manière que pour le trading simulé, votre portefeuille initial de backtesting est constitué en utilisant la configuration `Starting-Portfolio` de votre profil. + +Lorsque vous lancez un backtest, assurez-vous d'avoir configuré votre portefeuille de départ avec suffisamment de fonds pour que votre stratégie puisse effectuer des transactions. N'oubliez pas d'ajouter du BTC lorsque vous tradez contre le BTC, par exemple. + +#### Paramètres des actifs tradés lors du backtesting + +- **Coins**: Les coins et les paires sélectionnées sont ignorés car le fichier de données que vous allez sélectionner pour exécuter vos backtests fournira les paires tradées +- **Reference market**: Le marché de référence sélectionné sera modifié pour correspondre à la devise commune des paires tradées dans votre fichier de données s'il y a une devise commune. Par exemple, un fichier de données contenant BTC/USDT et ETH/USDT forcera son marché de référence à USDT afin de calculer les profits en USDT + +### Téléchargement des données historiques + +À l'aide du collecteur de données disponible dans l'onglet Backtesting, vous pouvez télécharger des données historiques à partir de la plupart des plateformes d'échanges de cryptomonnaies. + +![collecteurde donnée de backtesting octobot](/images/guides/backtesting/octobot-backtesting-data-collector.png) + +Vous pouvez télécharger des données sur plusieurs paires d'échange et time frames simultanément. En utilisant de tels fichiers, le backtestings appliquera votre stratégie sur chaque paire disponible et utilisera les time frames [requis dans sa configuration](../octobot-trading-modes/trading-modes#evaluators-responsabilities). + +#### Échanges avec historique complet + +Lors de la sélection des données historiques à télécharger, les échanges sont répartis en 2 catégories : `Full History` (historique complet) et `Other` (autres). Voici les différences. + +**Full history**: ces échanges permettent de télécharger des données historiques sur une plage de temps sélectionnée. Lorsque vous effectuez cette opération, chaque bougie pour chaque time frame sur chaque paire tradée sera téléchargée sur l'intervalle de tmeps sélectionné. Cela signifie que lorsqu'un intervalle de temps est sélectionné: + +- L'historique téléchargé est complet pour chaque bougie sur intervalle de temps sélectionné +- Le processus de téléchargement peut être lent si vous avez sélectionné un grand nombre total de bougies +- Les fichiers de données d'historique complet sont marqués comme `Full` dans le sélecteur de fichiers de données. + **Avertissement**: ne pas sélectionner une plage horaire dans les échanges avec historique complet entraînera le téléchargement des dernières bougies uniquement, tout comme pour les échanges **Other**. + +Les échanges **Other** sont des échanges qui n'autorisent pas (actuellement) le téléchargement des données historiques. Cela signifie que: + +- Seules les bougies les plus récentes seront téléchargées (généralement les 500 dernières bougies) +- Sélectionner des time frames courts et longues en même temps donnera lieu à des backtesting courts car ils ne s'exécuteront que sur les bougies disponibles. Par exemple, un fichier de données de backtesting contenant les 500 dernières bougies d'une minute et les 500 dernières bougies quotidiennes ne s'exécutera que sur les 500 dernières bougies, soit moins d'une journée +- Les fichiers de ce type affichent leur nombre total de bougies dans le sélecteur de fichiers. + +Dans l'ensemble, il est préférable d'utiliser des exchanges avec historique **Full** et de sélectionner l'intervalle de temps sur lequel pour effectuer vos backtests. + +### Démarrage d'un backtesting + +Une fois votre fichier de données téléchargé, sélectionnez-le et lancez votre backtesting. +![sélection du fichier de données pour le backtesting octobot](/images/guides/backtesting/octobot-backtesting-data-selector-starting-a-backtesting.png) + +Les backtestings durent généralement quelques secondes et s'exécutent en arrière-plan. Si vous le souhaitez, vous pouvez faire autre chose avec votre OctoBot pendant qu'un backtesting est en cours. + +Vous êtes notifié une fois que votre backtesting est terminé. + +### Analyser les résultats + +Vous pouvez accéder aux résultats de votre backtest depuis l'onglet Backtesting. Votre rapport de backtesting se trouve sous le sélecteur de données. +Dans ce rapport, vous trouverez un résumé des performances de votre backtesting, des graphiques avec les prix historiques, les trades et les ordres ouverts, ainsi qu'un explorateur de trades. + +#### Profitabilité + +![résumé des résultats d'un backtesting octobot](/images/guides/backtesting/octobot-backtesting-result-summary.png) + +Ce résumé montre la rentabilité de votre stratégie sur l'intervalle de temps sélectionné. + +- **Bot profitability** correspond aux bénéfices en % du marché de référence réalisés par votre stratégie. +- **Market average profitability** est la rentabilité moyenne des marchés tradés. Elle est donnée à titre de comparaison des bénéfices que vous auriez réalisés si vous aviez une exposition permanente à 100% de ces actifs, ce qui est extrêmement risqué. Cela correspond à diviser vos fonds initiaux entre ces actifs et les détenir pendant toute la durée du backtesting. +- **Symbol profitability** correspond à la rentabilité de chaque paire tradée pendant le backtesting. +- **End portfolio** représente le contenu de votre portefeuille à la fin du backtest. +- **Starting portfolio** représente le contenu de votre portefeuille au début du backtest. +- **Reference market** est le marché de référence utilisé pour calculer la profitabilité + +#### Graphiques historiques + +![graphique de résultats d'un backtesting octobot](/images/guides/backtesting/octobot-backtesting-result-graph.png) +Pour chaque paire échangée, un graphique historique sera affiché. Ces graphiques sont interactifs et vous pouvez sélectionner le time frame à utiliser. Pour les backtesting de grande envergure, il peut être plus facile de lire un graphique sur un time frmae plus grand. Chaque graphique affiche: + +- Les bougies historiques et leur volume +- Les trades effectués lors du backtesting +- Les ordres encore ouverts à la fin du backtesting + +#### Trades historiques + +![trades résultant d'un backtesting octobot](/images/guides/backtesting/octobot-backtesting-result-trades.png) +Tout trade exécuté lors d'un backtesting est disponible dans l'explorateur de trade où vous pouvez facilement filtrer et trier les trades pour comprendre le comportement de votre stratégie. + +## Approfondir avec le Strategy Designer + +Le backtesting tel que présenté sur cette page est la version basique, mais déjà très complète du [Strategy Designer](strategy-designer) disponible sur les plans cloud d'OctoBot. + +![résultats strategy designer octobot sur doge btc shib](/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png) + +Le Strategy Designer vous permet de faire tout ce que fait le backtesting traditionnel et ajoute : + +- L'accès à **l'historique de vos résultats** de backtesting +- Des graphiques pour analyser plus efficacement vos exécutions en backtesting avec la **valeur historique du portefeuille**, le PNL et plus encore +- La possibilité de **comparer les résultats de vos backtestings** entre eux. +- Des profils spécifiques au backtesting pour tester sans affecter votre profil actuel de trading live +- Et bien plus encore... + +Si vous effectuez déjà des tests sur vos stratégies et souhaitez utiliser un outil plus puissant, nous vous recommandons vivement de jeter un coup d'œil au [Strategy Designer](strategy-designer). + +## Fonctionnement du Backtesting dans OctoBot + +### Backtesting vs live trading + +Lorsqu'il s'exécute en mode backtesting, OctoBot utilise le même code pour exécuter une stratégie de trading que lorsqu'il s'exécute en mode réel. Cela signifie que les résultats obtenus lors d'une exécution en backtesting et en réel sont identiques tant que les données d'entrée sont également identiques. + +Comme le backtesting utilise des bougies complètes, il peut y avoir une différence avec le trading live car ce dernier peut utiliser des bougies incomplètes pour exécuter ses indicateurs (c'est le cas par exemple avec les évaluateurs en temps réel). Par conséquent, lors du backtesting, **les évaluateurs en temps réel ne fonctionnent pas de la même manière qu'en trading live** car les bougies en cours de construction ne sont pas disponible. + +Pour la même raison, car seules les données des bougies sont disponibles, il est actuellement impossible de faire des backtests sur des stratégies qui utilisent d'autres données que les données des bougies (par exemple en suivant les tendances Google). + +La seule exception concerne les **signaux historiques de ChatGPT qui sont mis à disposition gratuitement** grâce à OctoBot cloud lorsqu'un backtesting est effectué en utilisant le ChatGPTEvaluator sur les paires de trading et les time frames utilisés par <a href="https://www.octobot.cloud/explore" rel="nofollow">les stratégies du cloud OctoBot</a> qui utilisent également le ChatGPTEvaluator. + +### Gestion du temps + +Le backtesting fonctionne en appliquant une stratégie à l'aide de données du passé. Lorsque vous appliquez une stratégie, le moteur de backtesting simule le passage du temps à partir du début des données de backtesting, et ce jusqu'à la fin de la période sélectionnée. +Le backtesting itère sur les bougies et chaque itération effectue les opérations suivantes : + +1. Mettre à jour de la bougie actuelle pour chaque paires de trading et chaque time frame +2. Vérifier si les ordres ouverts doivent être exécutés en fonction des nouvelles données de prix +3. Déclanchement d'un cycle d'évaluation pour chaque paires de trading: + 1. Envoie des nouvelles nouvelles bougies aux évaluateurs + 2. Activation des stratégies pour résumer les analyses des évaluateurs + 3. Activation des trading modes pour créer ou annuler des ordres +4. Vérifier si des ordres doivent être exécutés instantanément (par exemple, les ordres au marché) + +### Plusieurs paires de trading + +Lorsque vous sélectionnez un fichier contenant plusieurs paires de trading, à chaque nouveau tick temporel, les bougies associées (si il y en a de nouvelles) seront envoyées aux évaluateurs. Cet envoi se fait séquentiellement, une paire après l'autre. + +### Exécution des ordres + +Lors d'un backtest, OctoBot n'a accès qu'aux bougies historiques. Cela signifie que pour déterminer si un ordre doit être exécuté, il examinera la bougie la plus récente. + +:::info + Vous pouvez améliorer la précision des exécutions d'ordres lors du backtesting + en sélectionnant un time frame court dans votre fichier de données. Cela + rendra votre backtesting plus lent mais cela peut être utile si l'exécution + des ordres doit être précise dans le temps. +::: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/frequently-asked-questions-faq.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/frequently-asked-questions-faq.md new file mode 100644 index 0000000000..431dbb3210 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/frequently-asked-questions-faq.md @@ -0,0 +1,114 @@ +--- +title: "FAQ" +description: "Des questions lors de l'utilisation d'OctoBot ? Consultez les questions les plus courantes de la communauté OctoBot et trouvez des réponses détaillées dans notre FAQ." +sidebar_position: 7 +--- + + + +# Foire aux quesitons (FAQ) + +:::info + La traduction française de cette page est en cours. +::: + +## Pourquoi mon OctoBot ne crée-t-il pas d'ordres ? + +Avant de créer un ordre (en utilisant le simulateur de trading ou le trading réel), OctoBot demande à la plateforme d'échange quelles sont ses exigences minimales (et maximales) pour tout ordre. +Lors de la création d'un ordre (suite à un signal d'achat ou de vente) ces exigences sont vérifiées. Si l'ordre ne respecte pas ces exigences, il ne sera pas transmis à la plateforme d'échange. + +The most common case of signals without created orders is when there is +**not enough funds** of the required asset to proceed with an order. + +Example: not enough **USD** to buy BTC for a BTC/**USD** **buy** signal. + +> In [trading simulator](/guides/octobot-usage/simulator) and [backtesting](/guides/octobot-usage/backtesting) modes, OctoBot uses a simulated portfolio called +`"starting-portfolio"` that is defined in the +[trading simulator configuration](/guides/octobot-usage/simulator.md#starting-portfolio). + +## À quelle fréquence mon OctoBot va-t-il trader ? + +Cela peut se produire une fois par semaine ou 5 fois par minute, cela dépend de la stratégie que votre OctoBot utilise. + +Par exemple : lorsque vous utilisez les paramètres par défaut, l'évaluateur de stratégie mixte simple utilise un intervalle d'une heure comme le plus court. +Étant donné qu'il s'agit d'une stratégie basée sur l'évaluation technique, elle se mettra à jour toutes les heures. +Dans cette configuration, votre OctoBot créera de nouvelles transactions chaque fois qu'il détecte une opportunité, soit toutes les heures. +Il peut y avoir des heures sans opportunité et sans création d'ordre. + +## J'ai mis à jour mon OctoBot et il ne démarre plus. + +Cela est probablement dû à un problème dans votre dossier **tentacles** folder. +Essayez de le supprimer et de redémarrer votre OctoBot, il téléchargera les dernières versions de chaque tentacle et devrait résoudre le problème. + +## Comment suivre les activités de trading de mon OctoBot ? + +Lorsque votre OctoBot passe un ordre ou a un ordre qui est exécuté, il apparaîtra sur l'interface web. +L'interface web affiche la liste des ordres en cours et la liste des ordres exécutés. + +Vous pouvez également recevoir des notifications Telegram et bientôt Discord concernant la passation des ordres et les transactions. + +## Quelle partie de mon portfolio est tradée par OctoBot ? + + +OctoBot will consider it can trade 100% of the portfolio you give it. +However how this funds will be used (size of orders, orders frequency, +...) depends on your risk setting and the trading mode you are using. + +## Comment configurer le portfolio de départ en backtesting ? + + +Each [backtesting](/guides/octobot-usage/backtesting) run is using the +[trading simulator configuration](/guides/octobot-usage/simulator.md#starting-portfolio) +as a base. + + +## Pourquoi mon marché de référence change-t-il en backtesting ? + + +The reference market is automatically switched to the base of the traded +pair in [backtesting](/guides/octobot-usage/backtesting) to compute more accurate profitability. + +Example: a backtesting on ETH/**BNB** would make **BNB** the temporary +reference market for this backtesting. + +## Combien de mes fonds sur plateforme d'échange seront tradés par OctoBot ? + +For now, OctoBot uses all the available funds to trade. Therefore it's +possible that 100% of the exchange funds on an account will be traded. + +## Pourquoi est-ce que le backtesting n'utilise pas toutes les données disponibles ? + +[OctoBot backtesting](/guides/octobot-usage/backtesting) is always using the **maximum available data allowing to keep a realistic simulation**. + +However exchange are usually not giving all of their data: they give the +last X candles (500 for binance). Therefore a regular backtesting data +file has 500 1hour (1h) candles, 500 1minute (1m) candles etc. These +candles are always the most recent ones. That means that when running a +backtesting on 1h and 1d time frames, the maximum backtesting range is +not 1h and 1d with 500 candles each but the time range where **both** 1h +and 1d have data: there the past 500 hours (500 1h candles and +approximately 20 1d candles). + +As an example, in a backtesting with 1m and 1d candles: the common time +range in 1d is `500/(60*24) = 0.35` which means the whole backtesting is +carried out with the data of one day: the last daily candle of the 500 +1d candle only while using 100% of the shortest time frame: 1m (which +all happened during this one day). + +## "RuntimeError: Event loop is closed" dans mes logs d'exécution, y a-t-il un problème ? + + +This error (or something very similar) might appear in your OctoBot's logs: + +``` +<function _ProactorBasePipeTransport.del at 0x000001064DE8A310> +Traceback (most recent call last): + File "asyncio\proactor_events.py", line 116, in del + File "asyncio\proactor_events.py", line 108, in close + File "asyncio\base_events.py", line 719, in call_soon + File "asyncio\base_events.py", line 508, in _check_closed +RuntimeError: Event loop is closed +``` + +This is a minor issue with the current Windows implementation of the asynchronous +libraries OctoBot is using. It has absolutely no effect and can be completely ignored. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/futures-trading-with-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/futures-trading-with-octobot.md new file mode 100644 index 0000000000..47de538e01 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/futures-trading-with-octobot.md @@ -0,0 +1,35 @@ +--- +title: "Trading de Futures" +description: "OctoBot peut être utilisé pour configurer et automatiser de nombreuses stratégies de trading de Futures sur Binance, Bybit, Kucoin et OKX." +sidebar_position: 2 +--- + +# Trader des Futures avec OctoBot + +OctoBot peut être utilisé pour configurer et automatiser de nombreuses stratégies de trading de Futures sur plusieurs échanges. + +## Trading Modes pris en charge +Les [Trading Modes](../octobot-trading-modes/trading-modes) suivants peuvent être utilisés pour trader avec des Futures: +- [DCA Trading Mode](../octobot-trading-modes/dca-trading-mode) +- [Dip Analyser Trading Mode](../octobot-trading-modes/dip-analyser-trading-mode) +- [TradingView Trading Mode](../octobot-trading-modes/tradingview-trading-mode) +- [Daily Trading Mode](../octobot-trading-modes/daily-trading-mode) + +## Echanges pris en charge +Les échanges suivants peuvent être utilisés pour trader les Futures sur OctoBot: +- [Binance](exchanges/binance) +- [Bybit](exchanges/bybit) +- [Kucoin](exchanges/kucoin) + +## Configuration du levier + +La valeur actuelle du levier à utiliser pour le trading de Futures avec un profil peut être définie depuis la page configuration du Trading Mode activé qui est disponible depuis la [configuration de votre profil](../octobot-configuration/profile-configuration). + +![access octobot trading mode config from profiles](/images/guides/configuration/access-octobot-trading-mode-config-from-profiles.png) + +Remarque : Le trading de futures doit être activé sur l’exchange de votre profil pour que le paramètre de levier apparaisse dans la configuration de votre trading mode. + + +## Marge croisée et marge isolée + +Pour le moment, seule la marge isolée est prise en charge par OctoBot. La marge croisée ne devrait pas être utilisée pour trader avec OctoBot. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/having-multiple-octobots-on-one-computer.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/having-multiple-octobots-on-one-computer.md new file mode 100644 index 0000000000..619da2be2f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/having-multiple-octobots-on-one-computer.md @@ -0,0 +1,44 @@ +--- +title: "Avoir plusieurs OctoBots" +description: "Guide sur comment utiliser plusieurs OctoBots sur le même ordinateur. Utilisez plusieurs comptes sur le même échange et investissez avec plusieurs stratégies" +sidebar_position: 6 +--- + +# Avoir plusieurs OctoBots sur un ordinateur + +OctoBot est conçu pour être léger. Bien qu'un OctoBot qui effectue de très nombreuses transactions sur plusieurs crypto et échanges peut nécessiter beaucoup de CPU et de RAM sur votre ordinateur, OctoBot nécessite généralement moins de 1 Go de RAM et 1% de CPU. + +Lancer autant d'OctoBot que qu'il vous est nécessaire sur un seul ordinateur est généralement possible, voici comment. + +## Comment exécuter plusieurs OctoBot sur un ordinateur ? + +Voici les étapes pour démarrer un autre OctoBot sur votre ordinateur : +1. Arrêtez votre OctoBot actuel s'il est en cours d'exécution +2. Dupliquez le dossier entier de votre OctoBot actuel +3. À partir de votre nouveau dossier, démarrez le nouvel OctoBot. Il démarrera à la même adresse web que le bot précédent +4. Changez la valeur du port de l'interface web du nouvel OctoBot (voir le [guide de l'interface web](../octobot-interfaces/web#configuration)) +5. Redémarrez votre nouvel OctoBot. Attention : l'adresse de l'interface de votre nouvel OctoBot contiendra désormais la nouvelle valeur du port. Par exemple : si l'adresse de votre premier OctoBot était `http://localhost:5001/`, alors `5001` était son port. Si vous avez utilisé `5002` pour votre autre OctoBot, alors l'adresse de votre autre OctoBot est maintenant `http://localhost:5002/` + +Si votre port initial était `5001`, alors en démarrant votre OctoBot initial (à partir du dossier initial), le bot démarrera sur `http://localhost:5001/`. En démarrant votre autre bot, à partir du deuxième dossier, il démarrera sur `http://localhost:5002/`. Les deux bots peuvent être utilisés simultanément et se connecter au compte d'échange de votre choix. + +## Pourquoi utiliser un autre dossier et port pour votre OctoBot ? + +Chaque OctoBot individuel nécessite seulement deux éléments de votre ordinateur pour fonctionner : +1. **Un dossier dédié pour son exécution**. Cela est nécessaire pour que le bot ait sa propre configuration et gestion des journaux +2. **Un port d'interface web unique**. Deux OctoBots ne peuvent pas utiliser le même port d'interface web. Utiliser la même valeur de port empêchera votre deuxième OctoBot de démarrer son interface web. + + +## Les bénéfices à utiliser plusieurs OctoBots + +Alors qu'un seul OctoBot peut être utilisé pour échanger autant de paires de trading que nécessaire sur plusieurs échanges, l'exécution de plusieurs OctoBots permet de : +- Trader sur plusieurs comptes avec le même échange +- Diviser un portefeuille en différentes cryptos qui seront investies en utilisant différentes stratégies +- Trader à la fois sur les marchés spot et futures sur le même échange +- Utiliser plusieurs stratégies à la fois avec du trading réel et / ou [en trading simulé sans risque](simulator) + + +## Les limites associées à l'utilisation de plusieurs OctoBots + +- Le **rate limit** : Les échanges ont des politiques de rate limit qui peuvent empêcher plusieurs OctoBots fonctionnant à partir de la même adresse IP de récupérer correctement les données des marchés. Lors de l'utilisation de plusieurs OctoBots sur le même échange, il est important de s'assurer de ne pas recevoir d'erreurs liées aux rate limits, sinon votre adresse IP pourrait être temporairement bannie. +- La **bande passante** : L'utilisation de plusieurs OctoBots augmentera la bande passante requise pour récupérer et mettre à jour toutes les données de marché nécessaires. Assurez-vous toujours que votre connexion internet peut gérer cette augmentation correctement, sinon vos stratégies s'exécuteront avec un délai. +- Les **RAM et CPU** : Lors de l'exécution de plusieurs OctoBots sur un ordinateur peu performant or surchargé, vos bots pourraient être ralentis si la RAM ou le CPU sont insuffisants. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/simulator.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/simulator.md new file mode 100644 index 0000000000..80e4fcf5ff --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/simulator.md @@ -0,0 +1,54 @@ +--- +title: "Simulateur" +description: "Vous préférez trader avec de l'argent simulé avant d'utiliser vos fonds réels ? Utilisez le simulateur de trading OctoBot pour exécuter n'importe quelle stratégie en utilisant le trading virtuel." +sidebar_position: 1 +--- + + + +# Simulateur + +:::info + La traduction française de cette page est en cours. +::: + +OctoBot peut être utilisé en mode simulation. Dans ce mode, OctoBot simulera des transactions en utilisant exactement le même processus que dans le mode de trading réel. + +![octobot trading settings from profiles](/images/guides/configuration/octobot-trading-settings-from-profiles.png) + +La seule différence avec un véritable trader réside dans le portefeuille initial défini dans la configuration du simulateur de trading. +Chaque profil possède son propre portefeuille simulé. +Ce portefeuille sera géré par OctoBot, et les ordres simulés utiliseront ces cryptomonnaies disponibles comme base. + +Le simulateur de trader utilisera les dernières transactions des plateformes d'échange pour déterminer si les ordres actuels auraient été exécutés ou non. +S'ils auraient été exécutés, les ordres simulés sont exécutés, et le portefeuille simulé actuel est mis à jour en conséquence. + +## Frais d'échanges + +Fees in % to be deducted at simulated orders completion in simulated orders and [backtesting](backtesting). Examples: +- A maker fee configured to `0.1` corresponds to a 0.1% trading fee on marker orders. +- A taker fee configured to `1.2` corresponds to a 1.2% trading fee on taker orders. + +## Portfolio d'origine + +This is the imaginary portfolio given to the trader simulator to create +its orders with. It can contain any amount of any cryptocurrency. If +these cryptocurrencies are in the **crypto-currencies** configuration, +they will be traded as if they were from a real portfolio. + +The simulated portfolio is kept between instances of your OctoBot is simulated trading. It will be reset to the value of your profile's Starting portfolio when: +- Clicking `Reset history` on your portfolio view +- Changing the value of your current profile Starting portfolio + +The starting portfolio is also **used for backtesting**. + +## Mode, marché de référence et risque + + +These parameters are defined in the **trading** section, which is used by the trader simulator as +well as the real trader. This **trading** section is described on +the [trading settings](/guides/octobot-configuration/profile-configuration#trading) + +## Trader réel + +Additionally to the simulated trading system, a real trader is available in OctoBot. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/strategy-designer.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/strategy-designer.mdx new file mode 100644 index 0000000000..465fd0e881 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/strategy-designer.mdx @@ -0,0 +1,143 @@ +--- +title: "Strategy Designer" +description: "Utilisez le meilleur outil pour créer, tester et optimiser votre stratégie de trading. Le Strategy Designer vous permet d'analyser et d'améliorer vos stratégie" +sidebar_position: 4 +--- + + + +# Strategy Designer + +Le Strategy Designer est un outil avancé pour créer, backtester et optimiser efficacement vos stratégies de trading. Il est disponible avec l'[extension premium d'OctoBot](/guides/octobot-configuration/premium-octobot-extension). + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="V4Z1xUhqWz8" title="Le Strategy Designer d" /> + +**[Démarrer votre OctoBot](https://www.octobot.cloud)** + +Le Strategy Designer vient en complément de l'outil intégré de [backtesting](backtesting) d'OctoBot et permet d'aller beaucoup plus loin lors de la création, du test et de l'optimisation d'une stratégie de trading. + +## Backtester une stratégie + +![résultats du strategy designer octobot sur doge btc shib](/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png) + +### Aperçu +Lorsque vous backtestez une stratégie à l'aide du Strategy Designer, OctoBot utilise le même moteur que celui utilisé pour le [backtesting standard](backtesting). Tout ce qui est mentionné dans le contexte du backtesting standard s'applique également au backtesting avec le Strategy Designer. + +En plus des fonctionnalités offertes par le backtesting régulier, le Strategy Designer offre les possibilités suivantes : +- Enregistrer un historique complet des exécutions de backtesting avec leurs résultats +- Utiliser des graphiques pour visualiser facilement le comportement de votre stratégie tout au long de l'exécution du backtesting +- Comparer les résultats de plusieurs exécutions de backtesting +- Exécuter un backtesting avec des profils dédiés afin de ne pas affecter votre OctoBot actif tout en optimisant une stratégie + +### Historique des exécutions du backtesting + +Avec le Strategy Designer, vous pouvez consulter les précédentes exécutions de backtesting et sélectionner celles que vous souhaitez voir dans la vue principale. + +<div style={{textAlign: "center"}}> +![strategy designer octobot pour explorer les backtestings du passé](/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings.png) +</div> + +L'explorateur permet également de filtres efficacement les exécutions que vous souhaitez afficher et ce que vous voulez afficher lors de la comparaison des exécutions du backtesting. + +- Vous pouvez trier vos exécutions par rentabilité tout en filtrant les exécutions avec une rentabilité supérieure à 20%. +- Ou peut-être souhaitez-vous sélectionner toutes les exécutions où les take profits sont activées et où les bénéfices sont négatifs, c'est également possible. + +![strategy designer octobot pour explorer les backtestings du passé avec colonnes personnalisées](/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings-customize-columns.png) +L'explorateur d'exécution du backtesting dispose d'une vue détaillée qui peut être configurée selon vos préférences. + +### Graphiques historiques + +Les résultats de chaque execution du Strategy Designer peuvent être affichés sous forme de graphiques avec le PNL historique, la valeur du portefeuille, ainsi que sur les trades d'entrées et de sorties. +![graphiques historiques du strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-historical-charts.png) + +L'ensemble de l'interface peut également être redimensionné pour se concentrer sur la zone qui compte. + +### Comparaison des executions + +Vous pouvez utiliser l'explorateur pour sélectionner une execution. Vous pouvez également sélectionner plusieurs executions pour les afficher ensemble. Cela permet de visualiser facilement les résultats de l'utilisation de différentes stratégies ou de variations de paramètres sur la même stratégie. +![utiliser le strategy designer octobot pour comparer les résultats de backtesting](/images/guides/strategy-designer/octobot-strategy-designer-compare-run-results.png) + +Tout ce qui est affiché est compatible avec ce comparateur: vous pouvez comparer le PNL, l'historique du portefeuille, les trades et plus encore. + +### Profil dédié au backtesting + +Chaque profil live que vous utilisez sur OctoBot contient également un profil de backtesting activé par le Strategy Designer. +Par conséquent, chaque profil live que vous sélectionnez est utilisé comme base pour le profil de backtesting associé. Ce profil de backtesting est créé automatiquement lorsque vous accédez au Strategy Designer et vous permet d'en éditer n'importe quelle configuration sans affecter la session de trading live. + +En sélectionnant un autre profil live depuis l'onglet des profils, le profil de backtesting associé sera également sélectionné. De cette manière, vous pouvez avoir autant de profils de backtesting que vous le souhaitez, il suffit juste de se rappeler que chaque profil live est lié à son profil de backtesting utilisé dans le Strategy Designer. + +<div style={{textAlign: "center"}}> +![utiliser un profile du strategy designer octobot en live trading](/images/guides/strategy-designer/octobot-strategy-designer-use-as-live-profile.png) +</div> + +Les profils de backtesting peuvent également être convertis en profils live à tout moment afin que vous puissiez commencer à trader avec votre stratégie optimisée par les tests historiques. + +## Création d'une stratégie + +Lorsque vous utilisez le Strategy Designer, vous pouvez créer une stratégie de trading grâce à un processus simple étape par étape ou configurer le profil de backtesting actuel en direct. + +### Création d'une toute nouvelle stratégie + +![sélectionner les coins pour créer une nouvelle statégie avec le strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-coins.png) +1. Sélectionnez les paires sur lesquelles vous voulez trader. + +![sélectionner le portfolio pour créer une nouvelle statégie avec le strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-portfolio.png) +2. Configurez votre portefeuille initial pour votre profil. + +![sélectionner le trading mode pour créer une nouvelle statégie avec le strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-trading-mode.png) +3. Sélectionnez et configurez le [trading mode](../octobot-trading-modes/trading-modes) à utiliser. + +![sélectionner et configurer les evaluators pour créer une nouvelle statégie avec le strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-and-configure-evaluators.png) +![configurer les evaluators pour créer une nouvelle statégie avec le strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-configure-evaluators-settings.png) +4. Sélectionnez et configurez la [stratégie et les évaluateurs](../octobot-trading-modes/trading-modes#evaluators-responsabilities) à utiliser. + +![résumé de la nouvelle statégie avec le strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-summary.png) +5. Vérifiez que tout est correctement configuré et lancez un test historique avec votre nouvelle stratégie. +Astuce : vous pouvez donner un nom à votre test historique pour l'identifier rapidement dans l'explorateur. + +### Modification de la stratégie actuelle + +Si vous souhaitez simplement modifier le profil de backtesting actuel, vous pouvez utiliser les raccourcis de configuration directement disponibles depuis l'interface du Strategy Designer. +![confuguration du profil actuel avec strategy designer octobot +](/images/guides/strategy-designer/octobot-strategy-designer-edit-current-profile.png) + +Modifiez rapidement vos trading modes, la configuration de la stratégie et des évaluateurs. + +<div style={{textAlign: "center"}}> +![configuration du profil de backtesting avec strategy designer octobot +](/images/guides/strategy-designer/octobot-strategy-designer-edit-current-backtesting.png) +</div> + +Ou configurez le contexte de votre backtesting. + +<div style={{textAlign: "center"}}> +![nouveau backtesting avec strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-new-backtesting.png) +</div> + +Et lancez un nouveau test historique avec votre nouvelle configuration. + +## Configurer le Strategy Designer + +### Campagnes d'optimisation +Vos backtestings peuvent être associées à une campagne d'optimisation. +Les campagnes d'optimisation n'affectent pas les résultats de vos backtestings, mais peuvent être utilisées pour sélectionner les backtestings à afficher dans l'explorateur. + +<div style={{textAlign: "center"}}> +![sélection de campagne d'optimisation avec strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-campaigns-selector.png) +</div> + +Vous pouvez avoir autant de campagnes que vous le souhaitez afin de différencier facilement les backtestings provenant de différents contextes et éviter ainsi d'avoir à filtrer manuellement des simulations antérieures ou non liées. + +### Affichage + +Parfois, vous pourriez vouloir modifier la façon dont le Strategy Designer affiche les résultats. +<div style={{textAlign: "center"}}> +![configuration d'affichage du strategy designer octobot](/images/guides/strategy-designer/octobot-strategy-designer-display-settings.png) +</div> +Afficher moins d'éléments peut parfois accélérer l'affichage ou même éviter des problèmes liés à la mémoire RAM du navigateur lorsque vous affichez une grande quantité de données sous forme graphique. + +Pour résoudre les problèmes de performances du navigateur, vous pouvez réduire la valeur du seuil `Lines plot instead of candlesticks threshold`. Lorsque ce seuil est atteint, les bougies sont transformés en lignes sur le graphique. Contrairement aux bougies, les lignes sont très rapides à traiter sur le navigateur car elles bénéficient d'une accélération GPU. + +**[Utiliser le Strategy Designer](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/understanding-profitability.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/understanding-profitability.md new file mode 100644 index 0000000000..72f41ffc98 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot-usage/understanding-profitability.md @@ -0,0 +1,46 @@ +--- +title: "Comprendre la profitabilité" +description: "Vous avez du mal à comprendre comment fonctionnent la profitabilité ou les profits et pertes (PNL) dans OctoBot, ou comment les réinitialiser ? Consultez notre guide." +sidebar_position: 5 +--- + + + +# La profitabilité dans OctoBot + +:::info + La traduction française de cette page est en cours. +::: + +## Historique de profitabilité + +Every asset in OctoBot is valued using the **reference market** setting +(available in [Trading settings](/guides/octobot-configuration/profile-configuration#reference-market)). +Profitably follows this principle. + +![home](https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/wiki_resources/home.jpg) + +To compute its profitability, OctoBot evaluates the value of all its +traded assets (the ones available for trading in its configuration) by +getting their value in reference market. Profitability is the difference +between the total value of the traded assets when OctoBot +started and the total value of current holdings at the moment +profitability is displayed. + +## Historique de PNL + +Profit and loss (PNL) history, which is the profit from each historical trade independently from your portfolio assets historical valuation, is displayed on the "Trading" tab. + +You can see it as "pure profits or losses from your trading strategy". + +![pnl history](/images/guides/pnl.png) + +> Please note that PNL history is not available on every trading mode. + + +## Réinitialiser l'historique de profitabilité + +You can reset your OctoBot's profitability history from the **Portfolio** tab. + +## Réinitialiser l'historique de PNL +Profit and loss history is computed using trades history. You can reset it by clearing the trades history from the **Trading** tab. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot.md new file mode 100644 index 0000000000..07221be147 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/guides/octobot.md @@ -0,0 +1,100 @@ +--- +title: "Lancer votre OctoBot" +description: "Vous avez des questions sur OctoBot, le robot de trading open source ? Voici les guides sur comment installer votre bot localement ou en utilisant un cloud et comment trader en utilisant Telegram, ChatGPT ou TradingView." +sidebar_position: 1 +--- + + + +# Lancer votre OctoBot + +:::info +Cette section "Robot de trading" est dédiée aux utilisateurs d'<a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">OctoBot open source</a>. +::: + +## Configurer votre OctoBot + +Il existe deux façons d'utiliser OctoBot: + +- Avec [www.octobot.cloud](https://www.octobot.cloud/fr) pour facilement investir en crypto +- Avec [le robot de trading open source OctoBot](https://www.octobot.cloud/trading-bot) pour créer et tester ses propres stratégies de trading + +### Utiliser OctoBot cloud + +Utiliser [OctoBot cloud](/fr) directement pour facilement investir avec des **paniers de crypto**, bénéficier les **stratégies d'investissement pré-configurées** et **automatiser vos stratégies TradingView**. + +Dans ce cas, [www.octobot.cloud](https://www.octobot.cloud/fr) est fait pour vous et la **section [Investir](/investing/introduction)** des guide est ce que vous recherchez. + +### Utiliser le robot de trading open source + +<div style="text-align: center; margin: 1.5rem 0"> + <iframe width="100%" height="400" style="max-width: 640px; border-radius: 8px" src="https://www.youtube.com/embed/TJUU62e1jR8" title="OctoBot - Robot de trading crypto open source" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> +</div> + +Avec [le robot de trading OctoBot](https://www.octobot.cloud/trading-bot) vous pouvez créer, tester et configurer votre propre stratégie d'investissement, que ce soit avec les stratégies d'OctoBot cloud ou les votre. Il est possible installer votre robot de trading [sur votre ordinateur](octobot-installation/install-octobot-on-your-computer) ou sur un [serveur dans le cloud](octobot-installation/cloud-install-octobot-on-digitalocean). + +La **section Robot de trading** des guides est dédiée à OctoBot en tant que + +<a href="https://github.com/Drakkar-Software/OctoBot" rel="nofollow">robot de trading open source disponible sur GitHub</a> +. + +<div style="text-align: center"> + <a href="octobot-installation/install-octobot-on-your-computer"><strong>Installer OctoBot</strong></a> +</div> + +## Configuration et trading modes + +OctoBot propose de nombreuses stratégies de trading : les [trading modes](octobot-trading-modes/trading-modes). Chaque mode de trading est unique et consiste en une technique de trading différente. Les modes de trading peuvent : + +- Trader sur les marchés (spot) au comptant ou futures (à terme). +- Utiliser des statistiques et une analyse technique pour trouver les meilleures entrées et sorties de trade. +- Trader avec des stratégies d'[IA](https://www.octobot.cloud/features/ai-trading-bot) et des [prédictions de ChatGPT](octobot-trading-modes/chatgpt-trading). +- Utiliser des [algorithmes de market-making](octobot-trading-modes/grid-trading-mode) pour minimiser les risques et garantir des gains réguliers et petits. +- Trader sur des signaux de plateformes telles que [TradingView](octobot-trading-modes/tradingview-trading-mode). +- Appliquer les meilleures [stratégies d'OctoBot cloud](/fr). + + +<div style="text-align: center; margin: 1.5rem 0"> + <iframe width="100%" height="400" style="max-width: 640px; border-radius: 8px" src="https://www.youtube.com/embed/Exdl94cmMDQ" title="Configuration de Trading dans OctoBot" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> +</div> + +Faites quelque chose de totalement différent basé sur vos idées : + +- [Personnalisez](octobot-configuration/profile-configuration) votre OctoBot pour le faire trader selon vos idées +- Ou codez et testez directement vos idées en utilisant [OctoBot script](/guides/octobot-script). + +## Discutez directement avec OctoBot + +Vous pouvez commander à distance votre OctoBot en le connectant à [Telegram](octobot-interfaces/telegram). Vous pouvez suivre ce que fait votre robot, regarder son portefeuille, ses profits, déclencher une vente d'urgence... Accédez à votre OctoBot où que vous soyez, quand vous le voulez, grâce à son interface Telegram. + +<div style="text-align: center"> + +![telegram connection to octobot illustrated by telegram logo](/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png) + +</div> + +Voir [la configuration de l'interface Telegram](octobot-interfaces/telegram) pour plus de détails sur l'interface Telegram d'OctoBot. + +## Optimizez votre OctoBot avec le backtesting + +En utilisant le backtesting d'OctoBot, testez votre stratégie de trading actuelle en utilisant des données historiques des marchés sur lesquels vous +voulez trader. La plupart des configurations d'OctoBot sont testables en +[backtesting](octobot-usage/backtesting). +Affinez la configuration de votre bot jusqu'au moindre détail et maximisez vos profits. + +![octobot backtesting result summary](/images/guides/backtesting/octobot-backtesting-result-summary.png) + +## Codez directement vos stratégies avec OctoBot Script + +Transformez vos idées de trading en stratégie testée et optimisée en utilisant [OctoBot Script](/guides/octobot-script). +Codez directement votre stratégie en python tout en bénéficiant de tous les outils OctoBot et d'un système de reporting et de visualisation extrêmement flexible. Analysez exactement ce qui se passe lorsque vous exécutez vos idées sur des données passées et créez vos meilleures stratégies. + +![rapport octobot pro avec btc usdt avec graphiques de trades et portfolio et rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +## Plateformes d'échange supportées + +Trouvez la liste des plateformes d'échange supportées et partenaires dans [le résumé des échanges](/guides/exchanges). + +## Guides du Developpeur + +Trouvez les guides des développeurs dans [la section des développeurs](/guides/developers). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-binance-account-to-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-binance-account-to-octobot.md new file mode 100644 index 0000000000..a32a10997b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-binance-account-to-octobot.md @@ -0,0 +1,126 @@ +--- +title: "Se connecter à Binance" +description: "|" +sidebar_position: 22 +--- + + + +# Connecter votre compte Binance à OctoBot cloud + +Pour automatiser les stratégies d'investissement de votre choix sur votre propre compte Binance, il est nécessaire d'autoriser OctoBot à accéder à une partie de votre compte. + +Cela est possible en utilisant des clés d'API ou `API Keys`. Les API Keys sont un moyen d'authentification standard et sécurisé qui est très souvent utilisé pour connecter les logiciels ensemble. + +Si vous vous demandez ce qu'est une `API Key` et pourquoi OctoBot utilise cette méthode, jetez un œil à notre [présentation des API Keys de plateformes d'échange](what-is-an-exchange-api-key). + +## Connecter votre compte Binance grâce aux API Keys + +Voici les 7 étapes simples pour connecter votre compte Binance à OctoBot cloud et automatiser vos stratégies d'investissement. + +### 1. Connectez-vous à votre compte Binance + +Rendez-vous sur <a href="https://accounts.binance.com/en/register?ref=528112221" rel="nofollow">binance.com</a> et connectez-vous à votre compte (ou créez un compte). + +![connection au compte binance](/images/guides/binance/binance-account-authentification.png) + +### 2. Allez sur Gestion des API + +Sélectionnez "Compte" et "Gestion des API" depuis votre Tableau de bord ou "Gestion des API" depuis le menu déroulant supérieur droit de votre icône de profil. +![compte lien gestion des api](/images/guides/binance/compte-lien-gestion-des-api.png) + +![compte lien gestion des api depuis navbar](/images/guides/binance/compte-lien-gestion-des-api-depuis-navbar.png) + +### 3. Créer une nouvelle API Key + +Cliquez sur "Créer une API", sélectionnez "Générée par le système" et nommez la comme vous voulez. Le nom de l'API Key est uniquement visible pour vous et vous permet de vous souvenir de l'objectif de cette clé. +![apis liste creer nouvelle api](/images/guides/binance/apis-liste-creer-nouvelle-api.png) + +![selection api type](/images/guides/binance/selection-api-type.png) + +![selection nom de api](/images/guides/binance/selection-nom-api.png) + +### 4. Vérification de sécurité + +Complétez la vérification de sécurité pour créer l'API Key. +![creer api verification securite](/images/guides/binance/creer-api-verification-securite.png) + +### 5. Ajouter la permission de trader et la liste blanche d'IP + +Votre API Key est maintenant créée ! + +La dernière étape sur Binance est l'ajout de la permission de trading afin qu'OctoBot soit en mesure de créer et annuler des ordres sur ce compte en utilisant cette API Key. Pour ce faire : + +1. Sélectionnez "Modifier les restrictions" + +2. Choisissez "Restreindre l'accès aux adresses IP de confiance uniquement" + +3. Cliquez sur le bouton "copier" depuis OctoBot cloud pour copier la liste blanche d'IP + +4. Collez la liste dans le champ qui vient d'apparaître + +5. Cliquez sur "Confirmer". + +6. Cochez "Activer le trading Spot et sur marge". + +7. Enfin cliquez sur "Sauvegarder". + +![api cree modifier restrictions](/images/guides/binance/api-cree-modifier-restrictions.png) + +![api cree ajouter trading permission](/images/guides/binance/api-cree-ajouter-trading-permission.png) + +![api cree ajouter trading permission sauvegarder](/images/guides/binance/api-cree-ajouter-trading-permission-sauvegarder.png) + +![api restreindre aux ips de confiance](/images/guides/binance/api-restreindre-ips-de-confiance.png) + +Note: Toutes les permissions en dehors de "Permettre la lecture" et "Activer le trading Spot et sur marge" doivent rester décochée. + +### 7. Ajouter votre API Key à votre compte OctoBot cloud + +Votre API key est maintenant prête à être utilisée par OctoBot ! + +Tout ce qu'il vous reste à faire est de copier/coller les valeurs de `API Key` et `Secret Key` +dans la configuration de votre compte Binance sur OctoBot cloud. Cette étape peut être réalisée au lancement d'une stratégie de trading avec un compte réel ou depuis votre profil sur [octobot.cloud](https://www.octobot.cloud/) + +Remarque : Quand vous ajoutez une API Key sur OctoBot cloud, vous avez la possibilité de la nommer. Cette étape, semblable à celle sur Binance, permet de choisir un nom facilement identifiable pour votre configuration Binance. +![api cree key selectionnees](/images/guides/binance/api-cree-key-selectionnees.png) + +![ajouter api key a octobot cloud depuis start de strategie](/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-start-de-strategie.png) + +<div style="text-align: center"> + <em>Ajouter une API Key au lancement d'une stratégie</em> +</div> + +![ajouter api key a octobot cloud depuis profil](/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-profil.png) + +<div style="text-align: center"> + <em>Ajouter une API Key depuis <a href="https://www.octobot.cloud/fr/account" rel="nofollow">votre profil</a></em> +</div> + +Votre compte Binance peut maintenant être utilsié sur OctoBot cloud ! + +:::info + Veuillez noter que lors du démarrage d'un bot, une partie des fonds disponibles dans le portefeuille lié à votre API Key peuvent être vendus. Cela inclut les stablecoins, les fonds en monnaie fiduciaire (comme les euros) ainsi que les cryptomonnaies échangées par la stratégie que vous avez sélectionnée. Cela fait partie de [l'optimisation de portefeuille](invest-with-your-strategy#1-optimisation-du-portefeuille). +::: + +## Résolution de problèmes + +### API key erronée: _Incorrect API keys_ + +Si vous obtenez l'erreur `Incorrect API keys`, cela généralement signifie que: + +- Votre API Key ou Secret Key n'a pas été copiée correctement depuis Binance +- Vous fait une erreur lors de la copie de la liste blanche d'IP +- Vous avez séléctionné le mauvais échange (assurez vous d'avoir sélectionné Binance) + +### Trading permissions: _Incorrect API restrictions: missing spot trading_ + +Si vous obtenez l'erreur `Incorrect API restrictions: missing spot trading`, il est nécessaire de modifier les restrictions de votre API Key sur binance afin de cocher "Activer le trading Spot et sur marge", comme expliqué [en étape 6](#6-ajouter-la-permission-de-trader). + +### Retraits activés: _Incorrect API restrictions: withdrawals enabled_ + +Si vous obtenez l'erreur `Incorrect API restrictions: withdrawals enabled`, alors vous devez décocher la permission `Activer les retraits`. Vous pouvez le faire en modifiant les restrictions de votre API Key, comme expliqué [en étape 6](#6-ajouter-la-permission-de-trader). + +### Autres questions + +Si vous avez d'autres questions ou si quelque chose n'est pas clair, n'hésitez pas à contacter l'équipe de support en utilisant la chatbox en bas à droite de l'écran sur [octobot.cloud](https://www.octobot.cloud/). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-coinbase-account-to-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-coinbase-account-to-octobot.md new file mode 100644 index 0000000000..de1603b201 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-coinbase-account-to-octobot.md @@ -0,0 +1,120 @@ +--- +title: "Se connecter à Coinbase" +description: "|" +sidebar_position: 24 +--- + + + +# Connecter votre compte Coinbase à OctoBot cloud + +Pour automatiser les stratégies d'investisements de votre choix sur votre propre compte Coinbase, il est nécessaire d'autoriser OctoBot à accéder à une partie de votre compte. + +Cela est possible en utilisant des clés d'API ou `API Keys`. Les API Keys sont un moyen d'authentification standard et sécurisé qui est très souvent utilisé pour connecter les logiciels ensemble. + +Si vous vous demandez ce qu'est une `API Key` et pourquoi OctoBot utilise cette méthode, jetez un œil à notre [présentation des API Keys de plateformes d'échange](what-is-an-exchange-api-key). + +## Connecter votre compte Coinbase grâce aux API Keys + +Voici les 5 étapes simples pour connecter votre compte Coinbase à OctoBot cloud et automatiser vos stratégies d'investisements. + +### 1. Connectez-vous à votre compte Coinbase + +Rendez-vous sur <a href="https://login.coinbase.com/signin" rel="nofollow">coinbase.com</a> et connectez-vous à votre compte (ou créez un compte). + +![connection au compte coinbase](/images/guides/coinbase/coinbase-account-login.png) + +### 2. Allez sur Gestion des API + +Rendez-vous sur la configuration de votre compte en cliquant sur l'icone de votre compte et sélectionnez "Settings". +![compte lien gestion des api](/images/guides/coinbase/account-setting-api-management.png) + +### 3. Créer une nouvelle API Key + +Scrollez vers le bas si nécessaire et cliquez sur "API". + +![setting du compte cliquer sur api](/images/guides/coinbase/account-setting-api-management-click-api.png) + +Cliquez sur "Create API Key with Coinbase Developer Platform (Recommended)". + +![apis liste creer nouvelle api](/images/guides/coinbase/apis-list-create-new-api.png) + +1. Nommez la comme vous voulez. Le nom de l'API Key est uniquement visible pour vous et vous permet de vous souvenir de l'objectif de cette clé. + +2. Sélectionnez le portefeuille que vous souhaitez utiliser avec votre OctoBot. Remarque : le portefeuille « Default » (par défaut) de Coinbase contient généralement vos fonds sur la version standard (non Avancée) de Coinbase. Si vous souhaitez utiliser d’autres fonds, veuillez transférer vos actifs vers un autre portefeuille Coinbase et le sélectionner avec votre API Key. + +3. **Pensez à cocher l'API-specific restriction "Trading"**. + +![sélection de l'api name restrictions](/images/guides/coinbase/select-api-name-and-restrictions.png) + +4. Cliquez sur le bouton "copier" depuis OctoBot cloud pour copier la liste blanche d'IP et collez la liste dans le champ `IP whitelist`. + +### 4. Sauvegarder votre clé d'API + +Maintenant que votre clé d'API est nommée, a la permission de "Trading" et la liste blanche d'IP est configurée, cliquez sur "Create & download". +Complétez la vérification de sécurité pour créer l'API Key. + +Votre clé d'API est créée. Ne pas fermer cette fenêtre tant que vous n'avez pas reporté ces informations sur OctoBot cloud. + +<div style="text-align: center"> + +![api key coinbase créée](/images/guides/coinbase/coinbase-api-key-created.png) + +</div> + +Note: Coinbase va vous demander de télécharger un fichier contenant les détails de la clé d'API. Ce téléchargement n'est pas nécessaire, ne téléchargez pas ce fichier ou supprimez le de votre ordinateur si vous l'avez téléchargé. + +### 5. Ajouter votre API Key à votre compte OctoBot cloud + +Votre API key est maintenant prête à être utilisée par OctoBot ! + +Tout ce qu'il vous reste à faire est de copier/coller les valeurs de `API Key`, `Secret Key` et passphrase dans la configuration de votre compte Coinbase sur OctoBot cloud. Cette étape peut être réalisée au lancement d'une stratégie de trading avec un compte réel ou depuis votre profil sur [octobot.cloud](https://www.octobot.cloud/) + +Remarque : Quand vous ajoutez une API Key sur OctoBot cloud, vous avez la possibilité de la nommer. Cette étape, semblable à celle sur Coinbase, permet de choisir un nom facilement identifiable pour votre configuration Coinbase. + +<div style="text-align: center"> + +![api cree key selectionnees](/images/guides/coinbase/api-creation-completed-selected-values.png) + +</div> + +![ajouter api key a octobot cloud depuis start de strategie](/images/guides/coinbase/add-api-key-to-octobot-cloud-from-strategy-start.png) + +<div style="text-align: center"> + <em>Ajouter une API Key au lancement d'une stratégie</em> +</div> + +![ajouter api key a octobot cloud depuis profil](/images/guides/coinbase/add-api-key-to-octobot-cloud-from-profile.png) + +<div style="text-align: center"> + <em>Ajouter une API Key depuis <a href="https://www.octobot.cloud/fr/account" rel="nofollow">votre profil</a></em> +</div> + +Votre compte Coinbase peut maintenant être utilsié sur OctoBot cloud ! + +:::info + Veuillez noter que lors du démarrage d'un bot, une partie des fonds disponibles dans le portefeuille lié à votre API Key peuvent être vendus. Cela inclut les stablecoins, les fonds en monnaie fiduciaire (comme les euros) ainsi que les cryptomonnaies échangées par la stratégie que vous avez sélectionnée. Cela fait partie de [l'optimisation de portefeuille](invest-with-your-strategy#1-optimisation-du-portefeuille). +::: + +## Résolution de problèmes + +### API key erronée: _Incorrect API keys_ + +Si vous obtenez l'erreur `Incorrect API keys`, cela généralement signifie que: + +- Votre API Key ou Secret Key n'a pas été copiée correctement depuis Coinbase +- Vous fait une erreur lors de la copie de la liste blanche d'IP +- Vous avez séléctionné le mauvais échange (assurez vous d'avoir sélectionné Coinbase) +- Faut-il utiliser une clé ECDSA ou Ed25519 ? Vous pouvez utiliser celle de votre choix. Les deux formats ECDSA et Ed25519 sont supportés. + +### Trading permissions: _Incorrect API restrictions: missing spot trading_ + +Si vous obtenez l'erreur `Incorrect API restrictions: missing spot trading`, il est nécessaire de modifier les restrictions de votre API Key sur coinbase afin de cocher "Trade", comme expliqué [en étape 3](#3-créer-une-nouvelle-api-key). + +### Retraits activés: _Incorrect API restrictions: withdrawals enabled_ + +Si vous obtenez l'erreur `Incorrect API restrictions: withdrawals enabled`, alors vous devez décocher la permission `Transfer`. Vous pouvez le faire en modifiant les restrictions de votre API Key, comme expliqué [en étape 3](#3-créer-une-nouvelle-api-key). + +### Autres questions + +Si vous avez d'autres questions ou si quelque chose n'est pas clair, n'hésitez pas à contacter l'équipe de support en utilisant la chatbox en bas à droite de l'écran sur [octobot.cloud](https://www.octobot.cloud/). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-kucoin-account-to-octobot.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-kucoin-account-to-octobot.md new file mode 100644 index 0000000000..1b7c80ac1b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/connect-your-kucoin-account-to-octobot.md @@ -0,0 +1,120 @@ +--- +title: "Se connecter à Kucoin" +description: "|" +sidebar_position: 23 +--- + + + +# Connecter votre compte Kucoin à OctoBot cloud + +Pour automatiser les stratégies d'investisements de votre choix sur votre propre compte Kucoin, il est nécessaire d'autoriser OctoBot à accéder à une partie de votre compte. + +Cela est possible en utilisant des clés d'API ou `API Keys`. Les API Keys sont un moyen d'authentification standard et sécurisé qui est très souvent utilisé pour connecter les logiciels ensemble. + +Si vous vous demandez ce qu'est une `API Key` et pourquoi OctoBot utilise cette méthode, jetez un œil à notre [présentation des API Keys de plateformes d'échange](what-is-an-exchange-api-key). + +## Connecter votre compte Kucoin grâce aux API Keys + +Voici les 5 étapes simples pour connecter votre compte Kucoin à OctoBot cloud et automatiser vos stratégies d'investisements. + +### 1. Connectez-vous à votre compte Kucoin + +Rendez-vous sur <a href="https://www.kucoin.com/ucenter/signup?rcode=rJ2Q2T3" rel="nofollow">kucoin.com</a> et connectez-vous à votre compte (ou créez un compte). + +![connection au compte kucoin](/images/guides/kucoin/kucoin-account-login.png) + +### 2. Allez sur Gestion des API + +Rendez-vous sur le tableau de bord de votre compte et sélectionnez "API Management". +![compte lien gestion des api](/images/guides/kucoin/account-setting-api-management.png) + +### 3. Créer une nouvelle API Key + +1. Cliquez sur "Create API", sélectionnez "API-Based Trading". + +2. Nommez la comme vous voulez et donnez lui une passphrase. Le nom de l'API Key est uniquement visible pour vous et vous permet de vous souvenir de l'objectif de cette clé. La passphrase devra être renseignée avec les détails de l'API Key sur OctoBot cloud. + +3. **Pensez à cocher l'API Restriction "Spot Trading"** + +![apis liste creer nouvelle api](/images/guides/kucoin/apis-list-create-new-api.png) + +![sélection de l'api name passphrase et restrictions](/images/guides/kucoin/select-api-name-passphrase-and-restrictions.png) + +4. Sélectionnez l'option `Restrict to Trusted IPs Only`. + +5. Cliquez sur le bouton "copier" depuis OctoBot cloud pour copier la liste blanche d'IP et collez la liste dans le champ IP whitelist, puis cliquez sur `Add`. + +### 4. Sauvegarder votre clé d'API + +Maintenant que votre clé d'API est nommée, a une passphrase et la permission de "Spot Trading", cliquez sur "Next" + +Complétez la vérification de sécurité pour créer l'API Key. + +<div style="text-align: center"> + +![creer api verification securite](/images/guides/kucoin/create-api-security-verification.png) + +</div> + +Votre clé d'API est créée. Ne pas fermer cette fenêtre tant que vous n'avez pas reporté ces informations sur OctoBot cloud. + +<div style="text-align: center"> + +![api key kucoin créée](/images/guides/kucoin/kucoin-api-key-created.png) + +</div> + +### 5. Ajouter votre API Key à votre compte OctoBot cloud + +Votre API key est maintenant prête à être utilisée par OctoBot ! + +Tout ce qu'il vous reste à faire est de copier/coller les valeurs de `API Key`, `Secret Key` et passphrase dans la configuration de votre compte Kucoin sur OctoBot cloud. Cette étape peut être réalisée au lancement d'une stratégie de trading avec un compte réel ou depuis votre profil sur [octobot.cloud](https://www.octobot.cloud/) + +Remarque : Quand vous ajoutez une API Key sur OctoBot cloud, vous avez la possibilité de la nommer. Cette étape, semblable à celle sur Kucoin, permet de choisir un nom facilement identifiable pour votre configuration Kucoin. + +<div style="text-align: center"> + +![api cree key selectionnees](/images/guides/kucoin/api-creation-completed-selected-values.png) + +</div> + +![ajouter api key a octobot cloud depuis start de strategie](/images/guides/kucoin/add-api-key-to-octobot-cloud-from-strategy-start.png) + +<div style="text-align: center"> + <em>Ajouter une API Key au lancement d'une stratégie</em> +</div> + +![ajouter api key a octobot cloud depuis profil](/images/guides/kucoin/add-api-key-to-octobot-cloud-from-profile.png) + +<div style="text-align: center"> + <em>Ajouter une API Key depuis <a href="https://www.octobot.cloud/fr/account" rel="nofollow">votre profil</a></em> +</div> + +Votre compte Kucoin peut maintenant être utilsié sur OctoBot cloud ! + +:::info + Veuillez noter que lors du démarrage d'un bot, une partie des fonds disponibles dans le portefeuille lié à votre API Key peuvent être vendus. Cela inclut les stablecoins, les fonds en monnaie fiduciaire (comme les euros) ainsi que les cryptomonnaies échangées par la stratégie que vous avez sélectionnée. Cela fait partie de [l'optimisation de portefeuille](invest-with-your-strategy#1-optimisation-du-portefeuille). +::: + +## Résolution de problèmes + +### API key erronée: _Incorrect API keys_ + +Si vous obtenez l'erreur `Incorrect API keys`, cela généralement signifie que: + +- Votre API Key, Secret Key ou passphrase n'a pas été copiée correctement depuis Kucoin +- Vous fait une erreur lors de la copie de la liste blanche d'IP +- Vous avez séléctionné le mauvais échange (assurez vous d'avoir sélectionné Kucoin) + +### Trading permissions: _Incorrect API restrictions: missing spot trading_ + +Si vous obtenez l'erreur `Incorrect API restrictions: missing spot trading`, il est nécessaire de modifier les restrictions de votre API Key sur kucoin afin de cocher "Spot Trading", comme expliqué [en étape 3](#3-créer-une-nouvelle-api-key). + +### Retraits activés: _Incorrect API restrictions: withdrawals enabled_ + +Si vous obtenez l'erreur `Incorrect API restrictions: withdrawals enabled`, alors vous devez décocher la permission `Transfer`. Vous pouvez le faire en modifiant les restrictions de votre API Key, comme expliqué [en étape 3](#3-créer-une-nouvelle-api-key). + +### Autres questions + +Si vous avez d'autres questions ou si quelque chose n'est pas clair, n'hésitez pas à contacter l'équipe de support en utilisant la chatbox en bas à droite de l'écran sur [octobot.cloud](https://www.octobot.cloud/). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/find-your-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/find-your-strategy.md new file mode 100644 index 0000000000..c6f6e07a54 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/find-your-strategy.md @@ -0,0 +1,70 @@ +--- +title: "Trouver votre investissement" +description: "Apprenez comment explorer, comparer et trouver ou créer la meilleure stratégie de trading pour vous sur OctoBot cloud." +sidebar_position: 4 +--- + + + +# Trouvez le meilleur investissement pour vous + +De nombreuses offres d'investissement sont disponibles sur [octobot.cloud](https://www.octobot.cloud/). Certains sont créés par l'équipe OctoBot, d'autres sont réalisés par la communauté. + +L'objectif d'OctoBot cloud est de vous aider à trouver l'investissement parfait pour vous, en fonction de vos propres objectifs. + +![explorateur de stratégies OctoBot cloud avec paniers de crpyto et stratégies](/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png) + +Trouver votre investissement idéal peut être difficile. C'est pourquoi nous essayons de rendre aussi simple que possible de : + +- Choisir un thème de panier de crypto +- Comparer les stratégies entre elles +- Comprendre le fonctionnement de chaque stratégie +- Accéder aux performances passées de chaque stratégie de manière transparente + +## Le thème d'un panier de crypto + +<div style={{textAlign: "center"}}> + ![exemple d'un panier de crypto OctoBot + cloud](/images/guides/utiliser-un-panier-de-crypto.png) +</div> + +[Une grande variété de paniers de crypto](https://www.octobot.cloud/features/crypto-basket) est proposée sur OctoBot cloud. Un panier de crypto est une collection de différentes cryptomonnaies qui partagent un même thème. +Ces paniers vous permettent d'investir dans les crypto qui vous intéressent ou tout simplement dans les crypto avec la plus grande capitalisation ("le top des crypto"). + +Utiliser ces paniers vous permet d'éviter d'avoir à choisir individuellement chaque crypto à acheter et ainsi tirer profit de la prochaine envolée d'une crypto qui compose le panier. + +## Détails d'une stratégie + +![strategies OctoBot cloud](/images/guides/cloud-strategy.png) + +Chaque stratégie dispose d'un graphique des bénéfices historiques. Ce graphique est généré en exécutant la stratégie avec des données historiques sur la période affichée. + +> Note : Les graphiques des bénéfices sont générés à l'aide du [backtesting OctoBot](/guides/octobot#optimize-your-octobot-using-backtesting) + +Pour une transparence totale, les performances de chaque stratégie sont réévaluées chaque semaine. Cela garantit que les performances affichées soient toujours à jour avec le marché actuel. + +## Créer votre stratégie + +Vous préférerez peut-être créer votre propre stratégie de trading plutôt que d'utiliser les stratégies prêtes à l'emploi d'OctoBot cloud. + +<div style={{textAlign: "center"}}> + ![automatisation TradingView illustrée par le logo + TradingView](/images/blog/introducing-the-investor-plus-plan/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +À cette fin, OctoBot se connecte à TradingView pour vous permettre de créer votre stratégie sur TradingView de manière: + +- **Claire et attrayante**: aucune compétence en codage nécessaire car TradingView est moyen visuel simple et visuel de créer une stratégie. +- **Adaptée aux connaissances** de l'investisseur: OctoBot Cloud permet les meilleures stratégies possibles pour vous en tant qu’investisseur. Que cela signifie simplement trader selon des cibles de prix ou alors utiliser une combinaison complexe d’indicateurs, nous travaillons pour le rendre possible et facile à faire. +- **Facile à suivre et surveiller**: vous savez toujours quels trades sont en cours, ce qui peut se passer ensuite ainsi que ce qui s’est passé précédemment. + +Consultez le [guide du trading automatisé sur TradingView](tradingview-automated-trading) pour en savoir plus sur la façon de créer votre propre stratégie avec OctoBot cloud. + +## Utiliser l'investissement que vous avez trouvé + +Une fois que vous avez identifiée l'investissement que vous souhaitez utiliser, vous avez 2 possibilités: + +![choix entre le trading avec un compte réel ou virtuel](/images/guides/trading-account-type-choice-real-or-paper-trading.png) + +- La tester avec le [trading simulé](paper-trading-a-strategy): démarrez un OctoBot avec des fonds simulés et testez la stratégie ou le panier de crypto autant que vous le souhaitez. Le trading simulé (ou virtuel) permet de tester une stratégie ou un panier de crypto sans risque. +- L'utiliser avec de [véritables fonds](invest-with-your-strategy): démarrez un OctoBot sur votre compte réel de plateforme d'échange et commencez à tirer profit de la stratégie ou du panier de crypto. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/fine-tune-your-octobots.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/fine-tune-your-octobots.mdx new file mode 100644 index 0000000000..3d5b95eed5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/fine-tune-your-octobots.mdx @@ -0,0 +1,74 @@ +--- +title: "Ajuster vos OctoBots" +description: "Ajustez votre OctoBot en créant, remplaçant et annulant facilement des ordres et en rééquilibrant votre portefeuille directement depuis vos OctoBots." +sidebar_position: 12 +--- + + + +# Ajuster vos OctoBots + +En utilisant le <a href="https://www.octobot.cloud/fr/plan" rel="nofollow">plan Pro</a> d'OctoBot Cloud, vous pouvez modifier la façon dont vos OctoBots effectuent des transactions: + +- Annuler tout ordre ouvert de vos stratégies +- Remplacer les ordres par les vôtres +- Ajouter vos propres ordres à n'importe quelle stratégie + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="w3RxwrBQxp0" title="Le Plan Pro d" /> + +## Faites trader vos OctoBots à votre façon + +Modifiez facilement la manière dont vos OctoBots achètent ou vendent des cryptomonnaies. + +<div style={{textAlign: "center"}}> + ![annuler des ordres directement depuis + octobot](/images/guides/annuler-des-ordres-directement-depuis-octobot.png) +</div> +Directement depuis n'importe quel OctoBot, qu'il suive une <a href="https://www.octobot.cloud/fr/explore" rel="nofollow">stratégie OctoBot cloud</a> +, votre [stratégie TradingView](tradingview-automated-trading) sur votre compte +d'échange ou en [trading virtuel](paper-trading-a-strategy) vous pouvez: - +Annuler tout ordre ouvert - Remplacer les ordres existants par les vôtres + +## Tradez directement depuis vos OctoBots + +Achetez et vendez des crypto-monnaies directement depuis votre compte d'échange ou de [trading virtuel](paper-trading-a-strategy) depuis votre OctoBot + +<div style={{textAlign: "center"}}> + ![acheter et vendre des crypto directement depuis votre + OctoBot](/images/guides/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png) +</div> +Avec le <a href="https://www.octobot.cloud/fr/plan" rel="nofollow">plan Pro</a> +, vous pouvez depuis n'importe quel Octobot: - Facilement trader directement sur +votre exchange ou compte de [trading virtuel sans +risque](paper-trading-a-strategy) - Créer n'importe quel type d’ordre d’achat ou +vente à tout moment + +**[Passer à Pro](https://www.octobot.cloud/pricing)** + +## Rééquilibrez votre portefeuille + +Adaptez et rééquilibrez les fonds de votre portefeuille à tout moment en utilisant des ordres au marché ou limités via votre Octobot. + +<div style={{textAlign: "center"}}> + ![acheter et vendre depuis votre portfolio + octobot](/images/guides/acheter-et-vendre-depuis-votre-portfolio-octobot.png) +</div> + +Réalisez simplement vos profits, arrêtez vos pertes ou achetez les cryptomonnaies dans lesquels vous souhaitez investir. + +:::info + Astuce : vous pouvez également empêcher votre OctoBot de trader avec une + partie de vos fonds en achetant des crypto qui ne sont pas échangés dans la + stratégie d'OctoBot cloud que vous avez sélectionnée. +::: + +## Suivez l'activité de vos OctoBots + +Toutes les activités de vos OctoBots enregistrées dans un historique clair des transactions, automatisations et annulations effectuées avec votre bot. + +<div style={{textAlign: "center"}}> + ![acheter et vendre des crypto historique activite + octobot](/images/guides/acheter-et-vendre-des-crypto-historique-activite-octobot.png) +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/follow-your-profits.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/follow-your-profits.md new file mode 100644 index 0000000000..2557bf232b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/follow-your-profits.md @@ -0,0 +1,55 @@ +--- +title: "Suivre vos profits" +description: "Apprenez comment suivre rapidement et facilement les profits et l'activité de vos robots de trading OctoBot en cours d'exécution et arrêtés sur OctoBot cloud." +sidebar_position: 11 +--- + + + +# Suivre vos profits + +## Vos comptes d'échange + +OctoBot cloud vous permet de suivre la composition de vos portefeuilles sur toutes les plateformes d'échange connectées ainsi que de visualiser l'évolution de la valeur total de vos crypto. + +![Tableau de bord multi-plateforme OctoBot avec graphique crypto détenues et valeurs historiques du portefeuille et bots en cours d'exécution](/images/guides/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png) + +Grâce à votre tableau de bord OctoBot cloud, vous pouvez facilement: + +- Voir tous les portefeuilles de vos comptes d'échange et suivre leur croissance +- Suivre les activités de tous vos OctoBots + +## Vos OctoBots + +Une fois que vous avez lancé un OctoBot pour exécuter une stratégie (en [trading simulé](paper-trading-a-strategy) ou en [trading réel](invest-with-your-strategy)) vous pouvez le suivre de la manière que vous préférez. + +### Depuis OctoBot cloud + +Le site web OctoBot cloud, sur <a href="https://www.octobot.cloud/fr/bots" rel="nofollow">octobot.cloud/bots</a> +![bots sur OctoBot cloud](/images/guides/cloud-bots.png) + +### Depuis l'application mobile OctoBot + +L'application mobile OctoBot est disponible sur <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=investing&utm_content=follow-your-profit" rel="nofollow">Google play</a> et sur l'<a href="https://apps.apple.com/us/app/octobot-crypto-investment/id6502774175" rel="nofollow">App Store</a>. + +<div style={{textAlign: "center"}}> + <div style={{textAlign: "center"}}> + <a href="https://apps.apple.com/us/app/octobot-crypto-investment/id6502774175" rel="nofollow"><AppleStoreButton /></a> + <a href="https://play.google.com/store/apps/details?id=com.drakkarsoftware.octobotapp&utm_source=www.octobot.cloud&utm_media=investing&utm_content=follow-your-profit" rel="nofollow"><GoogleStoreButton /></a> + </div> +</div> + +### Avec l'application web OctoBot + +L'application web OctoBot est disponible sur <a href="https://mobile.octobot.cloud" rel="nofollow">mobile.octobot.cloud</a>. + +:::info + Elle vous permet d'utiliser l'application sans avoir à l'installer sur votre + téléphone +::: + +## Détails de votre bot + +![détails d'un bot sur OctoBot cloud](/images/guides/cloud-bot.png) + +Chaque OctoBot dispose d'une vue détaillée sur laquelle vous pouvez voir son activité actuelle, son portefeuille et ses bénéfices historiques. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/having-multiple-octobot-strategies.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/having-multiple-octobot-strategies.md new file mode 100644 index 0000000000..822ac714f7 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/having-multiple-octobot-strategies.md @@ -0,0 +1,80 @@ +--- +title: "Avoir plusieurs OctoBot" +description: "Apprenez à exécuter gratuitement plusieurs stratégies de trading pour optimiser vos gains et réduire les risques en utilisant OctoBot cloud." +sidebar_position: 7 +--- + + + +# Avoir plusieurs OctoBots + +Lorsque vous utilisez le cloud OctoBot, vous pouvez avoir autant de stratégies ou de panier que vous le souhaitez. + +![avoir plusieurs octobot sur différentes](/images/guides/having-multiple-octobots-on-different-strategies.png) + +Bien qu'il n'y ait rien d'obligatoire, il est généralement préférable d'utiliser plusieurs stratégies d'investissement. Sur ce point, les investissements automatisés sont très profitables car ils permettent de facilement tirer profit de plusieurs stratégies simultanément. + +## Les avantages d'utiliser plusieurs OctoBot + +Sur OctoBot cloud, l'exécution d'une ou plusieurs stratégies de trading est toujours gratuite. Il n'y donc a aucune raison réelle de ne pas le faire. Si vous pensez que cela peut être rentable pour vous, alors vous avez la possibilité de le faire sans frais. + +L'exécution simultanée de plusieurs OctoBots présente de nombreux avantages, il permet notamment de: + +1. Diversifier vos investissements en profitant d'autres types de stratégie ou thèmes de panier, ce qui réduit les risques +2. Trader sur différentes plateformes d'échange simultanément, ce qui réduit les risques liés aux problèmes potentiels de ces plateformes +3. Trader différents actifs et donc d'augmenter vos chances d'investir dans les prochaines perles d'investissement tout en réduisant les risques associés à chaque crypto échangée + +Dans l'ensemble, utiliser plusieurs OctoBot se rapproche de l'expression populaire "ne pas mettre tous ses œufs dans le même panier". + +## Comment démarrer plusieurs OctoBots + +Lorsque vous exécutez une stratégie ou un panier de crypto sur [octobot.cloud](https://www.octobot.cloud/), il n'y a qu'une seule règle: + +:::info + Une seule stratégie ou panier par portefeuille d'échange. +::: + +Cela signifie que vous pouvez exécuter autant de stratégies ou de paniers de crypto simultanés que vous le souhaitez, à condition qu'ils s'exécutent sur différentes plateformes d'échange, ou alors en utilisant des portefeuilles différents sur le même compte d'échange. + +> Pourquoi imposons-nous cela ? +> Simplement pour éviter les interférences entre les stratégies et permettre aux OctoBots de fonctionner à leur plein potentiel. + +### Utiliser différentes plateformes d'échange + +Pour utiliser plusieurs OctoBot, il vous suffit de connecter différentes plateformes d'échange à votre compte OctoBot cloud. Vous pourrez alors exécuter une stratégie ou un panier de crypto sur chaque compte d'échange. + +Par exemple, vous pouvez exécuter une stratégie risquée mais à haut poentiel basée sur l'intelligence artificielle sur Binance, et une stratégie de grille à faible risque sur Kucoin. Cela réduit également les risques associés à la détention de vos fonds sur une seule plateforme. + +### Avoir plusieurs OctoBots sur Binance + +Vous pouvez également exécuter plusieurs OctoBot sur le même compte Binance en utilisant des sous-comptes. Avec des sous-comptes, vous pouvez rapidement et facilement répartir vos fonds entre plusieurs portefeuilles au sein du même échange, ce qui vous permet d'utiliser plusieurs stratégies ou paniers sur le même compte Binance. + +<div style="text-align: center"> + +![sous comptes binance](/images/guides/binance/binance-subaccounts.png) + +</div> + +Par exemple, Binance vous permet d'avoir jusqu'à 10 sous-comptes, Vous pouvez donc exécuter jusqu'à 11 stratégies ou paniers de crypto en simultanés sur votre compte Binance : une sur votre compte principal et 10 sur vos sous-comptes. + +:::info +Vous vous demandez comment créer un sous-compte Binance ? Les sous-comptes Binance sont maintenant ouverts à tous, <a href="https://www.binance.com/fr/support/faq/fonctionnalit%C3%A9-des-sous-comptes-binance-et-questions-fr%C3%A9quemment-pos%C3%A9es-360020632811" rel="nofollow">consultez ce guide</a>. +::: + +### Avoir plusieurs OctoBots sur Coinbase + +Pour exécuter plusieurs startégies sur le même compte Coinbase, vous pouvez utiliser différents portefeuilles: chaque portefeuille peut être lié à un OctoBot. + +<div style="text-align: center"> + +![multi portefeuille coinbase](/images/guides/coinbase/coinbase-multi-portfolio.png) + +</div> + +Créez autant de portefeuilles que nécessaire depuis l' <a href="https://www.coinbase.com/advanced-portfolio" rel="nofollow">interface de portefeuille Coinbase</a> et créez des clés d'API associées à ces portefeuilles pour que votre OctoBot investisse avec. + +<div style="text-align: center"> + +![selection de portefeuille pour clé d'api coinbase](/images/guides/coinbase/coinbase-api-key-select-multi-portfolio.png) + +</div> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud.mdx new file mode 100644 index 0000000000..1d2a897930 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/how-to-automate-any-tradingview-strategy-on-octobot-cloud.mdx @@ -0,0 +1,295 @@ +--- +title: "Automatisation de stratégies" +description: "Utilisez les stratégies de TradingView pour automatiser les trades de votre compte d'échange crypto avec stratégies préconçues ou personnalisées." +sidebar_position: 18 +--- + + + +# Comment automatiser toute stratégie TradingView sur OctoBot cloud + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="OZIYfg7bJf4" title="Automatisez votre stratégie avec un compte TradingView" /> + +Automatisez n'importe quelle stratégie TradingView en quelques secondes en utilisant un compte TradingView et le <a href="https://www.octobot.cloud/fr/creator" rel="nofollow">générateur de stratégie par IA</a>. + +Note: L'automatisation d'alertes TradingView gratuitement par email n'est malheureusement plus disponible suite à une récente restriction de la part de TradingView. + +## Toute stratégie TradingView peut être automatisée avec OctoBot + +OctoBot peut facilement être [connecté à TradingView](tradingview-automated-trading) pour automatiser les trades en se basant sur des événements de prix, des indicateurs ou encore des stratégies complètes. + +<div style={{textAlign: "center"}}> + <div> + ![stratégies de la communauté + tradingview](/images/guides/trading-view/tradingview-community-strategies.png) + </div> +</div> + +Automatiser une stratégie TradingView présente de nombreux avantages par rapport à une simple automatisation suivant des événements de prix ou des indicateurs techniques. +Avec l'automatisation d'une stratégie, vous pouvez : + +- Automatiser vos transactions en utilisant **toute stratégie TradingView**, **votre propre stratégie** créée par le <a href="https://www.octobot.cloud/fr/creator" rel="nofollow">générateur de strategies TradingView</a> d'OctoBot, ou une stratégie fournie par **votre créateur de stratégies préféré** +- **Combinez plusieurs indicateurs** pour créer vos ordres d'entrée et de sortie lorsque toutes vos conditions sont remplies +- Utilisez le moteur de backtesting intégré à TradingView pour **optimiser votre stratégie** + +## 1. Préparer une stratégie TradingView à se connecter à votre OctoBot + +Pour permettre à votre OctoBot de trader sur votre compte d'échange (ou un [compte virtuel sans risque](paper-trading-a-strategy)) à partir d'une stratégie TradingView, il vous suffit de lier les ordres de la stratégie à vos [automatisations TradingView](tradingview-alerts-automation#trading-automatisé-avec-des-stratégies-pine-script) en utilisant des alertes TradingView classiques. + +Voici un guide étape par étape présentant la façon de procéder. + +Une fois que vous savez quelle stratégie vous souhaitez utiliser, affichez-la sur votre graphique TradingView et cliquez sur l'icône `{}`. + +<div style={{textAlign: "center"}}> + <div> + ![code d'une stratégie + tradingview](/images/guides/trading-view/tradingview-open-strategy-code.png) + </div> +</div> + +Cela affiche le code Pine Script de la stratégie. Modifier ce code modifiera le comportement de la stratégie. + +:::info + Vous ne trouvez pas le code la stratégie que vous voudiez utiliser ? Générez + le avec le{' '} + <a href="https://www.octobot.cloud/fr/creator" rel="nofollow">générateur de strategies TradingView</a>{' '} + d'OctoBot. +::: + +Chaque stratégie TradingView utilisant Pine Script en version 4 et supérieure peut utiliser les mots-clés `strategy.entry`, `strategy.exit`, `strategy.close` et `strategy.order` pour trader afin d'entrer et de sortir des positions. +Pour lier ces transactions à votre compte d'échange en utilisant OctoBot, il vous suffit d'ajouter le paramètre `alert_message="your-OctoBot-automation-id"` au mot-clé. + +<div style={{textAlign: "center"}}> + <div> + ![message d'ajout d'alerte + tradingview](/images/guides/trading-view/tradingview-adding-alert-message.png) + </div> +</div> + +Notez que l'entrée initiale `strategy.entry("RsiLE", strategy.long, comment="Buy")` a été modifiée en `strategy.entry("RsiLE", strategy.long, comment="Buy", alert_message="your-OctoBot-automation-id")` afin d'inclure le paramètre `alert_message="your-OctoBot-automation-id"`. + +Dans votre stratégie Pine Script, chaque appel du mot-clé suivant doit inclure ce paramètre `alert_message="your-OctoBot-automation-id"` : + +- `strategy.entry` +- `strategy.exit` +- `strategy.close` +- `strategy.order` + +Oublier d'ajouter le paramètre `alert_message` à l'un de ces appels entraînera des signaux manquants sur votre compte d'échange. + +Maintenant que chaque appel de stratégie contient le paramètre, allez dans l'onglet personnalisé de l'<a href="https://www.octobot.cloud/explore?category=tv" rel="nofollow">explorateur de stratégies OctoBot</a> et démarrez un nouvel OctoBot TradingView. + +<div style={{textAlign: "center"}}> + ![lancer un nouvel octobot depuis + l'explorer](/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png) +</div> + +Voici un [guide expliquant comment démarrer un OctoBot TradingView](tradingview-trading-tutorial#1-create-your-tradingview-octobot) si vous désirez plus d'information sur le sujet. + +Maintenant que votre TradingView OctoBot est lancé, il vous suffit de créer une automatisation pour chaque signal de trading de votre stratégie. + +:::info + Les automatisations sont des actions à exécuter automatiquement par votre + OctoBot lorsqu'elles sont déclenchées par les alertes TradingView. +::: + +## 2. Option A: Créer vos automatisations + +### 2.1 Créer une automation standard + +Chaque mot-clé `strategy.entry`, `strategy.exit`, `strategy.close` et `strategy.order` crée des signaux de trading, il suffit de créer une automatisation pour chacun de ces signaux. + +<div style={{textAlign: "center"}}> + <div> + ![stratégie rsi simple sur + tradingview](/images/guides/trading-view/tradingview-simple-rsi-strategy.png) + </div> +</div> + +Dans cette stratégie RSI simple, deux signaux sont émis : un signal LONG et un signal SHORT, tous deux créés par un mot-clé strategy.entry. + +Dans cet exemple, nous voulons acheter lorsque le signal LONG est émis et vendre lorsque le signal SHORT est envoyé. + +Nous allons donc créer les deux automatisations suivantes : + +**Automation 1 : signal long** + +<div style={{textAlign: "center"}}> + <div> + ![automatisation octobot achetent des eth pour 25 percent des + usdt](/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt.png) + </div> +</div> +Acheter de l'ETH/USDT en utilisant 25% du portefeuille au prix du marché. +<div style={{textAlign: "center"}}> + <div> + ![id de l'automatisation octobot achetant des eth pour 25 percent des + usdt](/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png) + </div> +</div> +Nous utiliserons l'identifiant de cette automatisation pour remplacer le texte +`your-OctoBot-automation-id` dans le premier mot-clé `strategy.entry`. + +**Automation 2 : short signal** + +<div style={{textAlign: "center"}}> + <div> + ![automatisation octobot pour vendre 100 percent des + eth](/images/guides/trading-view/octobot-automation-sell-100-percent-eth.png) + </div> +</div> +Vendre 100% de la détention d'ETH/USDT au prix du marché. Nous utiliserons +l'identifiant de cette automatisation pour remplacer le texte +`your-OctoBot-automation-id` dans le deuxième mot-clé `strategy.entry`. + +### 2.2 Lier votre stratégie TradingView à vos automatisations standards + +**1. Copiez les identifiants de vos automatisations dans votre alerte de stratégie** + +Copiez l'identifiant de chaque automatisation dans la valeur textuelle de `alert_message` de leur signal associé. + +<div style={{textAlign: "center"}}> + <div> + ![panel de connection d' automatisation + octobot](/images/guides/trading-view/octobot-open-automation-connection-panel.png) + </div> + <div> + ![id d'automatisation octobot pour acheter des eth avec 25 pourcent des + usdt](/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png) + </div> +</div> + +Il ne devrait plus rester de `your-OctoBot-automation-id` dans le code de votre stratégie Pine Script. + +<div style={{textAlign: "center"}}> + <div> + ![exemple de stratégie tradingview avec des id + d'automatisation](/images/guides/trading-view/tradingview-strategy-example-with-automation-ids.png) + </div> +</div> + +Notez les textes `f2b0b567-d63e-412e-b6cb-1a31c0bc1217` et `af4b897e-6b8e-45d0-a88d-9f11fd57a9b2` qui se trouvent maintenant dans le mot-clé `strategy.entry` au lieu de la valeur par défaut `your-OctoBot-automation-id`. Naturellement, ces deux identifiants sont des exemples et vous devriez utiliser vos propres identifiants d'automatisation à la place. + +**2. Configurer l'alerte TradingView** + +Créez une nouvelle alerte TradingView liée à votre stratégie et modifiez la section `Message` de l'alerte pour ne contenir que la valeur du paramètre `alert_message` (qui est votre identifiant d'automatisation). + +<div style={{textAlign: "center"}}> + ![création d'alerte sur + tradingview](/images/guides/trading-view/tradingview-create-alert.png) +</div> +Ouvrez la vue `Alertes` sur la droite et cliquez sur `Créer une alerte`. + +<div style={{textAlign: "center"}}> + <div> + ![ajout d'alerte de stratégie dans + tradingview](/images/guides/trading-view/tradingview-adding-strategy-alert-message.png) + </div> +</div> + +1. Sélectionnez votre stratégie dans la section `Condition`. Veuillez noter que toute modification de la configuration de votre stratégie nécessitera de sélectionner la version la plus récente de la stratégie dans cette alerte. +2. Utilisez **cette syntaxe exacte** pour le contenu du `Message` : `{{strategy.order.alert_message}}` + +**3. Assurez-vous que vos paramètres de notification sont à jour** + +Vous voudrez peut-être utiliser des notifications par [webhooks](tradingview-trading-tutorial#25-configurer-lurl-du-webhook) pour trader en utilisant cette stratégie. Avant de confirmer l'alerte, assurez-vous toujours que la configuration des **Notifications** de votre alerte est à jour, sinon votre OctoBot risque de ne pas recevoir vos alertes. + +<div style={{textAlign: "center"}}> + <div> + ![configuration de notification + tradingview](/images/guides/trading-view/tradingview-notification-configuration.png) + </div> +</div> + +Remarque : lorsque vous utilisez des notifications par courrier électronique, assurez-vous toujours de cocher `Envoyer du texte brut`. + +## 2. Option B: Utiliser des automatisations personnalisées + +### 2.1 Créer des automatisations personnalisées + + +<YouTube id="HeOi4PY1ayk" title="Tutorial TradingView: automatiser n" /> +<div style={{textAlign: "center"}}> + Tutorial TradingView: automatiser n'importe quelle stratégie avec les + automatisations personnalisées d'OctoBot. +</div> + +Chaque mot-clé `strategy.entry`, `strategy.exit`, `strategy.close` et `strategy.order` crée des signaux de trading, il suffit de créer une automatisation pour chacun de ces signaux. + +<div style={{textAlign: "center"}}> + <div> + ![stratégie rsi simple sur + tradingview](/images/guides/trading-view/tradingview-simple-rsi-strategy.png) + </div> +</div> + +Dans cette stratégie RSI simple, deux signaux sont émis : un signal LONG et un signal SHORT, tous deux créés par un mot-clé strategy.entry. + +Dans cet exemple, nous voulons acheter lorsque le signal LONG est émis et vendre lorsque le signal SHORT est envoyé. + +Nous allons donc mettre à jour le paramètre `alert_message` de chaque signal LONG et SHORT pour envoyer nos signaux d'achat et de vente. + +**Automatisation personnalisée : Acheter 0,1 ETH sur un signal LONG et le vendre sur un signal SHORT** + +<div style={{textAlign: "center"}}> + <div> + ![Automatisation personnalisée octobot achat + eth](/images/guides/trading-view/octobot-custom-automation-buy-eth.png) + </div> +</div> +- Contenu de l' `alert_message` du signal LONG: +`SYMBOL=ETHUSDT;SIGNAL=BUY;VOLUME=0.1;BOT_ID=21a7e1e2-d499` - Contenu de l' +`alert_message` du signal SHORT: +`SYMBOL=ETHUSDT;SIGNAL=SELL;VOLUME=0.1;BOT_ID=21a7e1e2-d499` + +Vous trouverez la liste des paramètres pris en charge dans le [guide des automatisations](tradingview-alerts-automation#automatisations-tradingview-personnalisées). + +_Remarque : ici, `BOT_ID=21a7e1e2-d499` n'est qu'une valeur d'exemple et doit être remplacée par l'identifiant de votre OctoBot TradingView, que vous pouvez trouver comme dernier composant de l'URL de votre OctoBot TradingView._ + +Et voilà, ce simple `alert_message` indique automatiquement à OctoBot que faire lorsque la stratégie se déclenche. + +### 2.2 Lier votre stratégie TradingView à vos automatisations personnalisées + +Créez une nouvelle alerte TradingView liée à votre stratégie et modifiez la section `Message` de l'alerte pour ne contenir que la valeur du paramètre `alert_message` (qui est le contenu de l'automatisation). + +<div style={{textAlign: "center"}}> + ![création d'alerte sur + tradingview](/images/guides/trading-view/tradingview-create-alert.png) +</div> +Ouvrez la vue `Alertes` sur la droite et cliquez sur `Créer une alerte`. + +<div style={{textAlign: "center"}}> + <div> + ![ajout d'alerte de stratégie dans + tradingview](/images/guides/trading-view/tradingview-adding-strategy-alert-message.png) + </div> +</div> + +1. Sélectionnez votre stratégie dans la section `Condition`. Veuillez noter que toute modification de la configuration de votre stratégie nécessitera de sélectionner la version la plus récente de la stratégie dans cette alerte. +2. Utilisez **cette syntaxe exacte** pour le contenu du `Message` : `{{strategy.order.alert_message}}` + +**3. Assurez-vous que vos paramètres de notification sont à jour** + +Vous voudrez peut-être utiliser des notifications par [webhooks](tradingview-trading-tutorial#25-configurer-lurl-du-webhook) pour trader en utilisant cette stratégie. Avant de confirmer l'alerte, assurez-vous toujours que la configuration des **Notifications** de votre alerte est à jour, sinon votre OctoBot risque de ne pas recevoir vos alertes. + +<div style={{textAlign: "center"}}> + <div> + ![configuration de notification + tradingview](/images/guides/trading-view/tradingview-notification-configuration.png) + </div> +</div> + +Remarque : lorsque vous utilisez des notifications par courrier électronique, assurez-vous toujours de cocher `Envoyer du texte brut`. + +## Tout est prêt ! + +C'est tout ! Avec cette configuration, lorsque votre stratégie TradingView détectera une opportunité d'achat ou de vente, elle : + +1. Appellera `strategy.entry` (ou tout autre mot-clé `strategy.` que vous aurez utilisé) +2. Appellera strategy.entry (ou tout autre mot-clé strategy. que vous avez utilisé) + Ce qui enverra une alerte liée à une automatisation de votre TradingView OctoBot (identifié par le paramètre `alert_message`) +3. Votre automatisation OctoBot sera exécutée sur votre compte d'échange réel ou virtuel + +**[Démarrer votre bot TradingView](https://www.octobot.cloud)** diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/introduction.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/introduction.mdx new file mode 100644 index 0000000000..398aa91464 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/introduction.mdx @@ -0,0 +1,56 @@ +--- +title: "Introduction" +description: "Des questions sur l'investissement avec OctoBot cloud ? Comment utiliser une stratégie de trading et suivre votre investissement ? Tout est dans les guides." +sidebar_position: 1 +--- + + + +# Investir avec OctoBot cloud + +## Améliorez vos investissements crypto avec OctoBot cloud + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="WiC5DMw_6ZA" title="Améliorez vos investissements crypto avec OctoBot cloud" /> + +Améliorez vos investissements en cryptomonnaie en utilisant les stratégies d'investissement automatisées basées sur OctoBot cloud. +Investissez dans les principaux projets ou catégories de cryptomonnaies et profiter des dernières tendances avec les [paniers de crypto](https://www.octobot.cloud/features/crypto-basket). Augmentez vos profits avec des <a href="https://www.octobot.cloud/fr/explore" rel="nofollow">stratégies</a> préconfigurées d'IA, de DCA ou de trading en grille, ou encore automatisez toute [stratégie TradingView](tradingview-automated-trading). + +**[Investir avec OctoBot](https://www.octobot.cloud)** + +## Choisissez une stratégie, pas un outil + +OctoBot cloud vous permet d'utiliser les meilleures stratégies d'investissement gratuitement. + +- Vous pouvez utiliser des stratégies de trading gratuitement, de manière illimitée (ce n'est pas un essai gratuit) +- Vos fonds restent sur votre compte d'échange : OctoBot envoie des ordres d'achat et de vente pour exécuter votre stratégie + +Idéalement, lorsque vous souhaitez utiliser une stratégie, vous souhaitez : + +1. Explorer et comparer les stratégies +2. Comprendre les profits potentiels et les risques de la stratégie de votre choix +3. Appliquer cette stratégie sur votre compte d'échange + +Cela ne devrait pas être plus compliqué que cela. Rendre ces étapes **aussi simples que possible** +est notre objectif avec [OctoBot cloud](/fr) et ses plans Investisseur et [Investisseur Plus](/blog/introducing-the-investor-plus-plan). + +## Avec OctoBot cloud, vous pouvez vous concentrer sur ce qui est important + +### 1. Sélectionnez la stratégie de votre choix + +![explorateur de stratégies OctoBot cloud avec paniers de crpyto et stratégies](/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png) + +OctoBot cloud propose une large gamme de stratégies de trading que vous pouvez : + +- [Explorer et comparer](find-your-strategy) +- [Tester avec le trading virtuel](paper-trading-a-strategy) +- [Appliquer sur votre compte d'échange](invest-with-your-strategy) + +Ou alors, si vous préférez utiliser votre propre stratégie, vous pouvez investir avec de l'argent réel ou virtuel avec [votre stratégie depuis TradingView](tradingview-automated-trading). + +### 2. Suivez vos gains + +![cloud-bots](/images/guides/cloud-bots.png) + +Une fois que vous avez lancé votre OctoBot, qu'il s'agisse d'un trader en direct ou virtuel, suivez-le [directement depuis OctoBot cloud ou l'application mobile](follow-your-profits). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/invest-with-your-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/invest-with-your-strategy.md new file mode 100644 index 0000000000..736ff70f51 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/invest-with-your-strategy.md @@ -0,0 +1,50 @@ +--- +title: "Démarrer l'investissement" +description: "Apprenez comment démarrer rapidement et facilement votre investissement sur OctoBot cloud." +sidebar_position: 6 +--- + + + +# Démarrer votre investissement + +![strategies OctoBot cloud](/images/guides/cloud-strategy2.png) + +Une fois que vous avez trouvé la stratégie ou le panier que vous souhaitez utiliser avec vos fonds réels, vous êtes prêt à vraiment tirer profit d'OctoBot cloud. + +## Démarrer + +1. À partir de la stratégie ou le panier que vous souhaitez utiliser, cliquez sur **Démarrer le trading**. +2. Sélectionnez **Trading réel**. +![choix entre le trading avec un compte réel ou virtuel](/images/guides/type-de-compte-de-trading-choix-entre-reel-ou-virtuel.png) +3. Sélectionnez ou saisissez la [clé API](what-is-an-exchange-api-key) du compte d'échange. Suivez le [guide de connexion à Binance](connect-your-binance-account-to-octobot), [guide de connexion à Kucoin](connect-your-kucoin-account-to-octobot) ou [guide de connexion à Coinbase](connect-your-coinbase-account-to-octobot) si vous avez des questions sur ce point. +![sélection d'échange pour une stratégie OctoBot cloud](/images/guides/cloud-strategy-select-exchange.png) + _Note: OctoBot s'assurera que vous disposez de suffisamment de fonds sur votre compte d'échange pour démarrer la stratégie choisie_ +![lancer une stratégie OctoBot cloud](/images/guides/cloud-strategy-start.png) +4. Démarrez votre OctoBot pour automatiser vos investissements avec cette stratégie ou ce panier. + +## Que va-t-il se passer ? + +### 1. Optimisation du portefeuille +Votre OctoBot peut équilibrer les cryptomonnaies associées à l'USD (comme USDT, USDC, etc), ansi que les cryptomonnaies tradées par la stratégie choisie, qui se trouvent dans votre portefeuille afin de créer des conditions optimales pour démarrer votre stratégie ou votre panier. + +Example: + +Imaginons un portefeuille avec 100 USDC et 100 USDT. Démarrer une stratégie qui utilise l'USDT fera vendre à OctoBot vos USDC contre des USDT afin de pouvoir les investir selon la stratégie sélectionnée. + +:::info +Ce processus n'utilise que les cryptomonnaies associées à l'USD présentes dans votre portefeuille ainsi que celles tradées par la stratégie. Si vous souhaitez exclure une partie de vos fonds de la stratégie, il suffit de déplacer ces fonds vers une cryptomonnaie qui n'est pas associée au dollar américain ou tradée par votre stratégie. +::: + + +### 2. Exécution de l'investissement +Votre OctoBot appliquera désormais automatiquement la stratégie ou le panier sélectionnée à votre compte d'échange en créant des ordres d'achat et de vente sur les cryptomonnaies échangées. + +Comme pour les [OctoBots en trading simulé](paper-trading-a-strategy), vous pouvez [suivre votre bot de trading](follow-your-profits) comme d'habitude. + +Veuillez noter que vos fonds restent toujours sur votre compte d'échange. OctoBot ne crée que des ordres de trading sur votre compte mais n'accède jamais directement à vos fonds. +Pour ajouter un niveau supplémentaire de sécurité, il est recommandé d'utiliser des clés API sans autorisation de retrait. + +:::info + Astuce: Vous pouvez continuer à tester d'autres stratégies d'investissement ou paniers de crypto sans prendre de risque grâce au [trading simulé](paper-trading-a-strategy), et ce même lorsque vous exécutez une stratégie ou un panier avec des fonds réels. +::: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/investor-faq.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/investor-faq.md new file mode 100644 index 0000000000..60b1701cfa --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/investor-faq.md @@ -0,0 +1,85 @@ +--- +title: "FAQ" +description: "Vous avez des questions sur OctoBot cloud ? Voici les questions fréquemment posées et leurs réponses." +sidebar_position: 33 +--- + + + +# Foire aux questions d'OctoBot cloud (FAQ) + +## Comment puis-je tester une stratégie ou un panier de crypto ? + +Sur OctoBot cloud, nous cherchons à simplifier autant que possible, et cela inclut les tests de stratégies et de paniers. En plus des performances historiques publiques, **chaque stratégie ou panier de crypto peut être testé sans risque en utilisant le [trading virtuel](paper-trading-a-strategy)**. + +Cela signifie que vous pouvez exécuter n'importe quelle stratégie de trading ou panier de crypto à tout moment en utilisant des fonds virtuels avant de [démarrer l'investissement sur votre compte d'échange réel](invest-with-your-strategy). Le trading virtuel vous permet de tester les stratégies qui vous intéressent autant que vous le souhaitez, et ce, gratuitement. + +[En savoir plus sur le trading virtuel](paper-trading-a-strategy) + +## Comment sont calculés les profits des stratégies ? + +Chaque stratégie sur OctoBot cloud est construite, exécutée et testée à l'aide d'OctoBot. Cela signifie que les performances passées de chaque stratégie sont évaluées régulièrement en utilisant des données historiques et le [backtesting d'OctoBot](/guides/octobot-usage/backtesting). + +Chez OctoBot, nous croyons en la transparence. Cela signifie que parfois les stratégies peuvent devenir non rentables, car les profits dépendent de nombreux facteurs différents, y compris les conditions du marché. Si une stratégie ne génère pas de profits pendant une période donnée, vous le verrez avant de l'utiliser. + +## Comment créer ma stratégie ? + +OctoBot cloud vous permet de trader selon votre propre stratégie grace à [l'automatisation de stratégies TradingView](tradingview-automated-trading). + +## Où sont vos fonds lorsque vous utilisez OctoBot ? + +Vos fonds restent toujours sur la plateforme d'échange, sur votre propre compte d'échange. + +OctoBot est un logiciel vous permettant d'appliquer une stratégie de trading ou un panier de crypto sur votre propre compte d'échange. Cela signifie qu'OctoBot envoie simplement des ordres de trading à votre compte d'échange pour acheter et vendre des actifs selon la stratégie ou le panier que vous avez sélectionné. OctoBot ne reçoit jamais ni n'envoie de fonds de la part de ses utilisateurs. + +## Dépôt et retrait de fonds + +La plateforme OctoBot ne détient jamais vos fonds. Lorsque vous utilisez OctoBot, vos fonds restent toujours sur le compte d'échange que vous avez sélectionné pour votre OctoBot. La stratégie d'investissement que vous avez sélectionnée fonctionnera en envoyant des ordres d'achat et de vente sur votre compte d'échange. + +En conséquence, vous pouvez déposer et retirer des fonds de votre compte d'échange comme vous le feriez normalement si aucun OctoBot n'y était connecté. Si un OctoBot constate que des fonds ont été ajoutés ou retirés, il s'adaptera automatiquement et maintiendra la stratégie d'investissement que vous avez sélectionnée en fonctionnement tant que les fonds minimum requis pour exécuter cette stratégie restent disponibles. + +Remarque : Si quelqu'un prétend que vous devez déplacer vos fonds vers une plateforme quelconque pour utiliser OctoBot, alors cette personne ment et tente de voler votre argent. L'équipe d'OctoBot ne vous demandera jamais de faire une telle chose. + +## Combien pouvez-vous perdre d'argent au maximum ? + +Cela dépend de la stratégie que vous avez sélectionnée. Dans tous les cas, vous ne pouvez jamais perdre plus que votre investissement. + +Lors de l'utilisation d'OctoBot, les mêmes règles que sur les plateformes d'échange s'appliquent, ce qui signifie que vous pouvez finir par perdre des fonds, par exemple, si les événements suivants se produisent : + +- Vente d'un actif à un prix inférieur à celui auquel vous l'avez acheté +- Frais de trading prélevés par la plateforme d'échange lors de l'exécution des ordres +- Problèmes liés à l'actif investi ou à la plateforme d'échange elle-même (par exemple, si la valorisation de l'actif s'effondre) + +:::info + Vous pouvez tester n'importe quelle stratégie **sans risque**, donc sans + aucune chance de perdre des fonds, en utilisant [le trading + virtuel](paper-trading-a-strategy). +::: + +## OctoBot cloud est-il sécurisé ? + +Oui, la sécurité est l'une de nos principales priorités. Lors de l'utilisation d'OctoBot cloud, les mesures de sécurité suivantes s'appliquent : + +- Vos clés d'API d'échange sont stockées dans un coffre-fort crypté sécurisé. Cela signifie que même en cas de fuite des clés d'API d'échange depuis les serveurs d'OctoBot, elles ne seraient pas lisibles. +- Vos clés d'API d'échange sont configurées pour ne pouvoir être utilisées que depuis les adresses IP d'OctoBot cloud. Cela signifie que dans l'improbable cas où vos clés d'API seraient compromises (depuis OctoBot cloud ou de votre part), elles seraient refusées par l'échange. +- Les clés d'API d'OctoBot avec des droits de retrait ne peuvent pas être utilisées. OctoBot cloud refuse de stocker les clés d'API d'échange avec des autorisations de retrait (lorsque cela est techniquement possible). Cela signifie que vos fonds ne peuvent techniquement pas être retirés de votre compte d'échange par OctoBot ou par la société qui le gère. +- OctoBot repose sur des stratégies automatisées plutôt que sur des actions humaines. Cela signifie que chaque stratégie est fiable et prévisible. Vous n'avez pas besoin de faire confiance à un être humain pour exécuter correctement la stratégie. + +## Puis-je utiliser le même compte de plateforme d'échange sur plusieurs OctoBots ? + +Oui, vous pouvez utiliser le même compte d'échange sur plusieurs OctoBots. Chaque OctoBot opérera sur le budget que vous avez défini pour lui, à partir du portefeuille de votre compte d'échange. + +## Pourquoi y a-t-il des fonds minimaux pour utiliser les stratégies de trading et les paniers de crypto ? + +Il y a deux raisons pour les fonds minimaux dans les stratégies de trading et les paniers de crypto : + +- **Règles de trading de l'échange**: OctoBot envoie des ordres à l'échange. Ces échanges ont des règles de trading qui imposent une taille minimale pour chaque ordre. Sur Binance, ce montant <a href="https://www.binance.com/en/trade-rule" rel="nofollow">est généralement de 5 ou 10 dollars</a>. Les stratégies tradent généralement avec une partie de votre portefeuille pour chaque ordre, cela signifie que cette partie doit être suffisamment grande pour respecter les règles de trading. C'est particulièrement vrai pour les stratégies de trading basées sur la grille, où vos fonds sont répartis en un grand nombre de petits ordres. +- **Le plan investisseur**: afin de maintenir le plan investisseur d'OctoBot cloud complètement gratuit, nous nous associons avec des échanges pour leur apporter du volume de trading. Cela signifie que nous devons exiger un montant minimum dans chaque portefeuille pour payer nos factures. Nous essayons de maintenir ce minimum aussi bas que possible, mais nous devons définir un seuil. + +## Comment puis-je connecter mon compte de plateforme d'échange à OctoBot ? + +Pour vous aider à connecter votre compte de plateforme d'échange à OctoBot, nous avons créé ces guides étape par étape : + +- [Guide de connexion à Binance](connect-your-binance-account-to-octobot) +- [Guide de connexion à Kucoin](connect-your-kucoin-account-to-octobot) +- [Guide de connexion à Coinbase](connect-your-coinbase-account-to-octobot) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/paper-trading-a-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/paper-trading-a-strategy.md new file mode 100644 index 0000000000..5afc6260f5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/paper-trading-a-strategy.md @@ -0,0 +1,52 @@ +--- +title: "Tester sans risque" +description: "Apprenez comment utiliser le trading virtuel (paper trading) pour tester des stratégies et des paniers de crypto en direct et sans risque avec OctoBot cloud." +sidebar_position: 5 +--- + + + +# Tester sans risque + +![cloud octobot en trading simulé](/images/guides/paper-trading-cloud-octobot.png) + +Avec OctoBot, vous pouvez utiliser le trading simulé (ou trading papier) avec n'importe quelle stratégie ou panier de crypto. + +Le trading simulé vous permet de tester des stratégies de trading dans des conditions réelles en utilisant un portefeuille virtuel. + +C'est parfait pour expérimenter une stratégie de trading ou un panier de crypto sans prendre de risque car seuls des fonds simulés sont utilisés. + +## L'utiliser en trading simulé + +1. À partir de la stratégie ou le panier de crypto avec lequel vous souhaitez faire du trading simulé, cliquez sur **Démarrer le trading** +2. Sélectionnez **Trading virtuel** +![choix entre le trading avec un compte réel ou virtuel](/images/guides/type-de-compte-de-trading-choix-entre-reel-ou-virtuel.png) +3. Sélectionnez le montant que vous souhaitez utiliser dans votre portefeuille simulé +![sélectionner un échange pour une stratégie cloud](/images/guides/trading-virtuel-configuration-du-portefeuille.png) + + +:::info + Aucun compte d'échange n'est requis pour utiliser le trading virtuel sur OctoBot cloud. +::: + +## Le trading virtuel dans OctoBot + +### Votre OctoBot en trading virtuel + +Votre OctoBot en mode trading virtuel appliquera maintenant la stratégie ou le panier sélectionné comme s'il était sur un compte d'échange réel, sauf qu'il ne se connectera pas réellement à un compte d'échange. + +Comme avec les OctoBots en mode réel, vous pouvez [suivre votre bot en trading simulé](follow-your-profits) comme d'habitude. + +### Pendant combien de temps puis-je tester ? +En utilisant OctoBot cloud, vous pouvez faire fonctionner votre OctoBot en mode trading simulé aussi longtemps que vous le souhaitez. + +La seule contraine est de cliquer sur **Prolonger** une fois toutes les deux semaines lorsque votre OctoBot approche de sa date d'expiration. Cela notifie à OctoBot cloud qu'il doit continuer à faire fonctionner votre OctoBot en trading simulé. + +![cloud octobot entrading simulé qui expire dans 2 jours](/images/guides/paper-trading-cloud-octobot-expiring-in-2-days.png) + +### Puis-je trader en simulé et avec mes fonds réels simultanément ? + +Oui ! +En fait, nous vous encourageons même à utiliser le trading simulé pour expérimenter des stratégies ou des paniers de crypto sur OctoBot cloud, même après avoir trouvé ceux avec lesquels investir avec vos fonds réels. + +Cela vous permet de tester rapidement de nouveaux panier ou stratégies et d'optimiser vos gains en utilisant toujours ceux que vous préférez à tout moment. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/pay-with-crypto.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/pay-with-crypto.md new file mode 100644 index 0000000000..b53cdd7dec --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/pay-with-crypto.md @@ -0,0 +1,57 @@ +--- +title: "Payer avec des crypto" +description: "Guide étape par étape sur le paiement en crypto de l'abonnement cloud OctoBot" +sidebar_position: 30 +--- + + + +# Payer votre abonnement avec des cryptomonnaies + +:::info + Attention : Les paiements en cryptomonnaies sont temporairement indisponibles. Nous travaillons activement à rétablir cette option prochainement. +::: + +## Options de paiement pour OctoBot Cloud + +Vous avez la possibilité de payer votre abonnement OctoBot Cloud soit par carte bancaire, soit par cryptomonnaies. Pour les paiements en cryptomonnaies, vous pouvez utiliser le [USDC](https://www.octobot.cloud/what-is-usdc) sur différentes blockchains telles qu'[Ethereum](https://www.octobot.cloud/what-is-ethereum), Optimism, BNB Smart Chain, Polygon, Base et [Arbitrum](https://www.octobot.cloud/what-is-arbitrum). + +## Comment effectuer un paiement en cryptomonnaies + +Après la période d'essai d'OctoBot Cloud, ou suite à la création de votre compte, vous serez automatiquement inscrit au plan gratuit, le plan Investisseur. +Pour accéder à des fonctionnalités supplémentaires disponibles dans les plans [Investisseur Plus](/blog/introducing-the-investor-plus-plan) et [Pro](/blog/introducing-the-pro-plan), vous devrez mettre à jour votre abonnement. + +1. Ouvrir la page des <a href="https://www.octobot.cloud/fr/plan" rel="nofollow">plans OctoBot cloud</a>. +2. Sélectionnez le plan souhaité et cliquez sur "Payer en crypto". + +<div style={{textAlign: "center"}}> + ![choisir payer en + crypto](/images/investing/pay-with-crypto/pay-with-crypto-fr.png) +</div> + +3. Connectez votre wallet <a href="https://metamask.io/" rel="nofollow">Metamask</a>, Binance, Brave, Coinbase, <a href="https://walletconnect.com/" rel="nofollow">Wallet Connect</a>, etc... La plupart des wallet sont supportés. + +![connectez votre wallet crypto](/images/investing/pay-with-crypto/connect-your-crypto-wallet.png) + +4. Choisissez la blockchain et le token que vous désirez utiliser pour le paiement. + +![choisir une blockchain et un token](/images/investing/pay-with-crypto/select-a-blockchain-and-a-token.png) + +5. Cliquez sur "Sign to continue". +6. Signez la transaction avec votre wallet pour accepter les termes de service du fournisseur de service <a href="https://www.loopcrypto.xyz/payments" rel="nofollow">LoopCrypto</a>. +7. Confirmez l’autorisation de transaction, qui sera supérieure au montant de l’abonnement, permettant ainsi des débits automatiques mensuels. +8. Attendez la validation de la transaction sur la blockchain. Vous recevrez un email de confirmation une fois que votre abonnement sera mis à jour. + +## Changement de méthode de paiement de carte bancaire à cryptomonnaies + +Pour l'instant, cette option n'est pas disponible directement depuis votre compte. Pour changer votre méthode de paiement de la carte bancaire aux cryptomonnaies, veuillez contacter le support client à [contact@octobot.cloud](mailto:contact@octobot.cloud). + +## Comment arrêter le paiement en cryptomonnaies + +Pour arrêter votre abonnement payé par cryptomonnaies, suivez ces étapes : + +1. Rendez-vous dans la section <a href="https://www.octobot.cloud/fr/account" rel="nofollow">Mon compte</a>. +2. Cliquez sur le bouton "Stop" pour annuler votre abonnement. +3. Si vous souhaitez également déconnecter votre portefeuille, cliquez sur "Mettre à jour ma méthode de paiement" et suivez les instructions pour retirer l'accès de votre portefeuille. + +En suivant ces étapes, vous pouvez gérer facilement votre abonnement et vos méthodes de paiement pour OctoBot Cloud. Si vous avez des questions ou besoin d'assistance supplémentaire, n'hésitez pas à contacter le support client. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/share-your-trading-signals.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/share-your-trading-signals.md new file mode 100644 index 0000000000..345f05ef4b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/share-your-trading-signals.md @@ -0,0 +1,85 @@ +--- +title: "Partagez vos signaux de trading" +description: "Apprenez comment partager vos signaux de trading crypto sur OctoBot cloud pour permettre à d'autres de copier automatiquement vos signaux." +sidebar_position: 27 +--- + + + +# Partager vos signaux de trading + +## Partager des signaux de trading depuis Telegram + +L'intégration du bot Telegram d'OctoBot vous permet de partager des signaux de trading depuis votre groupe Telegram. +Vous pouvez choisir entre deux formats de signaux : + +- Format OctoBot (similaire au format d'[alerte personnalisée de TradingView](tradingview-alerts-automation#automatisations-tradingview-personnalisées)) +- Format Cornix + +### Étapes pour configurer le bot Telegram + +1. **Ouvrir la vue de gestion des stratégies** + +- Rendez-vous sur la <a href="https://www.octobot.cloud/creator" rel="nofollow">page de gestion des stratégies</a>, dans la section `Administration` +- Sélectionnez la stratégie pour laquelle vous souhaitez partager des signaux + +2. **Ajouter OctoBot à votre groupe Telegram** + +Ajoutez le bot Telegram d'OctoBot à votre groupe Telegram en tant qu'administrateur. Cela permet au bot de récupérer les signaux de trading du groupe. +Vous pouvez trouver le bot en recherchant son nom dans Telegram et en l'ajoutant à votre groupe avec des privilèges d'administrateur. + +3. **Récupérer l'ID du canal** + +Transférez un message de votre groupe Telegram à `@getidsbot` pour obtenir l'ID du canal. L'ID du canal sera un nombre négatif, par exemple `-1000000000000`. +Copiez cet ID de canal pour l'utiliser à l'étape suivante. + +4. **Activer l'intégration Telegram et saisir l'ID du canal** + +Dans la section "Intégrations" de votre stratégie OctoBot, trouvez l'onglet **Telegram** et activez-le en basculant l'interrupteur sur la position "on". +Dans le champ "ID du canal", collez l'ID du canal que vous avez récupéré (par exemple, `-1000000000000`). Cela indique à OctoBot où lire les signaux de trading. + +5. **Sélectionner le type de signal** + +Choisissez le format des signaux de trading à partager dans votre groupe Telegram : + +- **Format OctoBot** : Le format par défaut, similaire au format d'alerte personnalisé de TradingView, utilisé par OctoBot pour partager des signaux. +- **Format Cornix** : Le même format que Cornix. +- Utilisez le menu déroulant "Type de signal" pour sélectionner votre format préféré. + +## Gérer les utilisateurs de la stratégie avec un l'interface HTTP + +L'interface HTTP vous permet de gérer les utilisateurs de votre stratégie en ajoutant des ID externes et en définissant des dates d'expiration. Cette procédure est nécessaire pour les stratégies privées. + +### Étapes pour gérer les utilisateurs avec l'interface HTTP + +1. **Configurer le contrôle d'accès pour votre stratégie** + +Dans la section "Contrôle d'accès", choisissez entre "Stratégie publique" et "Stratégie privée". Pour gérer les utilisateurs via HTTP, sélectionnez **Stratégie privée** pour activer la gestion des membres. + +- Stratégie publique : Tout le monde peut accéder et utiliser la stratégie sans gestion des membres. +- Stratégie privée : Seuls les membres approuvés peuvent accéder à la stratégie, nécessitant une gestion des membres. + +2. **Copier l'URL HTTP** + +Dans la section "Intégrations", copiez le **l'URL HTTP** et collez-le dans votre code. Cela vous permet d'envoyer des signaux de trading ou de gérer les membres via des requêtes HTTP. + +3. **Générer une clé API** + +Cliquez sur le bouton **Créer une nouvelle clé API** pour générer une clé API unique pour vos requêtes HTTP. Cette clé sera utilisée pour authentifier vos requêtes. + +**Avertissement** : Les clés API ne sont affichées qu'une seule fois. Elles ne doivent jamais être partagées. + +4. **Ajouter la clé secrète API à votre requête HTTP** + +Incluez la clé API **secrète** dans l'en-tête de votre requête HTTP sous la forme `Your-API-Key`. + +Par exemple, pour gérer des membres avec des ID Telegram : + +``` +curl -X POST https://services.octobot.cloud/cloud/creator/webhook/AAAAA-BBBBBBB/CCCCCCC-DDDDDDDD/members/telegram -d '{"user_id": "USER_ID", "expiration_date": "EXPIRATION_DATE"}' -H 'Content-Type: application/json' -H 'Api-Key: XXXXXXXXXXX-YYYYYYYYYYY' +``` + +Avec : + +- `USER_ID` : L'ID utilisateur Telegram du membre que vous souhaitez ajouter ou mettre à jour (pas son Telegram handle). +- `EXPIRATION_DATE` : La date jusqu'à laquelle le membre a accès à la stratégie (par exemple, 2025-12-31). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/stop-a-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/stop-a-strategy.md new file mode 100644 index 0000000000..752a2c811f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/stop-a-strategy.md @@ -0,0 +1,26 @@ +--- +title: "Arrêter l'investissement" +description: "Apprenez comment arrêter et redémarrer rapidement et facilement votre stratégie d'investissement en cours d'exécution sur OctoBot cloud." +sidebar_position: 8 +--- + +# Arrêter d'investir + +## Arrêter un OctoBot + +Vous pouvez arrêter un OctoBot à tout moment afin de mettre en pause ou arrêter l'exécution de sa stratégie d'investissement. + +![arrêter un bot OctoBot cloud en annulant ses ordres](/images/guides/stopping-cloud-octobot-cancelling-orders.png) + +Arrêter un OctoBot va: +- Annuler tous ses ordres ouverts d'achat et de vente +- L'empêcher de créer de nouveaux ordres +- Instantanément vendre toutes les cryptomonnaies achetées par la stratégie (si cette option est sélectionnée) +- Libérer le compte d'échange associé afin de pouvoir l'utiliser avec une autre stratégie + +## Redémarrer un OctoBot +Après avoir été arrêté, les OctoBots peuvent être redémarrés pour reprendre votre stratégie investissement. + +![octobot cloud redémarrer octobot](/images/guides/octobot-cloud-restart-octobot.png) + +Un OctoBot redémarré applique sa stratégie tout en conservant son historique de portefeuille et de profitabilité d'origine. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-alerts-automation.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-alerts-automation.mdx new file mode 100644 index 0000000000..893a4ceb6c --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-alerts-automation.mdx @@ -0,0 +1,218 @@ +--- +title: "Automatisation d'alertes TradingView" +description: "Apprenez à automatiser vos alertes TradingView provenant d'événements de prix, d'indicateurs ou de stratégies Pine Script avec du trading réel ou simulé." +sidebar_position: 17 +--- + + + +# Automatisation des alertes TradingView + +Avec OctoBot cloud, vous pouvez facilement transformer n'importe quelle alerte de prix TradingView, indicateur ou stratégie Pine Script en trades. Vous pouvez ainsi trader sur vos plateformes d'échanges préférées ou sans risque avec le [trading simulé](paper-trading-a-strategy). + +<div style={{textAlign: "center"}}> + ![automatisations de stratégies TradingView illustré par le logo + TradingView](/images/guides/trading-view/tradingview-automation-illustrated-by-tradingview-logo.png) +</div> + +:::info + Pour trader sur n'importe quelle alerte TradingView, vous devez d'abord + [Configurer l'URL du webhook + ](tradingview-trading-tutorial#25-configurer-lurl-du-webhook) + pour votre paire de trading si ce n'est pas déjà fait. +::: +Si vous voulez des précisions sur la façons d'utiliser les alertes TradingView, +consultez notre [Tutoriel de trading avec +TradingView](tradingview-trading-tutorial). + +**[Démarrer un bot](https://www.octobot.cloud)** + +## Trading automatisé avec des alertes de prix + +TradingView peut envoyer automatiquement une alerte lorsque le prix d'un actif franchit une valeur donnée. + +<div style={{textAlign: "center"}}> + ![creer alerte de prix depuis + tradingview](/images/guides/trading-view/creer-alerte-de-prix-depuis-tradingview.png) +</div> + +Cette alerte de prix qui notifiera mon automatisation d'ordre d'achat identifiée par `71e47ccd-2447-4938-8219-968010045a08` si BTC passe sous 40 000 USDT. + +## Trading automatisé avec des indicateurs + +TradingView peut envoyer automatiquement une alerte lorsqu'il se produit quelque chose sur un indicateur. + +<div style={{textAlign: "center"}}> + ![creer une alerte depuis un indicateur + tradingview](/images/guides/trading-view/creer-une-alerte-depuis-un-indicateur-tradingview.png) +</div> + +Cette alerte d'indicateur utilise <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">l'Indice de force relative</a> (ou RSI) qui notifiera mon automatisation d'ordre de vente identifiée par `6f20ebd6-1d98-4630-b640-96afbe98e4b4` si la valeur du RSI dépasse 80, que je considère comme un signal de vente. + +Une alerte d'indicateur peut être un événement simple tel que le franchissement d'une valeur configurée ou une condition beaucoup plus avancée comme des divergences baissières ou la sortie d'un canal de prix. Cela est illustré dans l'exemple ci-dessus de l'indicateur RSI. + +<div style={{textAlign: "center"}}> + ![options de declanchement pour creer une alerte depuis un indicateur + tradingview](/images/guides/trading-view/options-de-declanchement-pour-creer-une-alerte-depuis-un-indicateur-tradingview.png) +</div> + +:::info + N’importe quel indicateur TradingView (intégré ou personnalisé, payant et + gratuit) peut être utilisé pour envoyer des alertes et automatiser vos trades + en utilisant vos automations OctoBot via Tradingview. +::: + +## Trading automatisé avec des stratégies Pine script + +TradingView peut automatiquement envoyer des alertes quand vos <a href="https://www.tradingview.com/pine-script-docs/en/v5/index.html#" rel="nofollow">stratégies Pine Script</a> créent des ordres. + +Pour envoyer des alertes à partir d'une stratégie Pine Script, utiliser le paramètre <a href="https://www.tradingview.com/pine-script-docs/en/v5/concepts/Alerts.html?highlight=alert_message#order-fill-events" rel="nofollow">`alert_message`</a> dans les fonctions de stratégie Pine Script qui peuvent créer des ordres. + +![creer une alerte de strategie tradingview](/images/guides/trading-view/creer-une-alerte-de-strategie-tradingview.png) + +Pour envoyer des alertes à partir d'une stratégie Pine Script, créer une nouvelle alerte et s'assurer de: + +1. Sélectionner le nom de votre stratégie en tant que condition +2. Remplacer **tout** le contenu du message par exactement `{{strategy.order.alert_message}}` + +Dans le code Pine Script de votre stratégie, ajouter `alert_message="yourAutomationIdentifier"` dans vos appels à `entry`, `exit` ou `close`. +Exemple avec l'identifiant d'automatisation `71e47ccd-2447-4938-8219-968010045a08`: + +> `strategy.entry("Buy", strategy.long, comment="Buy Signal Triggered", alert_message="71e47ccd-2447-4938-8219-968010045a08")` + +Pour en savoir plus sur l'automatisation de stratégies TradingView en Pine Script, rendez-vous sur le [tutoriel d'automation de stratégies](how-to-automate-any-tradingview-strategy-on-octobot-cloud). + +<div style={{textAlign: "center"}}> + **[Générez votre stratégie](https://www.octobot.cloud/fr/creator)** +</div> + +:::info + Utiliser les stratégies Pine Script de TradingView pour automatiser votre + trading est très puissant car vous pouvez également utiliser le testeur de + stratégie intégré à TradingView pour optimiser votre stratégie. +::: + +## Automatisations TradingView personnalisées + +À la place automatisations définies à partir de l'interface utilisateur de votre OctoBot TradingView, il est également d'utiliser des alertes avec un contenu personnalisé. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="HeOi4PY1ayk" title="Tutorial TradingView: automatiser n" /> + +Ce format offre une plus grande flexibilité dans la façon d'exécuter les automatisations en spécifiant directement le contenu de votre automatisation dans le message de l'alerte. + +Exemple: **un achat au marché de 0.001 BTC sur un bot avec l'id 123** + +```bash +SYMBOL=BTCUSDT;SIGNAL=BUY;VOLUME=0.001;BOT_ID=123 +``` +Voir [tous les exemples](#exemples-dautomatisations-personnalisées). + +### Paramètres des automatisations personnalisées + +| Paramètre | Exemple 1 | Exemple 2 | Détails | +| :-------------------- | :----------------------------------- | :----------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | +| `SYMBOL` | BTCUSDT | ETH/USDT | Votre symbol tradé, peut aussi être `{{ticker}}`. | +| `SIGNAL` | BUY | CANCEL | Créer un ordre d'achat, de vente ou en annuler un. | +| `ORDER_TYPE` | LIMIT | MARKET | Type d'order à créer (`MARKET`, `LIMIT`, `STOP`). Valeur par défaut: `MARKET`. | +| `VOLUME` | 0.01 | 50q | Le montant à utiliser. Suit la [syntaxe des montants](../guides/octobot-trading-modes/order-amount-syntax). | +| `PRICE` | 30000 | -10% | Le prix à utiliser. Suit la [syntaxe des prix](../guides/octobot-trading-modes/order-price-syntax). _Requis lorsque `ORDER_TYPE=LIMIT`_. | +| `TAKE_PROFIT_PRICE` | 45000 | 10% | Le prix du "take profit" à utiliser. Suit la [syntaxe des prix](../guides/octobot-trading-modes/order-price-syntax). | +| `TAKE_PROFIT_PRICE_2` | 50000 | 25% | Le prix du "take profit" n°N à utiliser. Suit la [syntaxe des prix](../guides/octobot-trading-modes/order-price-syntax). Les fonds de l'ordre d'entrée seront uniformément répartis entre les take profits sauf si un `TAKE_PROFIT_VOLUME_RATIO` est renseigné pour chaque take profit. | +| `TAKE_PROFIT_VOLUME_RATIO_2` | 30 | 70 | Ratio du montant de l'ordre d'entrée à inclure dans ce take profit. Si utilisé, un `TAKE_PROFIT_VOLUME_RATIO_X` est requis pour chaque take profit. | +| `STOP_PRICE` | 40000 | -25% | Le prix du stop loss. Suit la [syntaxe des prix](../guides/octobot-trading-modes/order-price-syntax). _Requis lorsque `ORDER_TYPE=STOP`_. | +| `TRAILING_PROFILE` | filled_take_profit | filled_take_profit | Active les ordres de trailing suivant le [profil de trailing](#profils-de-trailing) donné. Profils supportés: `filled_take_profit`. | +| `TAG ` | entry1 | exit2 | Le tag de cet ordre or le tag des ordres à annuler. | +| `REDUCE_ONLY` | false | true | Si ordre à créer doit être reduce only. _Utilisé en trading de futures uniquement_. Valeur par défaut: `false`. | +| `LEVERAGE` | 10 | 2 | La nouvelle valeur de levier à utiliser. _Utilisé en trading de futures uniquement_. | +| `BOT_ID` | c403ee03-ba4c-4d9d-9d78-ad692333a291 | b403ee03-ba4c-4d9d-9d78-ad692333a292 | L'ID de votre OctoBot sur lequel exécuter le signal. | + +Les paramètres doivent être séparés avec le caractère `;` et peuvent être inclus dans n'importe quel ordre. + +Note: Le paramètre `BOT_ID` est requis. Votre `BOT_ID` est le dernier segment de l'URL de votre OctoBot TradingView. +Exemple: si l'URL de votre OctoBot est `https://www.octobot.cloud/fr/bots/0280badc-e884-4637-bb86-44444444`, alors votre `BOT_ID` est `0280badc-e884-4637-bb86-44444444`. +```bash +BOT_ID=0280badc-e884-4637-bb86-44444444;SYMBOL=BTCUSDT;SIGNAL=BUY;ORDER_TYPE=LIMIT;VOLUME=45q;PRICE=-3% +``` + +### Exemples d'automatisations personnalisées +> Un ordre `BUY MARKET` de `20` unités de l'actif de cotation avec un `ticker` dynamique et un bot id de `123`. +```bash +SYMBOL={{ticker}};SIGNAL=BUY;VOLUME=20q;BOT_ID=123 +``` + +> Un ordre `BUY LIMIT`de `0.01 ETH` à `-3%` du prix courant avec un tag `strategy-1`. +```bash +SYMBOL=ETHUSDC;SIGNAL=BUY;ORDER_TYPE=LIMIT;VOLUME=0.01;PRICE=-3%;TAG=strategy-1;BOT_ID=123 +``` + +> Un ordre `BUY LIMIT` de `45 USDT` à `-3%` du prix actuel immédiatement suivi par un `take profit à +10% du prix d'achat` et `un stop loss à -20%` dès que l'ordre d'achat initial est exécuté. +Note: lorsque les paramètres `TAKE_PROFIT_PRICE` et `STOP_PRICE` sont renseignés, les take profit et stop loss créés seront des ordres OCO (one cancels the other). Dans ce cas, seul le stop loss sera envoyé à l'échange. Il sera ensuite remplacé par le take profit si le prix de ce take profit est atteint avant celui du stop loss. +_Les ordres OCO sont actuellement en beta test et peuvent présenter des instabilités._ +```bash +SYMBOL=BTCUSDT;SIGNAL=BUY;ORDER_TYPE=LIMIT;VOLUME=45q;PRICE=-3%;BOT_ID=123;TAKE_PROFIT_PRICE=10%;STOP_PRICE=-20% +``` + +> Un ordre `BUY MARKET` de `6 SOL` suivi par `3 take profits à 5%, 10% et 20%` du prix d'achat. Ici, chaque take profit aura une quantité de `2 SOL` : la quantité achetée est répartie entre les take profits. +```bash +SYMBOL=SOLUSDC;SIGNAL=BUY;VOLUME=6;TAKE_PROFIT_PRICE=5%;TAKE_PROFIT_PRICE_2=10%;TAKE_PROFIT_PRICE_3=20%;BOT_ID=123 +``` + +> Un ordre `BUY MARKET` de `6 SOL` suivi par `3 take profits à 5%, 10% et 20%` du prix d'achat. Ici, chaque take profit aura respectivement une quantité de 1, 2 et 3 SOL, ce qui correspond à `17`, `33` et `50` % du montant acheté. +```bash +SYMBOL=SOLUSDC;SIGNAL=BUY;VOLUME=6;TAKE_PROFIT_PRICE=5%;TAKE_PROFIT_PRICE_2=10%;TAKE_PROFIT_PRICE_3=20%;TAKE_PROFIT_VOLUME_RATIO=17;TAKE_PROFIT_VOLUME_RATIO_2=33,TAKE_PROFIT_VOLUME_RATIO_3=50;BOT_ID=123 +``` + +> `CANCEL` tous les ordres `SOL/USDC` avec le tag `strategy-1`. +```bash +SIGNAL=CANCEL;SYMBOL=SOLUSDT;TAG=strategy-1;BOT_ID=123 +``` + +**Pour le trading de futures** + +> Un ordre `SELL MARKET` de `3 SOL` sur un marché de futures de en `REDUCE_ONLY`. +```bash +SYMBOL=SOLUSDC;SIGNAL=SELL;VOLUME=3;REDUCE_ONLY=true;BOT_ID=123 +``` + +> Un ordre `BUY MARKET` de `200 USDC` qui configure aussi la valeur de levier du contrat `SOL/USDC` à `3`. +```bash +SYMBOL=SOLUSDC;SIGNAL=BUY;VOLUME=200q;LEVERAGE=3;BOT_ID=123 +``` + +### Profils de trailing +_Profils de trailing sont actuellement en beta test et peuvent présenter des instabilités._ + +Lorsqu'il est configuré sur un profil valide, `TRAILING_PROFILE` active les ordres trailing selon le profil donné. Voici comment les profils fonctionnent: +- `filled_take_profit`: Fonctionne avec un stop loss associé à plus d'un take profit. Lorsqu'un take profit est exécuté, le prix du stop loss sera mis à jour, d'abord au prix d'entrée de l'ordre d'achat, puis au prix du take profit précédemment rempli. Ce profil est utile pour s'assurer de toujours clôturer sa position avec un bénéfice dès qu'au moins un take profit a été atteint. + +## Exemples de stratégies automatisées + +- [Stratégie Death and Golden Cross](tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy): acheter et vendre en fonction des Golden et Death Cross +- [Stratégie de RSI pour Bull market](tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video): acheter et vendre en utilisant le RSI pour augmenter ses profits en bull market. +- [Tutoriel pour automatiser une stratégie TradingView personnalisée](how-to-automate-any-tradingview-strategy-on-octobot-cloud): apprenez à trader automatiquement selon toute stratégie TradingView. + +**[Démarrer un bot TradingView](https://www.octobot.cloud)** + +## Limite d'utilisation des automatisation + +| Type d'automatisation | Limite d'utilisation par heure | Temps moyen d'execution | +| :-------------------- | :----------------------------- | :---------------------- | +| Email | 20 | 10 secondes | +| Webhook | 20 | 5 secondes | + +Afin de réduire l'impact des alertes mal configurées et prévenir tout abus du système, il existe une limite au nombre de fois qu'une automation donnée peut être déclenchée en 60 minutes. + +Le nombre de bots et d'automatisations que vous pouvez avoir est illimité, mais chaque automatisation ne peut être déclenchée individuellement que 20 fois maximum en 60 minutes. +De plus, un maximum de 80 automatisations peut être déclenchée sur un même compte OctoBot en 60 minutes. Toute tentative pour dépasser cette limitation sans accord écrit préalable de l'équipe OctoBot pourra être sanctionné d'un bannissement temporaire ou permanent du/des compte(s) concerné(s). +Contactez-nous si vous avez besoin d'augmenter cette limite. + +Le temps moyen d'exécution est le temps mesuré entre le moment où TradingView émet l'alerte et le moment où elle est exécutée par OctoBot. Il s'agit d'une moyenne, ce temps peut donc varier. +Cette variation est faible pour les webhooks mais peut, dans de rare cas, atteindre plusieurs dizaines de secondes pour les alertes par email. Ceci s'explique par les contraintes techniques associées au transfert d'email qui est un processus moins optimisé qu'un simple appel à une webhook. + +## Securité des alertes + +L'infrastructure d'OctoBot cloud est conçue avec la sécurité en priorité. Il en va de même pour l'intégration des alertes TradingView. + +Seules les alertes provenant du site officiel de <a href="https://tradingview.com/" rel="nofollow">TradingView</a> peuvent déclencher les automatisations TradingView. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-automated-trading.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-automated-trading.mdx new file mode 100644 index 0000000000..bfd8b1fcdf --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-automated-trading.mdx @@ -0,0 +1,159 @@ +--- +title: "Trading automatisé avec TradingView" +description: "Automatisez facilement vos trades depuis TradingView avec OctoBot. Profitez d'alertes illimitées sur les prix, les indicateurs et les stratégies Pine Script." +sidebar_position: 15 +--- + + + +# Trading automatisé avec TradingView + +<a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> peut être bien plus qu'un outil pour analyser les prix. +Vous pouvez également l'utiliser pour envoyer des alertes à OctoBot cloud et +avoir un OctoBot qui achète ou vend instantanément selon vos objectifs dès que +votre condition est remplie sur TradingView. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="bZwyQMsgYYE" title="Automatiser TradingView" /> + +Lorsque vous utilisez un OctoBot TradingView, chaque alerte de TradingView peut déclencher une action que nous appelons `automatisation`. + +**[Démarrer un bot](https://www.octobot.cloud)** + +## Les automatisations pour créer vos stratégies + +Une automatisation est une action telle que "achat", "vente" ou "annuler d'un ordre" que vous pouvez configurer directement sur votre OctoBot TradingView. Chaque OctoBot a son propre ensemble d'automatisations que vous pouvez créer et utiliser à volonté. + +En utilisant des automatisations, vous pouvez créer n'importe quel type de stratégie de trading. + +<div style={{textAlign: "center"}}> + ![octobot plusieurs automatisations + tradingview](/images/guides/trading-view/octobot-plusieurs-automatisations-tradingview.png) +</div> + +### Utiliser les automatisations + +Les automatisations peuvent être configurées selon vos souhaits et vous pouvez en avoir autant que nécessaire. + +<div style={{textAlign: "center"}}> + ![octobot automatisations + interface](/images/guides/trading-view/octobot-automatisations-interface.png) +</div> + +Chaque automation possède son propre identifiant à définir dans le message [d'alerte TradingView](tradingview-trading-tutorial#26-créer-une-nouvelle-alerte) : cela permet à votre OctoBot de savoir quoi faire lorsqu'une alerte TradingView est reçue. + +<div style={{textAlign: "center"}}> + ![octobot automatisation + identifiant](/images/guides/trading-view/octobot-automatisation-identifiant.png) +</div> + +Lorsque votre OctoBot reçoit une alerte de TradingView, l'automatisation associée est déclenchée et son résultat est affiché. Chaque OctoBot dispose également d'un historique complet de ses automatisations et de leur résultat d'exécution. + +<div style={{textAlign: "center"}}> + ![octobot automatisation + historique](/images/guides/trading-view/octobot-automatisation-historique.png) +</div> + +Créer vos premières automatisations en suivant le [tutoriel de trading automatisé depuis TradingView](tradingview-trading-tutorial). + +### Automatisations + +En utilisant le <a href="https://www.octobot.cloud/fr/plan" rel="nofollow">plan Pro</a>, vous obtenez un accès illimité à chaque automatisation simple qui vous permet d'acheter et de vendre automatiquement dès que vos alertes TradingView sont déclenchées. + +<div style={{textAlign: "center"}}> + ![octobot simple automatisation creer acheter + btc](/images/guides/trading-view/octobot-simple-automatisation-creer-acheter-btc.png) +</div> + +Les automatisations comprennent l'achat et la vente : + +- De toute cryptomonnaie +- Sur chaque plateforme d'échange prise en charge +- En utilisant des ordres au marché ou aux limites +- À un prix prédéfini ou avec un pourcentage de différence par rapport au prix du marché de la crypto-monnaie tradée +- Avec un pourcentage de votre portefeuille ou un montant fixe quantifié dans la cryptomonnaie échangée ou dans la devise de cotation (en BTC ou USDT pour BTC/USDT) + +Les automatisations conviennent parfaitement aux investisseurs qui souhaitent optimiser soigneusement leurs achats et ventes ainsi que chaque montant tradé, ce qui est particulièrement utile pour trader plusieurs cryptomonnaies avec la même stratégie. + +### Automatisations personnalisées + +Pour ceux qui veulent aller plus loin dans l'automatisation de leur stratégie, les [automatisations personnalisées](tradingview-alerts-automation#automatisations-tradingview-personnalisées) permettent une plus grande liberté. + + +<YouTube id="HeOi4PY1ayk" title="Tutorial TradingView: automatiser n" /> + +Les automatisations personnalisées sont la manière la plus flexible d'automatiser une stratégie et permettent d'approfondir le processus d'automatisation en permettant de: + +- Utiliser des prix et des volumes dynamiques, renseignée en Pine Script +- Annuler des ordres ouverts +- Automatiser les prises de profit et stop losses +- Créer des ordres avancés en trading de futures + +Pour en savoir plus sur les automatisations OctoBot, consultez le [guide des automatisations](tradingview-alerts-automation). + +## Des stratégies illimitées + +Lorsque vous utilisez un OctoBot Tradingview, il n'y a aucune limite d'utilisation. Il s'agit de notre approche unique concernant les automatisations TradingView. + +When you use TradingView OctoBots, there are **no usage limits**. This is our unique approach on TradingView automations. + +Contrairement aux autres systèmes d’automation basés sur TradingView, nous n'avons pas conçu les TradingView OctoBots avec des limites concernant le nombre d'alertes gérées par mois, combien de bots on peuvent être créés ou le nombre maximum de plateformes d'échange utilisables simultanément. Avec OctoBot cloud, vous pouvez: + +- Créer et déclencher autant d'automatisations que nécessaire +- Utiliser autant de bots que vous le souhaitez +- Vous connecter à toutes les plateformes d'échanges et comptes dont vous avez besoin + +Nous pensons que permettre aux utilisateurs de créer les stratégies qu'ils veulent est ce qu'un bot TradingView devrait faire. Nous ne voulons pas que vous vous demandiez si vous devez prendre le plan payant A ou B pour automatiser votre stratégie parce que vous craignez que le nombre d'alertes envoyé depuis TradingView soit trop élevé et que la limite de votre plan soit atteinte avant la fin du mois. C'est un problème important car cela peut entraîner des frais supplémentaires ou pire, votre bot pourrait rester bloqué avec une position ouverte et ne pas appliquer vos alertes. Cela ne peut pas se produire avec OctoBot. + +C'est pourquoi notre tarification offre différents types d'automatisations et même le plan le moins cher est illimité : + +- Les plans proposent des automatisations simples ou avancés +- Toutes les automatisations sont illimitées + +## Démarrer votre OctoBot TradingView + +Chez OctoBot cloud, nous essayons de proposer des choses puissantes mais simples et faciles à utiliser. +C'est pourquoi nous avons créé ce [tutoriel pour vous aider à démarrer votre trading automatisé depuis TradingView](tradingview-trading-tutorial). +Dans ce tutoriel, vous apprendrez à: + +- Créer un OctoBot TradingView +- Utiliser une automatisation pour acheter du BTC/USDT sur une alerte TradingView +- Configurer cette alerte sur votre compte TradingView +- Suivre l'historique d'exécution de cette alerte + +Ou alors, si vous êtes intéressé pour automatiser vos statégies TradingView directement, jetez un œil au [tutoriel d'automatisation d'une stratégie TradingView](how-to-automate-any-tradingview-strategy-on-octobot-cloud). + +**[Démarrer un bot TradingView](https://www.octobot.cloud)** + +## Pour aller plus loin + +TradingView est un merveilleux site web qui permet de créer des alertes sur de nombreux éléments. Bien sûr, chacune d'entre elles peut être utilisée pour automatiser vos transactions avec OctoBot cloud. + +Consultez notre [guide d'automatisation des alertes TradingView](tradingview-alerts-automation) pour découvrir comment automatiser vos trades en vous basant sur: + +- Des événements de prix +- N’importe quel indicateur TradingView +- Des [stratégies Pine Script TradingView](how-to-automate-any-tradingview-strategy-on-octobot-cloud) +- Votre propre <a href="https://www.octobot.cloud/fr/creator" rel="nofollow">stratégie générée par AI</a> + +## Exemples de stratégies automatisées + +Voici quelques inspirations de stratégies élaborées avec TradingView et automatisées en utilisant OctoBot et qui ne nécessitent pas une ligne de code. + +### Optimiser son Bull Market avec le RSI + +Découvrez comment utiliser le RSI pour acheter et vendre au meilleur moment en Bull Market, dès que la tendance commence à changer. + + +<YouTube id="Hksh9vnDvy4" title="Optimiser son Bull Market Crypto avec le RSI" /> + +Voir le [guide de la stratégie Optimiser son Bull Market avec le RSI](tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video). + +### Stratégie Death and Golden Cross + +Découvrez comment automatiser les achats et ventes en se basant sur une stratégie Death et Golden Cross créée sur TradingView. + +![illustration de la stratégie ema tradingview avec 2 achats et 2 ventes](/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +Voir le [tutoriel de la stratégie Death and Golden Cross](tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy.md new file mode 100644 index 0000000000..f67b3253ae --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-strategies-tutorials/automating-a-tradingview-death-and-golden-cross-strategy.md @@ -0,0 +1,183 @@ +--- +title: "Stratégie Golden Cross" +description: "Apprenez à automatiser une stratégie Bitcoin de Death et Golden Cross en utilisant des alertes TradingView et OctoBot avec du trading simulé ou réel." +sidebar_position: 2 +--- + + + +# Automatiser une stratégie TradingView Death et Golden Cross + +Avec ce tutoriel, vous apprendrez à trader avec les Death et Golden Crosses (ou croix d'or et de mort) en utilisant deux <a href="https://www.investopedia.com/terms/e/ema.asp" rel="nofollow">Moyennes mobiles exponentielles</a> (ou EMA). +Le concept est le suivant : + +- Acheter lorsque l'EMA à court terme croise à la hausse l'EMA à long terme. Il s'agit d'une <a href="https://www.investopedia.com/terms/g/goldencross.asp" rel="nofollow">Golden Cross</a> qui est généralement un signe haussier. +- Vendre lorsque l'EMA à court terme croise vers le bas l'EMA à long terme. Il s'agit d'une <a href="https://www.investopedia.com/terms/d/deathcross.asp" rel="nofollow">Death Cross</a> qui est généralement un signe baissier. + +## 1. Identifier les Death et Golden Crosses automatiquement + +### 1.1 Selectionner votre marché à trader + +Tout d'abord, nous voulons visualiser nos Death et Golden Crosses. Allons sur <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> et sélectionnons la paire de trading, plateforme d'échange et time frame sur lesquels nous voulons trader. + +<div style={{textAlign: "center"}}> + ![tradingview selection btcusdt + marché](/images/guides/trading-view/tradingview-selection-btcusdt-marche.png) +</div> + +Pour ce tutoriel, nous allons trader BTC/USDT sur Binance en utilisant un time frames de 5 minutes. Bien sûr, toute autre valeur fonctionnerait également. +Précision: trader selon des Death et Golden Crosses est généralement plus performant en utilisant des time frames plus long. Le time frame de 5 minutes est uniquement utilisé ici à titre d'exemple. + +### 1.2 Ajouter les indicateurs EMA + +Ensuite, nous ajoutons 2 fois l'indicateur de Moyenne Mobile Exponentielle : + +1. Une fois pour l’EMA à long terme +2. Une fois pour l’EMA à court terme + +<div style={{textAlign: "center"}}> + ![tradingview ajouter ema + indicateur](/images/guides/trading-view/tradingview-ajouter-ema-indicateur.png) +</div> + +### 1.3 Configurer les indicateurs EMA + +Cliquez sur les `Configurations` des deux indicateurs EMA et définissez la valeur `Longueur` selon vos souhaits pour configurer vos Death et Golden Crosses. + +<div style={{textAlign: "center"}}> + ![tradingview configurer ema + indicator](/images/guides/trading-view/tradingview-configurer-ema-indicateur.png) +</div> + +Dans cet exemple, nous utiliserons les valeurs suivantes : + +1. `21` pour la Longueur de l’EMA à long terme +2. `9` pour la Longueur de l’EMA à court terme + +Remarque : vous pouvez également configurer le `Style` ces EMA pour les rendre plus faciles à visualiser sur le graphique. + +### 1.4 Visualiser la stratégie + +Les Death et Golden crosses se produisent lorsque l'EMA à long term est traversée par celle à court terme. We can now easily see what it would look like. +Les Croisements dorés se produisent lorsque les EMA à longue échance sont traversées par ceux à courte échance . Nous pouvons maintenant facilement voir ce que cela ressemblerait. + +<div style={{textAlign: "center"}}> + ![tradingview visualisation ema indicateur golden et death + crosses](/images/guides/trading-view/tradingview-visualisation-ema-indicateur-golden-et-death-crosses.png) +</div> + +Notre stratégie est prête, la seule étape restante est de créer un OctoBot pour trader lorsque ces croisements se produisent. + +## 2. Créer les automatisations OctoBot pour acheter et vendre + +### 2.1 Créer un OctoBot TradingView + +Ouvrons un nouvel onglet et allons sur <a href="https://www.octobot.cloud/fr/dashboard" rel="nofollow">OctoBot cloud</a> pour démarrer un nouveal OctoBot TradingView. + +<div style={{textAlign: "center"}}> + ![demarrer un nouvel octobot tradingview depuis + explorer](/images/guides/trading-view/demarrer-un-nouvel-octobot-tradingview-depuis-l-explorer.png) +</div> + +**[Démarrer un bot](https://www.octobot.cloud)** + +Pour ce tutoriel, nous démarrerons un bot sur Binance. Si vous avez de questions sur comment démarrer un OctoBot TradingView, consultez la section `Créer votre OctoBot TradingView` du [tutorial de trading avec TradingView](../tradingview-trading-tutorial#1-créer-votre-octobot-tradingview). + +### 2.2 Créer votre automatisation d'achat + +Lorsqu'une Golden Cross se produit, nous voulons que notre OctoBot achète. Pour ce tutoriel, nous achèterons en utilisant 50% des USDT de notre portefeuille. + +<div style={{textAlign: "center"}}> + ![octobot automation creer acheter + btc](/images/guides/trading-view/octobot-automation-creer-acheter-btc.png) +</div> + +### 2.3 Créer votre automatisation de vente + +Lorsqu'une death cross se produit, nous voulons que notre OctoBot vende. Pour ce tutoriel, nous vendrons tous les BTC de notre portefeuille. + +<div style={{textAlign: "center"}}> + ![octobot automation creer vendre + btc](/images/guides/trading-view/octobot-automation-creer-vendre-btc.png) +</div> + +Remarque : dans ce tutoriel, nous décrivons un scénario simple en utilisant des ordres au marché, en vendant tout d'un coup et en ayant seulement une seule automatisation d'achat et vente. +Puisqu'il n'y a pas de limite aux automatisations que vous pouvez créer, vous pouvez personnaliser cette stratégie autant que vous le souhaitez en créant d'autres automatisations d'achat et de vente. + +## 3. Connecter les automatisations pour se déclencher sur les Crosses + +Remarque : les étapes suivantes supposent que vous avez déjà configuré l'URL du webhook des alertes TradingView. Si ce n'est pas le cas, veuillez suivre le [guide Configurer l'URL du webhook](../tradingview-trading-tutorial#25-configurer-lurl-du-webhook). + +### 3.1 Acheter lors des Golden Crosses + +Ouvrez le panneau de connexion de votre automation d'achat et copiez son identifiant d'automatisation. + +<div style={{textAlign: "center"}}> + ![octobot automatisations vue connexion interface + selectionnee](/images/guides/trading-view/octobot-automatisations-vue-connexion-interface-selectionnee.png) +</div> + +<div style={{textAlign: "center"}}> + ![octobot identifiant + d'automatisation](/images/guides/trading-view/octobot-automatisation-identifiant.png) +</div> + +Revenez à votre onglet TradingView et créez une nouvelle alerte. + +<div style={{textAlign: "center"}}> + ![creer une alerte depuis + tradingview](/images/guides/trading-view/creer-une-alerte-depuis-tradingview.png) +</div> + +<div style={{textAlign: "center"}}> + ![tradingview creer alerte golden + cross](/images/guides/trading-view/tradingview-creer-golden-cross-alerte.png) +</div> + +Dans cette alerte : + +- Sélectionnez `Croisement vers le haut` ainsi qu'EMA 9 et 21 comme Condition : c'est notre Golden Cross. +- Sélectionnez `Une fois par barre (sur clôture)` comme Déclenchement pour vérifier les Golden Crosses à chaque clôture de bougie. +- Donnez un nom à votre alerte pour l'identifier facilement plus tard. +- Remplacez la totalité de valeur de Message par l'identifiant de votre automatisation d'achat de votre onglet OctoBot. + +Et voila ! Votre stratégie TradingView enverra une alerte déclenchant votre automatisation d'achat par OctoBot lorsqu'une Golden Cross sera identifiée selon vos paramètres d'EMA. + +### 3.2 Vendre lors des Death Crosses + +De la même manière que pour la configuration de la Golden Cross: + +1. Sur votre onglet OctoBot, ouvrez le panneau de connexion de votre automatisation de vente. +2. Sur l'onglet TradingView, créez une deuxième alerte pour identifier les Death Crosses et configurez-la pour déclencher votre automatisation de vente. + +<div style={{textAlign: "center"}}> + ![tradingview creer alerte death + cross](/images/guides/trading-view/tradingview-creer-death-cross-alerte.png) +</div> + +Dans cette alerte, n'oubliez pas de : + +- Sélectionner `Croisement vers le base` ainsi qu’EMA 9 et 21 comme Condition : c'est notre Death Cross. +- Sélectionner `Une fois par barre (sur clôture)` comme Déclenchement pour vérifier les Death Crosses à chaque clôture de bougie. +- Donnez un nom à votre alerte pour l'identifier facilement plus tard. +- Remplacez la totalité de valeur de Message par l'identifiant de votre automatisation de vente de votre onglet OctoBot. + +## La stratégie est prête + +Et c'est prêt ! +Nous venons de créer une stratégie EMA de Death et Golden Cross sur TradingView et avons automatisé son trading en utilisant OctoBot. À chaque fois qu'une Death ou Golden Cross se produit sur TradingView, notre OctoBot achètera ou vendra du BTC en conséquence. + +![tradingview illustration la de stratégie ema avec 2 achats et 2 ventes](/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +![octobot tradingview illustration coté trading de la stratégie ema with avec 2 achats et 2 ventes](/images/guides/trading-view/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png) + +Bien sûr, vous pouvez utiliser cette configuration pour trader toute paire crypto sur n'importe quelle plateforme d'échange en utilisant vos fonds réels ou sans risque avec des [fonds simulés](../paper-trading-a-strategy). + +**[Démarrer un bot TradingView](https://www.octobot.cloud)** + +Nous espérons que ce tutoriel était suffisamment clair. N'hésitez pas à nous faire savoir s'il y a quelque chose que nous devrions améliorer. + +:::info + Attention : La stratégie présentée dans ce tutoriel est uniquement destinée à + des fins éducatives et ne constitue pas un conseil financier. +::: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video.mdx new file mode 100644 index 0000000000..adb9c18ce7 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-strategies-tutorials/bull-market-strategy-from-tradingview-using-rsi-with-video.mdx @@ -0,0 +1,121 @@ +--- +title: "Stratégie RSI en Bull Market" +description: "Optimisez vos gains en Bull Market en profitant de la montée des altcoins en prenant de l'avance sur les mouvements du marché en utilisant le RSI." +sidebar_position: 1 +--- + + + +# Stratégie TradingView de Bull Market avec le RSI en vidéo + +Comment profiter au mieux du Bull Market quand il est là ? +Dans cette stratégie, nous explorons comment utiliser le RSI pour acheter et vendre en avance, dès que la tendance commence à changer. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="Hksh9vnDvy4" title="Optimiser son Bull Market Crypto avec le RSI" /> + +## L'objectif de la stratégie RSI en Bull Market + +En <a href="https://www.investopedia.com/terms/b/bullmarket.asp" rel="nofollow">Bull Market</a>, le prix des crypto monte régulièrement. Afin d'optimiser ses gains, il est important de pouvoir acheter les coins qui vont prendre en valeur avant que leur prix n'augmente trop. Il faut aussi idéalement pouvoir vendre rapidement lorsque le maximum est atteint. Vendre vous permettra de réaliser un achat à plus bas prix ou de débloquer vos fonds pour tirer profit d'une autre crypto prometteuse. + +C'est ce sur quoi cette stratégie se spécialise: **profiter des hausses temporaires**. + +## Le principe de la stratégie + +Utiliser le <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">Relative Strength Index</a>, afin d'identifier les zones de surachat et de survente, est intéressant pour: + +1. Acheter avant que le prix ne commence vraiment à monter +2. Vendre uniquement lorsque le maximum local est atteint + +<div style={{textAlign: "center"}}> + ![strategie RSI de bull market qui achete et vend du + solana](/images/guides/tradingview-tutos/strategie-RSI-de-bull-market-qui-achete-et-vend-du-solana.png) +</div> + +## Configuration de la stratégie + +L'objectif de la stratégie est de trader lorsqu'un maximum est atteint. Il est identifié grâce aux changements de tendance de RSI. Voici la confguration de RSI et d'alertes utilisée dans cette vidéo. + +Cette stratégie utilise TradingView afin d'automatiser l'analyse RSI de tout type de cryptomonnaie. Pour plus d'information sur la façon d'utiliser TradingView et OctoBot, consulter le [tutorial de trading avec TradingView](../tradingview-trading-tutorial). + +### Configuration TradingView + +#### Le RSI pour identifier les tendances + +<div style={{textAlign: "center"}}> + ![configuration RSI avec rolling moving + averages](/images/guides/tradingview-tutos/configuration-RSI-avec-rolling-moving-averages.png) +</div> +- Longueur de 14, activation sur prix de fermeture (configuration standard) - MA +Type: SMMA, c'est à dire Rolling Moving Average. Cela permet de donner plus de +poids aux dernières valeurs et avoir une moyenne plus réactive que la moyenne +utilisée par défaut - Longueur MA: 9 - BB StdDev: 1 + +#### Les ordres d'achat + +1. Achat en cas de forte survente pour tirer profit des baisses de prix importantes + <div style={{textAlign: "center"}}> + ![extreme buy solana sur rsi ema seuil tradingview alert + configuration](/images/guides/tradingview-tutos/extreme-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png) + </div> + +- Lorsque la moyenne mobile du RSI (RSI-based MA) croise vers le haut le seuil de 39.57 +- Une fois par barre (clôture) +- Référence d'une automation d'achat sur OctoBot + +2. Achat en cas de survente standard pour tirer profit des faibles baisses de prix de Bull Market + <div style={{textAlign: "center"}}> + ![regular buy solana sur rsi ema seuil tradingview alert + configuration](/images/guides/tradingview-tutos/regular-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png) + </div> + +- Lorsque la moyenne mobile du RSI (RSI-based MA) croise vers le haut le seuil de 48.62 +- Une fois par barre (clôture) +- Référence d'une automation d'achat sur OctoBot + +#### Les ordres de vente + +1. Vente en cas sur fort surachat pour tirer profit des augmentations de prix importantes + <div style={{textAlign: "center"}}> + ![extreme sell solana sur rsi ema seuil tradingview alert + configuration](/images/guides/tradingview-tutos/extreme-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png) + </div> + +- Lorsque la moyenne mobile du RSI (RSI-based MA) croise vers le bas le seuil de 67.45 +- Une fois par barre (clôture) +- Référence d'une automation de vente sur OctoBot + +2. Vente en cas de surachat standard pour tirer profit des des augmentations de prix régulières du Bull Market + <div style={{textAlign: "center"}}> + ![regular sell solana sur rsi ema seuil tradingview alert + configuration](/images/guides/tradingview-tutos/regular-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png) + </div> + +- Lorsque la moyenne mobile du RSI (RSI-based MA) croise vers le bas le seuil de 53.63 +- Une fois par barre (clôture) +- Référence d'une automation de vente sur OctoBot + +### Configuration OctoBot + +Une automatisation d'achat et de vente de la crypto à trader, avec le montant que vous désirez. + +<div style={{textAlign: "center"}}> + ![automatisations octobot de la strategie RSI de bull + market](/images/guides/tradingview-tutos/automatisations-octobot-de-la-strategie-RSI-de-bull-market.png) +</div> + +Pour plus d'information sur les automatisations d'OctoBot permettant d'acheter et vendre avec TradingView dans la section `Les automatisations pour créer vos stratégies` du [guide de trading avec TradingView](../tradingview-automated-trading#les-automatisations-pour-créer-vos-stratégies). + +## Optimisations possibles + +Dans cette vidéo, nous vous présentons une version mono-crypto de la stratégie. Afin d'optimiser les gains de cette stratégie, plusieurs améliorations peuvent être réalisées: + +- **Trader contre Bitcoin** au lieu d'USDT afin de profiter aussi de la montée du prix du Bitcoin +- Utiliser la stratégie sur **plusieurs crypto à la fois**: si leurs phases ascendantes ne sont pas simultanées, alors cela permet de profiter des montées de l'une quand l'autre est stable +- Utiliser **plusieurs ordres d'achat et de vente** pour optimiser le prix des ordres selon davantage de seuils RSI ou même depuis d'autres indicateurs techniques + +:::info + Attention : La stratégie présentée ici est uniquement destinée à des fins + éducatives et ne constitue pas un conseil financier. +::: diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-trading-tutorial.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-trading-tutorial.mdx new file mode 100644 index 0000000000..273464cfbb --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/tradingview-trading-tutorial.mdx @@ -0,0 +1,262 @@ +--- +title: "Tutoriel de trading avec TradingView" +description: "Tutoriel étape par étape pour créer votre OctoBot TradingView et trader sur les alertes TradingView en utilisant le trading simulé ou réel." +sidebar_position: 16 +--- + + + +# Tutoriel de trading avec TradingView + +Avec OctoBot cloud, il est très facile d'automatiser vos trades en utilisant vos propres stratégies basées sur TradingView. + +Voici un tutoriel étape par étape pour démarrer votre OctoBot TradingView et effectuer vos premiers trades en utilisant le trading simulé et TradingView. + +import YouTube from '@site/src/components/YouTube'; + +<YouTube id="bZwyQMsgYYE" title="Automatiser TradingView" /> + +Dans ce tutoriel, vous apprendrez à : + +1. Créer un OctoBot TradingView et lui faire acheter du BTC à chaque réception d'une alerte TradingView +2. Configurer TradingView pour notifier votre OctoBot lorsque le prix du BTC descend en dessous de 40 000 USDT +3. Suivre l'activité de votre OctoBot TradingView + +## 1. Créer votre OctoBot TradingView + +### 1.1 Démarrer un nouvel OctoBot TradingView + +Si vous créez votre compte OctoBot, après avoir sélectionné votre échange, descendez en bas de la page de sélection de stratégie et selectionnez "Automatisez votre stratégie avec TradingView". + +<div style={{textAlign: "center"}}> + ![octobot creer tradingview bot depuis + intro](/images/guides/trading-view/octobot-creer-tradingview-bot-depuis-intro.png) +</div> + +- Ou alors selectionnez l'onglet `Personalisées` de l'<a href="https://www.octobot.cloud/fr/explore?category=tv" rel="nofollow">explorateur de stratégies</a>. + <div style={{textAlign: "center"}}> + ![demarrer un nouvel octobot tradingview depuis + explorer](/images/guides/trading-view/demarrer-un-nouvel-octobot-tradingview-depuis-l-explorer.png) + </div> + +**[Démarrer un bot](https://www.octobot.cloud)** + +### 1.2 Choisir la plateforme d'échange sur laquelle trader + +Comme les bots classiques, les octobots TradingView fonctionnent sur une seule plateforme d'échange à la fois. + +<div style={{textAlign: "center"}}> + ![octobot creer tradingview bot selectionner + exchange](/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-exchange.png) +</div> + +Sélectionnez la plateforme sur laquelle vous souhaitez que cet OctoBot opère. + +### 1.3 Sélectionner le trading avec des fonds réels ou simulés + +<div style={{textAlign: "center"}}> + ![octobot creer tradingview bot selectionner compte reel ou + simule](/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-compte-reel-ou-simule.png) +</div> + +Tout comme les autres OctoBots, vous pouvez utiliser votre Octobot TradingView sur un [compte réel](invest-with-your-strategy) ou sans risque avec [des fonds simulés](paper-trading-a-strategy). + +Nous vous suggérons de commencer par le trading simulé jusqu'à ce que vous soyez à l'aise pour automatiser vos stratégies TradingView avec vos fonds réels. + +### 1.4 Démarrer votre OctoBot + +Une fois que votre compte est sélectionné et initialisé, vous pouvez démarrer votre OctoBot TradingView. + +<div style={{textAlign: "center"}}> + ![octobot creer tradingview bot + demarre](/images/guides/trading-view/octobot-creer-tradingview-bot-demarre.png) +</div> + +Lorsqu'il démarre, les bots TradingView ne créent pas d'ordres, ils attendront vos alertes TradingView pour trader. + +### 1.5 Votre bot TradingView est prêt + +Votre OctoBot TradingView est maintenant prêt à trader sur le compte que vous avez sélectionné. Il tradera à chaque fois qu'il sera notifié depuis vos alertes TradingView. + +<div style={{textAlign: "center"}}> + ![octobot tradingview bot vue + initiale](/images/guides/trading-view/octobot-tradingview-bot-vue-initiale.png) +</div> + +Les nouveaux bots TradingView utilisent des automatisations pour trader. Par conséquent au moins une automatisation est nécessaire pour que votre bot commence le trading. Nous allons maintenant créer la première automatisation de votre bot. + +### 1.6 Introduction aux automatisations + +Les automatisations sont les building blocks de chaque stratégie TradingView sur OctoBot. + +Les automatisations sont des actions qui sont automatiquement effectuées lors de la réception de l'alerte TradingView associée. Une automatisation déclanger la création ou l'annulation d'ordres selon votre configuration + +Chaque bot peut avoir autant d'automatisations que nécessaire, il n'y a pas de limite quant au nombre d'automatisations et de paires échangées qu'un OctoBot TradingView peut gérer. + +Apprenez-en davantage sur les automatisations dans le [guide du trading automatisé avec TradingView](tradingview-automated-trading) + +<div style={{textAlign: "center"}}> + ![octobot automatisations interface + vide](/images/guides/trading-view/octobot-automatisations-interface-vide.png) +</div> + +Sur OctoBot cloud, nous essayons de simplifier les choses. Voici un exemple de la façon créer une automatisation et comme vous pouvez le voir, c'est simple. + +Créons maintenant une automatisation pour acheter du BTC sur votre compte lorsque vous recevez une alerte TradingView. + +### 1.7 Créer une automatisation + +Créez une nouvelle automatisation en appuyant sur le bouton `+` + +<div style={{textAlign: "center"}}> + ![octobot tradingview bot vue initiale nouvelle automatisation + selectionnée](/images/guides/trading-view/octobot-tradingview-bot-vue-initiale-nouvelle-automatisation-selectionnee.png) +</div> + +Et saisissez les informations suivantes dans l'interface `Créer une automatisation`: + +<div style={{textAlign: "center"}}> + ![octobot automatisations interface + selectionnee](/images/guides/trading-view/octobot-automatisations-interface-selectionnee.png) +</div> + +- Nom : le nom de votre automatisation, vous pouvez utiliser celui que vous souhaitez pour retrouver facilement votre automatisation ultérieurement +- Symbole de l’ordre : Le symbole tradé pour créer l’ordre. Veuillez noter qu'il doit suivre le format `/`. Par exemple, BTCUSDT sur TradingView serait BTC/USDT sur votre automatisation + +Appuyez sur `Créer` lorsque vous êtes satisfait de votre configuration. + +:::info + Si vous avez besoin d'automations plus avancées comme utiliser des valeurs dynamiques depuis Pine Script, annuler des ordres ou automatiser des prises de profits ou des stop losses, jetez un œil aux [automatisations personnalisées](tradingview-alerts-automation#automatisations-tradingview-personnalisées). +::: + +## 2. Configurer TradingView pour envoyer des alertes à votre bot + +### 2.1 Ouvez l'interface de connexion de votre automatisation + +<div style={{textAlign: "center"}}> + ![octobot automatisations vue connexion interface + selectionnee](/images/guides/trading-view/octobot-automatisations-vue-connexion-interface-selectionnee.png) +</div> + +Dans la section Automatisation de votre OctoBot TradingView, ouvrez le l'interface connexion de `Acheter BTC au marché - 10% des USDT`. + +<div style={{textAlign: "center"}}> + ![octobot automation interface de connexion + tradingview](/images/guides/trading-view/octobot-automation-interface-de-connexion-tradingview.png) +</div> + +Cette interface affiche toutes les informations nécessaires pour déclencher l'automatisation `Acheter BTC au marché - 10% des USDT` depuis TradingView. + +### 2.2 Se connecter à TradingView + +Avec un nouvel onglet (pour conserver l'interface de connexion ouverte), aller sur <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> et se connecter ou <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">créer un compte</a>. + +La connexion entre TradingView et OctoBot s'effectue avec des alertes qui peuvent être envoyées à OctoBot par **Webhooks**. Les webhooks nécessitent un abonnement "Essential" ou supérieur. Vous pouvez créer un compte TradingView et commencer un essai gratuit de 30 jours en utilisant <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">ce lien</a>. + +### 2.3 Sélectionner votre paire de trading + +Dans notre automatisation actuelle, nous tradons du BTC/USDT. Sélectionner le marché BTCUSDT sur TradingView, par exemple celui de Binance. + +<div style={{textAlign: "center"}}> + ![tradingview selection btcusdt + marche](/images/guides/trading-view/tradingview-selection-btcusdt-marche.png) +</div> + +### 2.4 Selectionner l'onglet Alertes + +![creer une alerte depuis tradingview](/images/guides/trading-view/creer-une-alerte-depuis-tradingview.png) + +Depuis l'onglet des alertes, cliquez sur **Créer une alerte**. + +### 2.5 Configurer l'URL du webhook + +Dans l'onglet "Notifications" du menu "Créer une alerte sur BTCUSDT" sur TradingView, saisir l'URL du webhook de votre compte OctoBot. + +1. Sélectionnez `URL du Webhook` + + <div style={{textAlign: "center"}}> + ![creer une alerte sur tradingview avec webhook + url](/images/guides/trading-view/creer-une-alerte-sur-tradingview-avec-webhook-url.png) + </div> + +2. Copiez votre URL de webhook. Vous trouverez votre URL de webhook dans la section `3. Automatiser via webhook` de l' [interface de connexion](#21-ouvez-linterface-de-connexion-de-votre-automatisation) de votre automatisation, sur votre onglet OctoBot. + +<div style={{textAlign: "center"}}> + ![url du webhook octobot de + l'automatisation](/images/guides/trading-view/octobot-automatisation-webhook-url.png) +</div> + +Il est possible vous deviez activer l'authentification à deux facteurs sur TradingView pour saisir une URL du webhook. + +Vos notifications par webhook sont maintenant configurées ! + +:::info + Cette configuration d'URL du webhook n'a besoin d'être + effectuée qu'une seule fois. Les alertes suivantes que vous créerez + utiliseront automatiquement cette configuration de notifications. +::: + +### 2.6 Créer une nouvelle alerte + +Revenir à l'onglet "Configurations" du menu "Créer une Alerte sur BTCUSDT" sur TradingView et remplir les détails de votre alerte. + +<div style={{textAlign: "center"}}> + ![creer alerte de prix depuis + tradingview](/images/guides/trading-view/creer-alerte-de-prix-depuis-tradingview.png) +</div> + +Dans ce tutoriel, nous allons créer une alerte simple basée sur le prix de BTC/USDT qui déclenchera notre automatisation `Acheter BTC au marché - 10% des USDT` lorsque le prix du Bitcoin franchira la barre des 40 000 USDT sur TradingView. + +1. Sélectionner une condition pour déclencher votre alerte. +2. Nommer votre alerte. Ce nom n'a aucun impact et est simplement utile pour que vous puissiez vous souvenir de l'alerte créée. +3. Remplir le champ "Message" avec **l’identifiant de votre automatisation** que vous trouverez dans la section `2. Créer une alerte` de [l'interface de connexion](#21-ouvez-linterface-de-connexion-de-votre-automatisation) de votre automatisation, sur votre onglet OctoBot. + +<div style={{textAlign: "center"}}> + ![octobot automatisation + identifiant](/images/guides/trading-view/octobot-automatisation-identifiant.png) +</div> +4. Appuyez ensuite sur "Créer" pour enregistrer votre alerte TradingView. + +## 3. Tout est prêt + +Félicitations ! Vous disposez désormais d'une alerte TradingView qui déclenchera automatiquement votre automatisation `Acheter BTC au marché - 10% des USDT` sur votre compte OctoBot dès que l'alerte sera déclenchée sur TradingView. + +**[Démarrer un bot TradingView](https://www.octobot.cloud)** + +### 3.1 Suivre les alertes + +À chaque nouvelle alerte reçue, votre OctoBot la sauvegardera et vous pourrez voir le résultat de son exécution. + +<div style={{textAlign: "center"}}> + ![octobot automatisations vue avec alerte tradingview + executee](/images/guides/trading-view/octobot-automatisations-vue-avec-alerte-tradingview-executee.png) +</div> + +Vous pouvez également consulter l'historique complet des alertes et automatisations de votre bot en utilisant le bouton `Voir tous`. + +<div style={{textAlign: "center"}}> + ![octobot automatisation + historique](/images/guides/trading-view/octobot-automatisation-historique.png) +</div> + +### 3.2 Créer d'autres automatisations + +Pour chacun de vos OctoBot TradingView, vous pouvez créer autant d'automatisations que vous le souhaitez et trader avec autant de symboles que vous le désirez. + +<div style={{textAlign: "center"}}> + ![octobot plusieurs automatisations + tradingview](/images/guides/trading-view/octobot-plusieurs-automatisations-tradingview.png) +</div> + +Dans ce tutoriel, nous avons créé une automatisation sur le BTC/USDT déclenchée par un événement de prix, mais il existe de nombreux autres moyens de déclencher des alertes TradingView, notamment : + +- Les événements sur les prix, comme ce que nous avons fait dans ce tutoriel +- Les indicateurs, pour trader en utilisant des indicateurs techniques simples ou sophistiqués +- Les stratégies Pine Script, pour trader à partir de stratégies complètes écrites en Pine Script directement depuis TradingView + +<div style={{textAlign: "center"}}> + ![plusieurs btcusdt alertes + tradingview](/images/guides/trading-view/plusieurs-btcusdt-alertes-tradingview.png) +</div> + +Apprenez-en davantage sur les différents types d'alertes dans le [guide d'automatisation des alertes TradingView](tradingview-alerts-automation). diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/what-is-an-exchange-api-key.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/what-is-an-exchange-api-key.md new file mode 100644 index 0000000000..474659769c --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/investing/what-is-an-exchange-api-key.md @@ -0,0 +1,27 @@ +--- +title: "Qu'est-ce qu'une API Key ?" +description: "Vous vous demandez ce qu'est une API Key et pourquoi vous devriez l'utiliser avec un logiciel de trading ? Voici les réponses à vous questions." +sidebar_position: 34 +--- + +# Qu'est-ce qu'une API Key de plateforme d'échange ? + +Dans le trading de cryptomonnaies, les API Keys sont la façon la plus commune de permettre à des logiciels de trading de créer et annuler des ordres sur un compte de plateforme d'échange de façon sécurisée. Cette solution présente aussi l'avantage de ne pas nécessiter de communiquer votre adresse email ou mot de passe de connexion à Binance. + +## Les API Keys sur OctoBot +Sur OctoBot, vos API Keys sont utilisées pour exécuter une stratégie, c'est à dire: +- consulter le solde de votre portefeuille Binance +- consulter, créer et annuler des ordres de trading sur votre compte + +## Les permissions +Les API Keys peuvent être configurées avec certaines permissions. Il s'agit d'un dispositif de sécurité supplémentaire permettant d'empêcher toute action non autorisée via cette API Key. Par exemple, un logiciel utilisant une API Key qui ne dispose pas des permissions de retrait ne peut pas initier de retrait des fonds du compte associé. + +Pour cette raison, seules les permissions **Permettre la lecture et Activer le trading Spot et sur marge** sont nécessaires pour qu'OctoBot puisse exécuter une stratégie de trading. + +**Aucune autre permission n'est requise et nous recommandons fortement de ne pas ajouter d'autre permission aux API Keys que vous utilisez avec un logiciel de trading, que ce soit OctoBot ou un autre.** + +## Comment créer votre API Key de plateforme d'échange ? +Pour vous aider à connecter votre compte de plateforme d'échange à OctoBot, nous avons créé ces guides pas à pas: +- [Guide de connexion à Binance](connect-your-binance-account-to-octobot) +- [Guide de connexion à Kucoin](connect-your-kucoin-account-to-octobot) +- [Guide de connexion à Coinbase](connect-your-coinbase-account-to-octobot) diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/creating-trading-orders.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/creating-trading-orders.md new file mode 100644 index 0000000000..fa015846df --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/creating-trading-orders.md @@ -0,0 +1,92 @@ +--- +title: "Créer des ordres" +description: "Apprenez comment créer des ordres de marché, à seuil, de limite, de stop-loss et de trailing avec Python en utilisant OctoBot Script." +sidebar_position: 6 +--- + + + +# Créer des ordres de trading + +:::info + La traduction française de cette page est en cours. +::: + +Orders can be created using the following keywords: +- `market` +- `limit` +- `stop_loss` +- `trailing_market` + +## Montant +Each order accept the following optional arguments: +- `amount`: for spot and futures trading +- `target_position`: futures trading only: create the associated order to update to position size to the given value. Uses the same format as the order amount. + +To specify the amount per order, use the following syntax: +- `0.1` to trade 0.1 BTC on BTC/USD +- `2%` to trade 2% of the total portfolio value +- `12%a` to trade 12% of the available holdings + +``` python +# create a buy market order using 10% of the total portfolio +await obs.market(ctx, "buy", amount="10%") +``` + +## Prix +Orders set their price using the `offset` argument. + +To specify the order price, use the following syntax: +- `10` to set the price 10 USD above the current BTC/USD market price +- `2%` to set the price 2% USD above the current BTC/USD market price +- `@15555` to set the price at exactly 15555 USD regardless of the current BTC/USD market price + +``` python +# create a buy limit order of 0.2 units (BTC when trading BTC/USD) +# with a price at 1% below the current price +await obs.limit(ctx, "buy", amount="0.2", offset="-1%") +``` + +Note: market orders do not accept the `offset` argument. + +## Take profit et stop losses automatisés +When creating orders, it is possible to automate the associated +stop loss and / or take profits. When doing to, the associated take profit/stop loss will have +the same amount as the initial order. + +Their price can be set according to the same rules as the initial order price +(the `offset` argument) using the following optional argument: +- `stop_loss_offset`: automate a stop loss creation when the initial order is filled and set the stop loss price +- `take_profit_offset`: automate a take profit creation when the initial order is filled and set the take profit price + +``` python +# create a buy limit order of 0.2 units (BTC when trading BTC/USD) with: +# - price at 1% below the current price +# - stop loss at 10% loss +# - take profit at 15% profit +await obs.limit(ctx, "buy", amount="0.2", offset="-1%", stop_loss_offset="-10%", take_profit_offset="15%") +``` + +> When using both `stop_loss_offset` and `take_profit_offset`, two orders will be created after the initial order fill. +Those two orders will be grouped together, meaning that if one is cancelled or filled, the other will be cancelled. + +## Trader des futures + +### Ouvrir une position +Use regular orders to open a position. When the order is filled, the associated position will be created, updated or closed. + +A sell order will open a short position if your balance becomes negative after filling this order. + +### Clôturer une position +Set the position size to 0 to close it. You can do it either by: +- Filling an order with the same amount as the position size and an opposite side +- Or using `target_position=0` as order parameters + + +### Changer le levier + +Use `set_leverage` to update the current leverage value when trading futures. + +``` python +await obs.set_leverage(ctx, 5) +``` \ No newline at end of file diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/fetching-history.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/fetching-history.md new file mode 100644 index 0000000000..966f064adc --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/fetching-history.md @@ -0,0 +1,58 @@ +--- +title: "Télécharger les données historiques" +description: "Apprenez comment récupérer et réutiliser des données historiques de marché d'échange en utilisant Python à l'aide d'OctoBot Script." +sidebar_position: 14 +--- + + + +# Télécharger les données historiques de trading + +:::info + La traduction française de cette page est en cours. +::: + +In order to run a backtest, OctoBot script requires historical +trading data, which is at least candles history. + +## Télécharger de nouvelles données + +When using OctoBot script, historical data can be fetched using: +`await obs.get_data(symbol, time frame)` + +Where: + +- symbol: the trading symbol to fetch data from. It can also be a list of symbols +- time frame: the time frame to fetch (1h, 4h, 1d, etc). It can also be a list of time frames + +Optional arguments: + +- start_timestamp: the unix timestamp to start fetching data from. Use <a href="https://www.epochconverter.com/" rel="nofollow">this converter</a> if you are unsure what you should use. +- exchange: the exchange to fetch data from. Default is "binance" +- exchange_type: the exchange trading type to fetch data from. Default is "spot", "future" is also possible on supported exchanges + +```python +data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) +``` + +## Réutiliser les données précédemment téléchargées + +Calling `data = await obs.get_data` will save the downloaded data into the `backtesting/data` local folder. +If you want to speedup subsequent calls, you can provide the `data_file` optional argument to read +data from this file instead of downloading historical data. This also makes it possible to run a +script while being offline. + +You can get the name of the downloaded backtesting file by accessing +`data.data_files[0]` + +```python +data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) +# print the name of the downloaded data file +print(data.data_files[0]) +``` + +```python +datafile = "ExchangeHistoryDataCollector_1671754854.5234916.data" +# will not download historical data as a local data_file is provided +data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400, data_file=datafile) +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/getting-started.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/getting-started.md new file mode 100644 index 0000000000..78ed5fde7b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/getting-started.md @@ -0,0 +1,108 @@ +--- +title: "Commencer le scripting" +description: "Exploitez la puissance du framework OctoBot au sein de vos propres stratégies de trading scriptées en Python tout en gardant la simplicité d'un Pine Script TradingView." +sidebar_position: 17 +--- + + + +# OctoBot Script + +:::note + Pour les utilisateurs d' + <a href="https://github.com/Drakkar-Software/OctoBot-script" rel="nofollow">OctoBot Script</a> + . +::: + +:::info + La traduction française de cette page est en cours. +::: + +## Le framework de trading par script basé sur OctoBot + +> OctoBot Script est dans une version alpha + +OctoBot Script vous permet d'exploiter la puissance du framework OctoBot tout en gardant la simplicité d'un Pine Script TradingView. + +With OctoBot Script, automatisez vos stratégies de trading en utilisant vos scripts hautement optimisés + +- Que ce soit à partir de vos idées de stratégies scriptées, comme sur le Pine Script de <a href="https://www.tradingview.com/?aff_id=27595" rel="nofollow">TradingView</a> +- Ou en utilisant une stratégie avancée basée sur l'IA + +## Installer OctoBot Script depuis pip + +> OctoBot Script nécessite **Python 3.10** + +```{.sourceCode .bash} +python3 -m pip install OctoBot wheel appdirs==1.4.4 +python3 -m pip install octobot-script +``` + +## Exemple de script: une strategie RSI + +Dans cet exemple, OctoBot script permet de créer rapidement une stratégie de trading basée sur le <a href="https://www.investopedia.com/terms/r/rsi.asp" rel="nofollow">RSI</a> comprenant: + +- une prise de profit à 25% de gains +- un stop loss à 15% de perte + +```python + + +async def rsi_test(): + async def strategy(ctx): + # Will be called at each candle. + if run_data["entries"] is None: + # Compute entries only once per backtest. + closes = await obs.Close(ctx, max_history=True) + times = await obs.Time(ctx, max_history=True, use_close_time=True) + rsi_v = tulipy.rsi(closes, period=ctx.tentacle.trading_config["period"]) + delta = len(closes) - len(rsi_v) + # Populate entries with timestamps of candles where RSI is + # below the "rsi_value_buy_threshold" configuration. + run_data["entries"] = { + times[index + delta] + for index, rsi_val in enumerate(rsi_v) + if rsi_val < ctx.tentacle.trading_config["rsi_value_buy_threshold"] + } + await obs.plot_indicator(ctx, "RSI", times[delta:], rsi_v, run_data["entries"]) + if obs.current_live_time(ctx) in run_data["entries"]: + # Uses pre-computed entries times to enter positions when relevant. + # Also, instantly set take profits and stop losses. + # Position exists could also be set separately. + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") + + # Configuration that will be passed to each run. + # It will be accessible under "ctx.tentacle.trading_config". + config = { + "period": 10, + "rsi_value_buy_threshold": 28, + } + + # Read and cache candle data to make subsequent backtesting runs faster. + data = await obs.get_data("BTC/USDT", "1d", start_timestamp=1505606400) + run_data = { + "entries": None, + } + # Run a backtest using the above data, strategy and configuration. + res = await obs.run(data, strategy, config) + print(res.describe()) + # Generate and open report including indicators plots + await res.plot(show=True) + # Stop data to release local databases. + await data.stop() + + +# Call the execution of the script inside "asyncio.run" as +# OctoBot script runs using the python asyncio framework. +asyncio.run(rsi_test()) +``` + +## Rapport généré + +![rapport octobot pro avec btc usdt avec graphiques de trades et portfolio et rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +## Rejoignez la communauté + +Nous avons récemment créé un canal Telegram dédié au script OctoBot. + +<a href="https://t.me/+366CLLZ2NC0xMjFk" rel="nofollow">Telegram News</a> diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/plotting-anything.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/plotting-anything.md new file mode 100644 index 0000000000..a1f424da1f --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/plotting-anything.md @@ -0,0 +1,43 @@ +--- +title: "Tout tracer" +description: "Apprenez comment représenter graphiquement n'importe quel type de données dans votre rapport d'exécution de stratégie en utilisant Python à l'aide d'OctoBot Script." +sidebar_position: 11 +--- + + + +# Tout tracer + +:::info + La traduction française de cette page est en cours. +::: + +Anything can be plotted on your strategy run report using the `plot(ctx, name, ...)` keyword. +The plot arguments are converted into <a href="https://plotly.com/javascript/" rel="nofollow">plotly</a> charts parameters. + +Where: + +- `name`: name of the indicator on the chart + +Optional arguments: + +- `x`: values to use for the x axis +- `y`: values to use for the y axis +- `z`: values to use for the z axis +- `text`: point labels +- `mode`: plotly mode ("lines", "markers", "lines+markers", "lines+markers+text", "none") +- `chart`: "main-chart" or "sub-chart" (default is "sub-chart") +- `own_yaxis`: when True, uses an independent y axis for this plot (default is False) +- `color`: color the of plot +- `open`: open values for a candlestick chart +- `high`: high values for a candlestick chart +- `low`: low values for a candlestick chart +- `close`: close values for a candlestick chart +- `volume`: volume values for a candlestick chart +- `low`: low values for a candlestick chart + +Example: + +```python +await obs.plot(ctx, "RSI", x=time_values, y=indicator_values, mode="markers") +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/plotting-indicators.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/plotting-indicators.md new file mode 100644 index 0000000000..72b4974810 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/plotting-indicators.md @@ -0,0 +1,28 @@ +--- +title: "Tracer des indicateurs" +description: "Apprenez comment représenter graphiquement des indicateurs techniques tels que le RSI ou l'EMA dans votre rapport d'exécution de stratégie en utilisant Python à l'aide d'OctoBot Script." +sidebar_position: 10 +--- + + + +# Tracer des indicateurs + +:::info + La traduction française de cette page est en cours. +::: + +Indicators and associated signals can be easily plotted using the +`plot_indicator(ctx, name, x, y, signals)` keyword. + +Where: +- `name`: name of the indicator on the chart +- `x`: values to use for the x axis +- `y`: values to use for the y axis +- `signal`: (optional) x values for which a signal is fired + +Example where the goal is to plot the value of the rsi indicator from +the [example script](/guides/octobot-script#script). +``` python +await obs.plot_indicator(ctx, "RSI", time_values, indicator_values, signal_times) +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/run-report.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/run-report.md new file mode 100644 index 0000000000..c710693d72 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/run-report.md @@ -0,0 +1,32 @@ +--- +title: "Rapport d'exécution" +description: "Apprenez comment créer, afficher et trouver le rapport d'exécution de votre stratégie à la fin de chaque exécution de stratégie avec OctoBot Script." +sidebar_position: 9 +--- + + + +# Rapport d'exécution + +:::info + La traduction française de cette page est en cours. +::: + +Each full execution of your strategy can generate a complete report. + +To generate a report at the end of a strategy run, add the following instruction + +```python +await res.plot(show=True) +``` + +> Tip: Use the `show` parameter to automatically open the report on your web browser + +![rapport octobot pro avec btc usdt avec graphiques de trades et portfolio et rsi](/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg) + +By default, each run report is stored in its run directory, in +`user/data/BacktesterTradingMode/default_campaign/backtesting/backtesting_X/report.html`. +Where X is the identifier of your backtesting run. + +> This report can be customized to include any information that would be useful to you. +Do customize your report, checkout the following articles. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/strategies.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/strategies.md new file mode 100644 index 0000000000..8f22ff84db --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/octobot-script/strategies.md @@ -0,0 +1,111 @@ +--- +title: "Stratégies" +description: "Apprenez comment créer, exécuter et effectuer des backtest sur vos stratégies de trading automatisées en utilisant un langage simple similaire à TradingView Pine Script avec OctoBot Script." +sidebar_position: 3 +--- + + + +# Les stratégies sur OctoBot script + +:::info + La traduction française de cette page est en cours. +::: + +On OctoBot script, similarly to TradingView Pine Script, a trading strategy is a python async function that will be called at new price data. +``` python +async def strategy(ctx): + # your strategy content +``` + +In most cases, a strategy will: +1. Read price data +2. Use technical evaluators or statistics +3. Decide to take (or not take) action depending on its configuration +4. Create / cancel or edit orders (see [Creating orders](/guides/octobot-script-docs/creating-trading-orders)) + +As OctoBot script strategies are meant for backtesting, it is possible to create a strategy in 2 ways: + +## Stratégies pré-calculées +Pre-computed are only possible in backtesting: since the data is already known, when dealing with technical +evaluator based strategies, it is possible to compute the values of the evaluators for the whole backtest at once. +This approach is faster than iterative strategies as evaluators call only called once. + +Warning: when writing a pre-computed strategy, always make sure to associate the evaluator values to the +right time otherwise you might be reading data from the past of the future when running the strategy. + +``` python +config = { + "period": 10, + "rsi_value_buy_threshold": 28, +} +run_data = { + "entries": None, +} +async def strategy(ctx): + if run_data["entries"] is None: + # 1. Read price data + closes = await obs.Close(ctx, max_history=True) + times = await obs.Time(ctx, max_history=True, use_close_time=True) + # 2. Use technical evaluators or statistics + rsi_v = tulipy.rsi(closes, period=ctx.tentacle.trading_config["period"]) + delta = len(closes) - len(rsi_v) + # 3. Decide to take (or not take) action depending on its configuration + run_data["entries"] = { + times[index + delta] + for index, rsi_val in enumerate(rsi_v) + if rsi_val < ctx.tentacle.trading_config["rsi_value_buy_threshold"] + } + await obs.plot_indicator(ctx, "RSI", times[delta:], rsi_v, run_data["entries"]) + if obs.current_live_time(ctx) in run_data["entries"]: + # 4. Create / cancel or edit orders + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") +``` +This pre-computed strategy computes entries using the RSI: times of favorable entries are stored into +`run_data["entries"]` which is defined outside on the `strategy` function in order to keep its values +throughout iterations. + +Please note the `max_history=True` in `obs.Close` and `obs.Time` keywords. This is allowing to select +data using the whole run available data and only call `tulipy.rsi` once and populate `run_data["entries"]` +only once. + +In each subsequent call, `run_data["entries"] is None` will be `True` and only the last 2 lines of +the strategy will be executed. + +## Stratégies itératives +``` python +config = { + "period": 10, + "rsi_value_buy_threshold": 28, +} +async def strategy(ctx): + # 1. Read price data + close = await obs.Close(ctx) + if len(close) <= ctx.tentacle.trading_config["period"]: + # not enough data to compute RSI + return + # 2. Use technical evaluators or statistics + rsi_v = tulipy.rsi(close, period=ctx.tentacle.trading_config["period"]) + # 3. Decide to take (or not take) action depending on its configuration + if rsi_v[-1] < ctx.tentacle.trading_config["rsi_value_buy_threshold"]: + # 4. Create / cancel or edit orders + await obs.market(ctx, "buy", amount="10%", stop_loss_offset="-15%", take_profit_offset="25%") +``` +This iterative strategy is similar to the above pre-computed strategy except that it is evaluating the RSI +at each candle to know if an entry should be created. + +This type of strategy is simpler to create than a pre-computed strategy and can be used in +OctoBot live trading. + +## Exécuter un stratégie + +When running a backtest, a strategy should be referenced alongside: +- The [data it should be run on](/guides/octobot-script-docs/fetching-history) using `obs.run`: +- Its configuration (a dict in above examples, it could be anything) + +``` python +res = await obs.run(data, strategy, config) +``` + +Have a look [at the demo script](/guides/octobot-script#script) for a full example of +how to run a strategy within a python script. diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/about.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/about.md new file mode 100644 index 0000000000..d68ec88b37 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/about.md @@ -0,0 +1,39 @@ +--- +title: "À propos de nous" +description: "Qui est derrière OctoBot ? Comment contacter l'équipe ? Rencontrez l'équipe d'OctoBot et apprenez en plus sur la communauté OctoBot." +--- + + + +# À propos de nous + +## Qui sommes-nous ? + +### OctoBot est réalisé par une équipe de 2 personnes. + +Depuis 2018, OctoBot est développé sans financement externe par [Paul](https://github.com/Drakkar-Software) et [Guillaume](https://github.com/Drakkar-Software). + +Tous deux ont un parcours de développeurs qui leur a permis de créer OctoBot et les éléments de l'écosystème sur la base de leurs idées et des retours des utilisateurs. + +Tout ce temps a permis de développer une base de code très solide tout en étant en mesure de se concentrer sur les besoins des utilisateurs et de faire évoluer OctoBot dans la bonne direction. + +### OctoBot est également façonné par sa communauté + +L'équipe d'OctoBot est également composée de la communauté de ses utilisateurs, certains d'entre eux font partie de la communauté depuis des années maintenant. C'est grâce à cette communauté qu'OctoBot continue d'évoluer pour répondre au mieux aux besoins des utilisateurs et grandit vers les fonctionnalités les plus utiles. + +Pour cela, nous sommes extrêmement reconnaissants et nous remercions la communauté OctoBot pour son aide et ses idées incroyables au fil des années. + +## Contactez-nous + +- N'hésitez pas à nous contacter à contact@octobot.cloud + +## État de l'écosystème + +- L'état de l'écosystème d'OctoBot est disponible sur + <a href="https://status.octobot.online" rel="nofollow">status.octobot.online</a> + +## Remerciements + +- Licence complète de <a href="https://www.jetbrains.com/opensource/" rel="nofollow">JetBrains</a> pour les projets open source. + +_Octobot est entièrement développé avec l'aide de l'IDE Pycharm !_ diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/affiliate.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/affiliate.md new file mode 100644 index 0000000000..9f05e1f6a9 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/affiliate.md @@ -0,0 +1,201 @@ +--- +title: "Affiliate terms" +description: "Know everything about OctoBot cloud Affiliate Terms. Earn passive income as an affiliate when your referenced users trade or subscribe to paid plans." +--- + +# Affiliate terms + +We welcome you to OctoBot cloud, a SaaS service provided by Drakkar-Software that hosts OctoBot software, a bot for cryptocurrency trading. The Octobot software, the www.octobot.cloud website, mobile applications, and application program interfaces are collectively referred to as the "Software". + +**By creating or sharing an affiliate link, you acknowledge and consent to being bound by these Affiliate Terms.** + +For matters not addressed in the Affiliate Terms, the Terms of Use will apply. If there is a discrepancy between the Affiliate Terms and the Terms of Use, the Affiliate Terms will prevail. Drakkar-Software reserves the right to modify the Affiliate Terms as outlined in Section 1 of the Terms of Use. + +For additional information, please visit our website at www.octobot.cloud or email us at contact@drakkar.software. + +## Definitions + +- A "**Affiliate**" is an individual or legal entity with a software user account, + without any bans, blocks, sanctions, or limitations, who has accepted + the Affiliate Terms and has expressed interest in participating in + OctoBot's affiliate program by attracting new clients via the + Affiliate Link. Affiliates must be manually approved by + Drakkar-Software to become an Affiliate. +- A "**Qualified Client**" is an individual or legal entity who has (a) successfully registered + on the Software using the Affiliate Link, (b) successfully initiated a + paid or free plan on the Software, and (c) is not the Affiliate. +- A "**Affiliate Link**" is a personalized hyperlink to the Software registration page, + generated automatically for the Affiliate to share with potential + Qualified Clients. + +## Affiliate's Responsibilities + +To become an Affiliate, you must: + +- Be a registered Client of the Software with a complete profile + that includes all requested data, including contact details. +- If representing a legal entity, apply to become an Affiliate by + contacting Drakkar-Software and receiving approval. +- Obtain an Affiliate Link in the affiliate section of the Software. + +The Affiliate can involve any person to become a Qualified Client of the Software, as long as it is legal and they ensure that such individuals: + +- Are of legal age to use the Software. +- Are not under the control of, or residing in, a jurisdiction that + explicitly bans the use of similar software to the Software. +- Understand that the use of the Software is at their discretion and + responsibility. + +The Affiliate is not authorized to: + +- Enter into or conclude any agreements on behalf of OctoBot or + Drakkar-Software. +- Present themselves as an employee or partner of OctoBot or + Drakkar-Software. +- Collude with other existing or potential clients of the Software + for illegal benefits. +- Make any statement or guarantee regarding the Software. + +Damage the reputation or image of Drakkar-Software and/or the Software. This includes, but is not limited to: + +- Misrepresentation of OctoBot or Drakkar-Software, such as + using false or misleading claims about OctoBot or + Drakkar-Software, its products, or services. This includes + implying an official partnership or endorsement by OctoBot or + Drakkar-Software when none exists. +- Spammy marketing practices, such as sending unsolicited emails + (spam) that include OctoBot or Drakkar-Software's name or + products. +- Unethical marketing practices, such as using clickbaits, fake + reviews, or deceptive tactics to promote OctoBot or + Drakkar-Software. +- Negative publicity, such as publicly criticizing or + disparaging OctoBot or Drakkar-Software, its products, or its + services or engaging in behavior that could lead to negative + press or social media backlash against OctoBot or + Drakkar-Software. +- Association with competing brands, such as promoting competing + brands in a way that undermines or devalues OctoBot or + Drakkar-Software's reputation or displaying OctoBot or + Drakkar-Software's ads or links alongside ads for direct + competitors in a manner that creates confusion or conflict. +- Violation of laws or regulations, such as engaging in illegal + activities or promoting OctoBot or Drakkar-Software in a way + that violates local, national, or international laws. This + includes using OctoBot or Drakkar-Software's name or products + in connection with fraudulent or unethical schemes. +- Poor quality or irrelevant content, such as promoting OctoBot + or Drakkar-Software on low-quality websites or platforms that + reflect poorly on OctoBot or Drakkar-Software's image. This + includes placing OctoBot or Drakkar-Software's ads or links on + irrelevant or unrelated content that could confuse or mislead + customers. +- Use any intellectual property of OctoBot or Drakkar-Software + contrary to the license provided under the Terms of Use. +- A new client brought to the Software by the Affiliate is considered a + Qualified Client only if they have followed the Affiliate Link + provided by the Affiliate to register and successfully started a paid + or free plan on the Software. + +## Payment terms + +**For an individual Affiliate** Drakkar-Software will pay a fee calculated as a percentage of the net +amount of any successful (and not refunded or reversed) payment for +the subscription made by a Qualified Client and a percentage of the +trading fees received by Drakkar-Software when the Qualified Client's +associated trading robot executes trades on a partner exchange with an +eligible exchange account: + +- Each Qualified Client brought by the Affiliate is subject to a 25% + fee on its paid subscriptions. +- The percentage of trading fees may vary depending on the + Software's partnership terms with each exchange and at the + discretion of Drakkar-Software. + +**For a legal entity Affiliate** who is accepted by Drakkar-Software to use the Software, +Drakkar-Software will pay a fee calculated as a percentage of the net +amount of any successful (and not refunded or reversed) payment for +the subscription made by a Qualified Client and a percentage of the +trading fees received by Drakkar-Software when the Qualified Client's +associated trading robot executes trades on a partner exchange with an +eligible exchange account: + +- The percentage of subscriptions and trading fees is agreed upon + when Drakkar-Software approves the legal entity’s application to + become an Affiliate. +- Drakkar-Software will pay the Affiliate's fee in US Dollars. + Drakkar-Software may require that the Affiliate sends Drakkar-Software + an invoice for the fee, in which case the Affiliate shall not be + entitled to the fee, unless it has sent Drakkar-Software an invoice + for the fee; +- The Affiliate's fee is credited to the client by Drakkar-Software each + month, proportional to the payments (and trades when applicable) made + by the Qualified Client. Amount might be converted from original + cryptocurrency to US Dollars equivalent at the rate of + Drakkar-Software and at the moment of such credit transaction + processing. + +### Drakkar-Software is not obliged to: + +- Transfer any fee for payments that don't comply with the Affiliate + Terms. +- Transfer funds accumulated in currencies other than acceptable + cryptocurrency, unless approved by Drakkar-Software. + +### With the funds in the Affiliate account, you can either: + +- Use the funds to pay for OctoBot subscriptions (these funds are + non-refundable), or +- Request a withdrawal to your external wallet, subject to + limitations, such as a minimal withdrawal amount, presented within + the Software. To request a withdrawal of the funds, contact + affiliate@octobot.cloud, accompanied by a comprehensive invoice + detailing the precise monetary amount. + +## Compliance with Sanctions + +By using Drakkar-Software's services, you confirm that you: + +Are not included in any trade embargos or economic sanctions lists, including but not limited to: + +- Restrictive measures of the European Union +- Sanctions of the United Nations +- Sanctions of the Government of France +- the list of specially designated nationals maintained by + Office of Foreign Assets Control (OFAC) of the U.S. Department + of the Treasury +- the denied persons or entity list of the U.S. Department of + Commerce +- Lists of subjects to Financial Sanctions maintained by the UK + Office of Financial Sanctions Implementation (OFSI) +- Do not violate or circumvent any international sanctions or + restrictive measures established by the European Union, United + Nations, United States of America, United Kingdom, or any other + sanctions applicable in France +- Are not from countries or geographical regions under sanctions + imposed by the European Union, United Nations, United States of + America, United Kingdom, and or any other international sanctions + applicable in France. + +And: + +- Drakkar-Software reserves the right to choose markets and + jurisdictions to conduct business, and may restrict or refuse the + provision of services in certain countries or regions. +- If you become subject to international sanctions, you must immediately + stop using our services and notify Drakkar-Software. +- We reserve the right to terminate, suspend, or restrict our services + to you, or terminate this Agreement if you become a subject of + international sanctions, if providing services to you violates or + circumvents international sanctions, or if you are related to a + territory, area of activity, transaction, or person subject to + international sanctions according to our assessment. +- We also reserve the right to terminate, suspend, or restrict our + services to you, or terminate this Agreement if we decide to limit our + business activities in certain markets and jurisdictions as mentioned + in Section 4. + +## Updates + +Updated the 02/09/2025 to add examples of behaviors damaging the +reputation or image of Drakkar-Software and/or the Software. diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/index.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/index.md new file mode 100644 index 0000000000..fab5d96c4d --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/index.md @@ -0,0 +1,462 @@ +--- +title: "Terms of use" +description: "Know everything about OctoBot cloud terms of use. What can you do and what can't you do when using OctoBot cloud" +--- + +# Terms of use + +Welcome to OctoBot cloud, the SaaS service offered by Drakkar-Software to host the OctoBot software, a cryptocurrency trading bot. By accessing and using OctoBot cloud, you agree to be bound by the following terms and conditions. + +OctoBot cloud is a service that allows users to access and use trading strategies for cryptocurrency trading through the OctoBot software. Using OctoBot cloud for trading involves significant risks and potential for financial losses. + +OctoBot cloud and Drakkar-Software are not responsible for any trading related losses or financial losses resulting from the use of the service. + +Users can publish their own trading strategies on the OctoBot cloud platform. OctoBot cloud owns the intellectual property of any strategy published on the platform. + +For more information, please visit our website at www.octobot.cloud or contact us at contact@drakkar.software. + +## Introduction + +- OctoBot cloud is a Service-as-a-Software (SaaS) offering + from Drakkar-Software, which provides hosting services for + OctoBot software, a cryptocurrency trading bot. +- The purpose of OctoBot cloud is to allow users to access + and utilize the capabilities of the OctoBot software for + the purpose of trading cryptocurrencies on supported + exchanges. +- By accessing and using OctoBot cloud, the user agrees to + be bound by these terms and conditions, which constitute a + legally binding agreement between the user and + Drakkar-Software. +- Drakkar-Software reserves the right to modify these terms + and conditions at any time, with notice to the user. The + user is responsible for regularly reviewing these terms + and conditions. Continued use of OctoBot cloud after any + modifications indicates the user's acceptance of the + modified terms. +- Drakkar-Software reserves the right to make changes to any + product at any time without prior notice to the user. It + is the responsibility of the user to regularly review + these details and updates. By continuing to use OctoBot + Cloud after any changes have been made, the user is + acknowledging and accepting the updated product. +- By using OctoBot cloud, the user agrees to the collection + and use of their personal information as outlined in the + Privacy Policy. The [Privacy Policy](terms/privacy), provides more information + about the types of information collected, how it is used, + and with whom it may be shared. The user is encouraged to + review the Privacy Policy carefully and to contact OctoBot + Cloud if they have any questions or concerns. +- By using OctoBot cloud and its services, the user + acknowledges and agrees to the terms and conditions set + forth in this agreement, including but not limited to the + [Risks Disclosure Statement](terms/risk). The user is + advised to regularly review the terms and conditions, + including the [Risks Disclosure Statement](terms/risk), to stay informed + of any changes. + +## Use of Service + +- The OctoBot cloud service is provided by Drakkar-Software + and allows users to access and use the OctoBot software, a + cryptocurrency trading bot, through a subscription + service. +- OctoBot cloud users are responsible for ensuring that + their use of the service complies with all applicable laws + and regulations. +- OctoBot cloud and Drakkar-Software do not provide + financial, investment, legal, tax, or any other + professional advice. The use of the OctoBot software, + whether it be through customization, copying or using + trading strategies or trading signals, involves + significant risks and potential for financial loss. +- OctoBot and OctoBot cloud use crypto exchanges as third + parties to collect price and chart data. +- Users are permitted to create, publish, and update their + own trading strategies on the OctoBot cloud platform. + Users retain control over access to their strategies and + may manage permissions and conditions for other users to + view or use them, in accordance with their own terms and + conditions, provided they do not conflict with these Terms + of use. +- OctoBot cloud is not responsible for the performance of + any trading strategy, or it's traded crypto-assets. Gains + or losses resulting from the use of OctoBot cloud are + user's sole responsibility. Displayed performance is based + on past data and cannot guarantee future performance. +- OctoBot cloud is not responsible for the actions or + security issues of any company, team member, or community + member associated with traded crypto-assets. Users are + sole responsible for assessing the risks and merits of any + investment decision. +- By using crypto baskets, users agree that the composition + of the basket may be automatically updated in response to + changing market conditions to align with the selected + basket's goal. OctoBot cloud is not liable for any + resulting gains or losses associated to these updates. +- Artificial Intelligence (AI) services, strategies, and + answers provided by OctoBot cloud are for informational + purposes only and do not constitute financial, investment, + legal, tax, or any other professional advice. Any + decision made based on AI-generated content made by the + user is at its own risk. OctoBot cloud is not responsible + for any resulting gains or losses. +- OctoBot cloud retains the intellectual property of any + strategy published on the platform. + +## Registration and Accounts + +- In order to use the OctoBot cloud services, you must + create a user account. You are responsible for maintaining + the confidentiality of your account and password, and you + agree to accept responsibility for all activities that + occur under your account. +- You must provide accurate and complete information during + the registration process and keep your information + up-to-date. OctoBot cloud reserves the right to refuse + registration or cancel an account at any time if the + information provided is false, inaccurate, or incomplete. +- You must notify us in writing immediately if you become + aware of any unauthorized use of your account. +- You must not use any other person's account to access the + website. + +## Subscription + +- To access the trading strategies or other services offered + by OctoBot cloud, users may be required to pay a + subscription fee. The subscription fee will be billed on a + recurring basis, according to the selected plan. +- The user may choose to upgrade or downgrade their + subscription plan at any time, and the changes will be + reflected in their next billing cycle. +- Drakkar-Software reserves the right to change the + subscription fees at any time, with or without notice to + the user. Continued use of OctoBot cloud after any fee + changes indicates the user's acceptance of the modified + fees. +- If the user chooses to cancel their subscription, the + subscription will be canceled instantly before the end of + their current billing cycle. The user may request a refund + in accordance with the [Refund Policy](terms/refund). + +## Payment Options + +- Users can pay directly using their credit cards or crypto + to access the services offered by OctoBot cloud. +- When paying with a credit card, the user's card will be + charged at the time of purchase. The user's subscription + fee will be billed on a recurring basis, according to the + selected plan. +- When paying with crypto, the user's account will be + charged at the time of purchase. The user's subscription + fee will be billed on a recurring basis, according to the + selected plan. +- The user is responsible for ensuring that their payment + information is up-to-date and accurate, and for promptly + updating their information if any changes occur. Failure + to maintain accurate payment information may result in the + suspension or termination of the user's account. +- Users should not use stolen or credit cards that they do + not own. Users should not use stolen crypto or crypto that they + do not own. The use of unauthorized or stolen payment + information may result in the suspension or termination of + the user's account, and may also subject the user to + criminal or civil liabilities. The user is responsible for + ensuring that their payment information is properly + obtained. OctoBot cloud reserves the right to take + appropriate legal action against any user who uses + unauthorized or stolen payment information. By using the + platform, the user represents and warrants that their + payment information is accurate and that they have the + right to use the payment method they have provided. +- OctoBot cloud may offer users a free trial period during + which they can access and use the service without + incurring any charges. The free trial period will be + specified at the time of registration and may be subject + to additional terms and conditions. Users may not be + required to provide a valid payment method to sign up for + a free trial and will not be charged unless they continue + to use the service after the trial period has ended. + OctoBot Cloud reserves the right to modify or discontinue + the free trial offer at any time without notice. +- OctoBot cloud does offer refunds in accordance with the + free trial. Users are responsible for understanding the features and + limitations of the service before making a purchase. In + accordance with applicable laws, the right of the user to + retract from their purchase is excluded in the case of the + provision of services, such as access to OctoBot cloud + services. + +## OctoBot Rewards Program + +- OctoBot cloud reserves the right, in its sole and absolute + discretion, to void, cancel, or remove any and all OctoBot + cloud reward points ("experience points") and levels a + user may have earned or accumulated, at any time and + without prior notice. This action may be taken if OctoBot + cloud reasonably believes or suspects that such points or + levels have been obtained or accumulated through + fraudulent activity, suspicious behavior, or any means in + violation of these Terms and Conditions, or in any manner + inconsistent with the purpose or intention of the OctoBot + Rewards Program. This includes, but is not limited to, the + usage of automated methods, deception, or + misrepresentation of any sort. No appeal or challenge + process shall be available to the user in relation to any + such decision by OctoBot cloud. +- By using the services of OctoBot cloud, you agree to these + Rewards Program terms and acknowledge that the decision of + OctoBot in these matters is final and binding. + +## Account management + +- The user is responsible for maintaining the + confidentiality of their OctoBot cloud account and + password, and for restricting access to their computer or + device. The user agrees to accept responsibility for all + activities that occur under their account or password. +- The user may not use OctoBot cloud for any illegal or + unauthorized purposes, and they must comply with all + local, state, federal, and international laws and + regulations. +- The user may not modify, adapt, translate, or reverse + engineer any part of OctoBot cloud, or attempt to do so. +- Drakkar-Software reserves the right to terminate or + suspend the user's account, without notice and without + liability, if the user violates any of these terms and + conditions, or engages in any illegal or unauthorized + activities. +- The user may cancel their account at any time, by + contacting Drakkar-Software. Upon cancellation, the user's + account and all associated data will be permanently + deleted after up to 3 months. The user may request a + refund for any subscription in accordance with the + [Refund Policy](terms/refund). +- The user may not transfer their account to any other + person or entity, and any attempt to do so will be null + and void. +- The user is responsible for regularly backing up any data + or information stored on OctoBot cloud, as + Drakkar-Software is not responsible for any loss of data + or information. + +## Availability of Service + +- OctoBot cloud is a Service-as-a-Software (SaaS) service + offered by Drakkar-Software, located at www.octobot.cloud. +- OctoBot cloud provides its services to its users on a + reasonable effort basis and will use commercially + reasonable efforts to make the service available 24 hours + a day, 7 days a week, except for: (i) planned downtime (of + which OctoBot cloud shall give at least 4 hours notice via + the website), or (ii) any unavailability caused by + circumstances beyond OctoBot cloud's reasonable control, + including without limitation, acts of God, acts of + government, floods, fires, earthquakes, civil unrest, acts + of terror, strikes or other labor problems, or Internet + service provider failures or delays. +- OctoBot cloud does not guarantee the availability of its + service and does not accept any responsibility for any + unavailability. + +## Responsibility of User Content + +- By using the OctoBot cloud service, you grant OctoBot + Cloud and Drakkar-Software the right to use, modify, + display, distribute, and create derivative works of your + User Content for the purpose of providing the OctoBot + Cloud service. +- You are solely responsible for the User Content that you + make available through the OctoBot cloud platform. You + represent and warrant that: (i) you either are the sole + and exclusive owner of all User Content or you have all + rights, licenses, consents, and releases necessary to + grant OctoBot cloud and Drakkar-Software the rights in + such User Content, and (ii) neither the User Content, nor + your submission, uploading, publishing, or otherwise + making available of such User Content, nor OctoBot cloud + and Drakkar-Software's use of the User Content as + permitted herein will infringe, misappropriate or violate + a third party's patent, copyright, trademark, trade + secret, moral rights or other proprietary or intellectual + property rights, or rights of publicity or privacy, or + result in the violation of any applicable law or + regulation. +- OctoBot cloud and Drakkar-Software reserve the right to + remove any User Content from the OctoBot cloud platform at + any time, for any reason or for no reason, including User + Content that OctoBot cloud and Drakkar-Software believe + violates these terms and conditions. + +## Links and third party sites + +- OctoBot cloud and its content (including information sent + to you) may contain links or references to third-party + websites. Such links and references may display content + from third-party websites. Any such links, references and + content are provided for your convenience only. OctoBot + cloud has no control over third party websites and accept + no legal responsibility for any content, material or + information contained in them. The display of any + hyperlink and reference to any third party website does + not mean that OctoBot cloud endorse that third party's + website, products or services. Your use of a third party + website may be governed by the terms and conditions of + that third party website. + +## Modification of Terms and Conditions + +- Drakkar-Software reserves the right to modify these Terms + and Conditions at any time, in its sole discretion. If + Drakkar-Software makes a material change to these Terms + and Conditions, Drakkar-Software will notify the user + through the OctoBot cloud website (www.octobot.cloud) or + through the user's registered email. The user's continued + use of the OctoBot cloud and OctoBot software following + the posting of changes to these Terms and Conditions + constitutes acceptance of those changes. If the user does + not agree to the modified terms, the user must discontinue + using the OctoBot cloud and OctoBot software. + +## Termination of Service + +- Drakkar-Software may, at its discretion, terminate or + suspend the user's access to the OctoBot cloud and OctoBot + software at any time and without notice, for any reason, + including but not limited to, the user's violation of + these Terms and Conditions. +- Upon termination, the user's right to use the OctoBot + Cloud and OctoBot software will immediately cease, and + Drakkar-Software may, but shall not be obligated to, + remove all the user's information and files from its + servers. The user agrees that Drakkar-Software shall not + be liable to the user or any third party for any + termination of the user's access to the OctoBot cloud and + OctoBot software. + +## Warranties and Disclaimers + +- OctoBot cloud and all associated services are provided "as + is" and "as available", without warranty of any kind, + either express or implied. +- Drakkar-Software makes no representations or warranties of + any kind, express or implied, as to the operation of + OctoBot cloud, or the information, content, materials, or + products included on OctoBot cloud. The user agrees that + their use of OctoBot cloud is at their sole risk. +- Drakkar-Software does not warrant that OctoBot cloud will + be uninterrupted or error-free, and the company does not + make any warranties as to the accuracy, completeness, + reliability, or availability of OctoBot cloud or its + associated services. +- The user has the option to use the provided trading + strategies as is, customize them, or create copies of + them. The user acknowledges that the use of OctoBot cloud + and the execution of trades through the platform is + subject to market risk, and that the user may lose money. + The user is solely responsible for their trades and for + any associated losses. +- Drakkar-Software is not responsible for any losses or + damages that may arise from the use of OctoBot cloud, + including but not limited to direct, indirect, incidental, + punitive, and consequential damages. +- Drakkar-Software makes no warranty that OctoBot cloud will + meet the user's requirements, or that OctoBot cloud will + be uninterrupted, timely, secure, or error-free. +- The user agrees to indemnify and hold Drakkar-Software and + its affiliates, officers, agents, and employees harmless + from any claim or demand, including reasonable attorneys' + fees, made by any third party due to or arising out of the + user's use of OctoBot cloud, or the user's violation of + these terms and conditions. +- These terms and conditions, together with the OctoBot + Cloud privacy policy, constitute the entire agreement + between the user and Drakkar-Software, and supersede all + prior agreements, representations, and understandings. + +## Limitation of Liability + +- OctoBot, OctoBot cloud, and Drakkar-Software are not + responsible for any trading-related loss or any loss + caused by the use of the OctoBot software. +- In no event shall OctoBot cloud or Drakkar-Software be + liable for any direct, indirect, incidental, special, + punitive, or consequential damages arising from the use of + the OctoBot cloud service or OctoBot software. +- OctoBot cloud and Drakkar-Software make no representations + or warranties about the accuracy, completeness, security, + or reliability of the OctoBot cloud service or the OctoBot + software. +- Users understand and agree that the use of OctoBot cloud + and OctoBot software is at their own risk. + +## Third party rights + +- A contract under these terms and conditions is for our + benefit and your benefit, and is not intended to benefit + or be enforceable by any third party. +- The exercise of the parties' rights under a contract under + these terms and conditions is not subject to the consent + of any third party. + +## Linking to OctoBot cloud + +Organizations may link to OctoBot cloud, publications or to +other OctoBot cloud information so long as the link: (a) is +not in any way deceptive; (b) does not falsely imply +sponsorship, endorsement or approval of the linking party +and its products and/or services; and (c) fits within the +context of the linking party's site. + +## Third party licenses + +OctoBot cloud makes use of various graphic elements provided +by Freepik, a platform that offers a large selection of free +graphic resources. These elements are used under the free +license offered by Freepik and are subject to its terms and +conditions, which can be found at freepik.com. Most images +are designed by Deinos Art and vectorjuice / Freepik. + +## Risks Disclosure Statement + +Please be advised that the use of OctoBot cloud and its +services may involve certain risks and uncertainties, +including but not limited to the risk of technological +malfunctions, operational errors, and security breaches. The +user acknowledges and agrees that they use OctoBot cloud and +its services at their own risk. + +## Privacy policy + +Users are referred to the OctoBot cloud Privacy Policy, +available at www.octobot.cloud/fr/terms/privacy, for further information regarding the collection, use, and +disclosure of their personal information by OctoBot cloud. + +## Copyright Notice + +All content included on OctoBot cloud website, such as text, +graphics, logos, images, data compilations, software, and +all other material is the property of Drakkar-Software, +OctoBot cloud or its content suppliers and is protected by +international copyright laws. Any use, including but not +limited to the reproduction, distribution, display or +transmission of the content of this website is strictly +prohibited, unless authorized by Drakkar-Software, or +OctoBot cloud. Users do not acquire any ownership rights by +using OctoBot cloud services or accessing the content +provided on the website. Any unauthorized use may result in +severe civil and criminal penalties. + +## Updates + +- Updated the 18/06/2024 to add the "OctoBot Rewards Program" + and "Links and third party sites" sections. + +- Updated the 01/02/2024 to add crypto payments terms and to introduce the Refund Policy. + +- Updated the 07/01/2025 to update user-created strategies and + crypto-baskets updates terms. Also added AI content terms + and crypto-assets risks disclaimer. + +- Updated the 22/04/2025 to include privacy policy and risk disclose links. diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/privacy.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/privacy.md new file mode 100644 index 0000000000..bcbcf14ea1 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/privacy.md @@ -0,0 +1,95 @@ +--- +title: "Politique de Confidentialité" +description: "Découvrez tout sur la politique de confidentialité d'OctoBot Cloud. Consentez à l'utilisation de vos informations, fichiers journaux, cookies et aux politiques des tiers." +--- + +# Politique de Confidentialité + +Chez OctoBot Cloud, accessible depuis www.octobot.cloud, la confidentialité de nos visiteurs est l'une de nos priorités. Ce document de Politique de Confidentialité détaille les types d'informations collectées et enregistrées par OctoBot Cloud, ainsi que leur utilisation. + +Si vous avez des questions supplémentaires ou besoin de plus d'informations concernant notre Politique de Confidentialité, n'hésitez pas à nous contacter. + +Cette Politique de Confidentialité s'applique uniquement à nos activités en ligne et est valable pour les visiteurs de notre site web concernant les informations qu'ils partagent et/ou que nous collectons sur OctoBot Cloud. Cette politique ne s'applique pas aux informations collectées hors ligne ou via d'autres canaux que ce site web. + +## Consentement + +- OctoBot Cloud est un service SaaS (Software as a Service) proposé par Drakkar-Software, fournissant un hébergement pour OctoBot, un bot de trading de cryptomonnaies. +- L'objectif d'OctoBot Cloud est de permettre aux utilisateurs d'accéder et d'utiliser les fonctionnalités du logiciel OctoBot pour le trading de cryptomonnaies sur les plateformes prises en charge. +- En accédant et en utilisant OctoBot Cloud, l'utilisateur accepte d'être lié par ces conditions générales, constituant un accord juridiquement contraignant entre l'utilisateur et Drakkar-Software. +- Drakkar-Software se réserve le droit de modifier ces conditions générales à tout moment, avec notification à l'utilisateur. Il est de la responsabilité de l'utilisateur de consulter régulièrement ces conditions. La poursuite de l'utilisation d'OctoBot Cloud après toute modification signifie l'acceptation des conditions mises à jour. +- Drakkar-Software se réserve le droit de modifier ses produits à tout moment sans notification préalable. +- En utilisant OctoBot Cloud, l'utilisateur consent à la collecte et à l'utilisation de ses informations personnelles conformément à cette Politique de Confidentialité. + +## Informations que nous collectons + +- Informations fournies par l'utilisateur lors de l'inscription ou de l'utilisation du service. +- Données techniques, telles que l'adresse IP, le type de navigateur et les statistiques d'utilisation. + +## Comment nous utilisons vos informations + +Nous utilisons les informations collectées pour : + +- Fournir, exploiter et maintenir notre site web. +- Améliorer, personnaliser et développer notre site web. +- Comprendre et analyser l'utilisation du site web. +- Développer de nouveaux produits, services et fonctionnalités. +- Communiquer avec vous, notamment pour le service client, les mises à jour et le marketing. +- Envoyer des emails. +- Détecter et prévenir les fraudes. + +## Fichiers Journaux + +OctoBot Cloud suit une procédure standard d'utilisation des fichiers journaux. Ces fichiers enregistrent les visiteurs lorsqu'ils naviguent sur le site web, incluant l'adresse IP, le type de navigateur, le fournisseur d'accès à Internet (FAI), la date et l'heure, les pages visitées, et d'autres statistiques d'utilisation. Ces données ne permettent pas d'identifier personnellement les visiteurs et sont utilisées à des fins analytiques. + +## Cookies et Balises Web + +Comme tout autre site web, OctoBot Cloud utilise des "cookies" pour stocker des informations sur les préférences des visiteurs et optimiser l'expérience utilisateur. + +Vous pouvez désactiver les cookies via les paramètres de votre navigateur. Pour plus d'informations, consultez les sites des navigateurs correspondants. + +## Politiques de Confidentialité des Tiers + +La Politique de Confidentialité d'OctoBot Cloud ne s'applique pas aux sites ou services tiers. Nous vous encourageons à consulter les politiques de confidentialité de ces services pour plus d'informations. + +OctoBot Cloud utilise notamment des services tiers d'analyse des données : + +- [Cloudflare Analytics](https://www.cloudflare.com/analytics/) +- [Vercel Analytics](https://vercel.com/analytics) +- [Posthog](https://posthog.com/) + +Pour le traitement des paiements par carte bancaire, nous utilisons **Stripe**, un service tiers de traitement des paiements. Stripe collecte certaines informations personnelles, y compris les numéros de carte bancaire, afin de traiter les transactions en toute sécurité. OctoBot Cloud n'a pas accès à ces informations. Pour en savoir plus, veuillez consulter la [Politique de Confidentialité de Stripe](https://stripe.com/privacy). + +## Droits à la Vie Privée en vertu du CCPA (Californie) + +Selon le **California Consumer Privacy Act (CCPA)**, les consommateurs californiens ont le droit de : + +- Demander la divulgation des catégories et des éléments spécifiques de données personnelles collectées. +- Demander la suppression des données personnelles collectées. +- Refuser la vente de leurs données personnelles. + +Si vous souhaitez exercer ces droits, veuillez nous contacter. Nous avons jusqu'à trois mois pour vous répondre. + +## Droits en vertu du RGPD + +Conformément au **Règlement Général sur la Protection des Données (RGPD)**, vous avez les droits suivants : + +- **Droit d'accès** : Demander des copies de vos données personnelles. +- **Droit de rectification** : Demander la correction de vos données inexactes. +- **Droit à l'effacement** : Demander la suppression de vos données sous certaines conditions. +- **Droit à la limitation du traitement** : Demander la restriction du traitement de vos données sous certaines conditions. +- **Droit d'opposition** : Vous opposer au traitement de vos données sous certaines conditions. +- **Droit à la portabilité des données** : Demander le transfert de vos données à une autre entité sous certaines conditions. + +## Protection des Enfants + +La protection des enfants en ligne est essentielle pour nous. Nous encourageons les parents et tuteurs à surveiller et guider l'activité en ligne de leurs enfants. + +OctoBot Cloud ne collecte pas intentionnellement d'informations personnelles identifiables auprès d'enfants de moins de 13 ans. Si vous pensez que votre enfant a fourni de telles informations, veuillez nous contacter immédiatement afin que nous puissions les supprimer. + +## Modifications de cette Politique de Confidentialité + +Nous pouvons mettre à jour notre Politique de Confidentialité de temps en temps. Nous vous conseillons de consulter cette page régulièrement pour rester informé des éventuelles modifications. + +## Contactez-nous + +Si vous avez des questions ou des suggestions concernant notre Politique de Confidentialité, veuillez nous contacter à contact@drakkar.software diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/referral.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/referral.md new file mode 100644 index 0000000000..8754736def --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/referral.md @@ -0,0 +1,79 @@ +--- +title: "Conditions de Parrainage" +description: "Découvrez les Conditions de Parrainage d'OctoBot Cloud. Obtenez des récompenses en invitant vos amis à rejoindre OctoBot." +--- + +# Conditions de Parrainage + +Nous vous souhaitons la bienvenue sur OctoBot Cloud, un service SaaS proposé par Drakkar-Software, qui héberge OctoBot, un bot de trading de cryptomonnaies. Le logiciel OctoBot, le site web www.octobot.cloud, les applications mobiles et les interfaces de programmation d'applications sont collectivement désignés sous le terme de "Logiciel". + +**En créant ou en partageant un lien de parrainage, vous reconnaissez et acceptez d'être lié par ces Conditions de Parrainage.** + +Pour les aspects non abordés dans les Conditions de Parrainage, les Conditions d'Utilisation s'appliquent. En cas de divergence entre les Conditions de Parrainage et les Conditions d'Utilisation, les Conditions de Parrainage prévalent. Drakkar-Software se réserve le droit de modifier les Conditions de Parrainage, comme indiqué dans la Section 1 des Conditions d'Utilisation. + +Pour plus d'informations, veuillez visiter notre site web www.octobot.cloud ou nous contacter par email à contact@drakkar.software. + +## Définitions + +- Un "**Parrain**" est une personne physique ou morale disposant d'un compte utilisateur sur le Logiciel, sans interdictions, blocages, sanctions ou restrictions, ayant accepté les Conditions de Parrainage et souhaitant participer au programme de parrainage d'OctoBot en attirant de nouveaux clients via le Lien de Parrainage. +- Un "**Client Qualifié**" est une personne physique ou morale qui (a) s'est inscrite avec succès sur le Logiciel en utilisant le Lien de Parrainage, (b) a activé un plan payant ou gratuit sur le Logiciel et (c) n'est pas le Parrain. +- Un "**Lien de Parrainage**" est un lien hypertexte personnalisé menant à la page d'inscription du Logiciel, généré automatiquement pour le Parrain afin qu'il puisse le partager avec de potentiels Clients Qualifiés. + +## Responsabilités du Parrain + +#### Pour devenir Parrain, vous devez : + +- Être un Client enregistré du Logiciel avec un profil complet, incluant toutes les informations requises, notamment vos coordonnées. +- Obtenir un Lien de Parrainage dans la section dédiée du Logiciel. + +#### Le Parrain peut inviter toute personne à devenir un Client Qualifié du Logiciel, à condition que cela soit légal et que cette personne : + +- Ait l'âge légal pour utiliser le Logiciel. +- Ne réside pas dans une juridiction interdisant explicitement l'utilisation de logiciels similaires. +- Comprenne que l'utilisation du Logiciel est sous sa propre responsabilité. + +#### Le Parrain n'est pas autorisé à : + +- Conclure des accords ou agir au nom d'OctoBot ou de Drakkar-Software. +- Se présenter comme un employé ou un partenaire d'OctoBot ou de Drakkar-Software. +- Colluder avec d'autres clients actuels ou potentiels du Logiciel pour obtenir des avantages illégaux. +- Faire des déclarations ou garantir des résultats concernant le Logiciel. +- Nuire à la réputation ou à l'image de Drakkar-Software et/ou du Logiciel. +- Utiliser toute propriété intellectuelle d'OctoBot ou de Drakkar-Software en contradiction avec la licence fournie dans les Conditions d'Utilisation. + +Un nouveau client amené sur le Logiciel par le Parrain est considéré comme un Client Qualifié uniquement s'il a suivi le Lien de Parrainage fourni par le Parrain pour s'inscrire et a activé un plan payant ou gratuit sur le Logiciel. + +## Récompenses de Parrainage + +- Chaque Client Qualifié amené par le Parrain génère des récompenses pour celui-ci. +- Les actions effectuées par les Clients Qualifiés, telles que l'achat réussi (et non remboursé ou annulé) de certains produits, peuvent générer des récompenses pour le Parrain. +- Les montants des récompenses associées à chaque parrainage sont définis dans la section Récompenses du Logiciel. + +## Conformité aux Sanctions + +En utilisant les services de Drakkar-Software, vous confirmez que vous : + +Ne figurez pas sur des listes d'embargos commerciaux ou de sanctions économiques, y compris, mais sans s'y limiter : + +- Les mesures restrictives de l'Union Européenne +- Les sanctions des Nations Unies +- Les sanctions du Gouvernement français +- La liste des ressortissants spécialement désignés du Bureau de Contrôle des Actifs Étrangers (OFAC) du Département du Trésor des États-Unis +- La liste des personnes ou entités interdites du Département du Commerce des États-Unis +- Les listes de sanctions financières du Bureau de Mise en Œuvre des Sanctions Financières (OFSI) du Royaume-Uni + +Et que vous : + +- Ne violez ni ne contournez aucune sanction internationale ou mesure restrictive établie par l'Union Européenne, les Nations Unies, les États-Unis, le Royaume-Uni ou toute autre sanction applicable en France. +- Ne résidez pas dans des pays ou régions soumis à des sanctions imposées par ces entités. + +Drakkar-Software se réserve le droit de choisir les marchés et juridictions dans lesquels elle exerce ses activités et peut restreindre ou refuser la fourniture de services dans certains pays ou régions. + +Si vous devenez sujet à des sanctions internationales, vous devez immédiatement cesser d'utiliser nos services et en informer Drakkar-Software. + +Nous nous réservons le droit de suspendre, restreindre ou résilier nos services à votre encontre, ou de mettre fin à cet accord si : + +- Vous devenez sujet à des sanctions internationales. +- La fourniture de services envers vous viole ou contourne des sanctions internationales. +- Vous êtes lié à un territoire, une activité, une transaction ou une personne faisant l'objet de sanctions internationales selon notre évaluation. +- Nous décidons de limiter nos activités commerciales dans certaines juridictions, comme mentionné dans la Section 4. diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/refund.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/refund.md new file mode 100644 index 0000000000..aa3e7792fe --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/refund.md @@ -0,0 +1,43 @@ +--- +title: "Politique de remboursement" +description: "Explorez la politique de remboursement d'OctoBot Cloud. Annulation d'abonnement, conditions de remboursement pour les plans mensuels et annuels, et détails sur le traitement des paiements." +--- + +# Politique de remboursement + +Cette politique de remboursement est en vigueur à compter du 1er février 2024. Les termes utilisés ici ont les significations définies dans les [Conditions d'utilisation](/terms). + +Pour toute question relative à un paiement ou un remboursement, veuillez contacter le support par email à contact@octobot.cloud. + +## Remboursement des abonnements mensuels + +Éligible à un remboursement dans un délai de quatorze (14) jours après l'achat. +Contactez le support pour faire une demande de remboursement. + +## Remboursement des abonnements annuels + +Pour les abonnements annuels, après la période de grâce de 14 jours, les mois utilisés sont calculés sur la base du tarif de l'abonnement mensuel. +Si un utilisateur annule dans les 180 jours suivant le paiement, il est éligible à un remboursement des mois non utilisés, calculé au tarif mensuel. + +Par exemple, si l'abonnement a été utilisé pendant 3 mois et 15 jours, le remboursement couvrira 8 mois non utilisés. Aucun remboursement ne sera effectué après 180 jours suivant le paiement. + +## Remboursement des achats à vie + +Éligible à un remboursement dans un délai de trente (30) jours après l'achat. +Contactez le support pour faire une demande de remboursement. + +## Traitement des paiements + +Les remboursements sont traités via le mode de paiement initial, y compris Stripe, Apple App Store, Google Play ou les cryptomonnaies. + +## Restrictions sur les remboursements + +Un utilisateur ne peut bénéficier d’un remboursement qu’une seule fois. Une fois la demande de remboursement approuvée, l'utilisateur ne pourra pas engager de contestations, litiges ou annulations de transactions avec OctoBot Cloud ou tout service de paiement tiers. + +## Devise et frais + +Les remboursements sont effectués dans la même devise ou en équivalent. OctoBot Cloud n'est pas responsable des frais appliqués par les prestataires de services de paiement. + +## Limitation des remboursements + +Les demandes de remboursement ne peuvent être effectuées qu’une seule fois. Après approbation d’un remboursement, aucune contestation, litige ou annulation de transaction ne sera autorisée. diff --git a/docs/i18n/fr/docusaurus-plugin-content-pages/terms/risk.md b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/risk.md new file mode 100644 index 0000000000..2c4b406eee --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-pages/terms/risk.md @@ -0,0 +1,30 @@ +--- +title: "Déclaration de divulgation des risques" +description: "Explorez la déclaration de divulgation des risques d'OctoBot Cloud. Comprenez les risques potentiels liés au trading et à l'investissement." +--- + +# Déclaration de divulgation des risques + +L'utilisation des services d'OctoBot Cloud implique des risques et des incertitudes inhérents. En tant qu'utilisateur, vous reconnaissez et acceptez d'assumer l'ensemble de ces risques, y compris, mais sans s'y limiter, ceux décrits dans cette déclaration. + +En utilisant les services d'OctoBot Cloud, vous reconnaissez comprendre et accepter les risques divulgués dans cette déclaration. OctoBot Cloud ne saurait être tenu responsable de toute perte ou tout dommage résultant de l'utilisation de ses services, sauf en cas de faute lourde ou de mauvaise conduite intentionnelle de la part d'OctoBot Cloud. + +## Perte de données + +OctoBot Cloud n'est pas responsable de toute perte ou corruption de données ou d'informations stockées ou traitées via nos services. L'utilisateur est responsable de la sauvegarde de ses données et de la mise en place de mesures appropriées pour les protéger. + +## Interruptions de service + +OctoBot Cloud ne garantit ni la disponibilité ni la fiabilité de ses services. Des interruptions peuvent survenir pour diverses raisons, notamment, mais sans s'y limiter, la maintenance, les mises à jour, les pannes de réseau et d'autres problèmes techniques. + +## Risques liés à la sécurité + +OctoBot Cloud met en place des mesures pour sécuriser ses services et les données des utilisateurs. Cependant, aucune mesure de sécurité ne peut garantir une protection totale contre les accès non autorisés ou les violations de données. L'utilisateur est responsable de la sécurité de ses informations de compte, y compris de la confidentialité de ses mots de passe. + +## Risques de marché + +OctoBot Cloud propose des outils et services liés au trading et à l'investissement, mais les conditions du marché et les performances financières sont soumises à des fluctuations et des incertitudes. OctoBot Cloud ne garantit aucun résultat ou rendement spécifique, et l'utilisateur assume tous les risques liés à ses investissements ou transactions. + +## Risques liés aux services tiers + +OctoBot Cloud s'intègre à divers services et plateformes tiers, qui sont soumis à leurs propres risques et incertitudes. OctoBot Cloud n'est pas responsable des actions ou des performances de ces services et plateformes tiers, et l'utilisateur assume tous les risques liés à leur utilisation. diff --git a/docs/package-lock.json b/docs/package-lock.json new file mode 100644 index 0000000000..c617be333a --- /dev/null +++ b/docs/package-lock.json @@ -0,0 +1,19214 @@ +{ + "name": "octobot-docs", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "octobot-docs", + "version": "0.0.0", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/plugin-client-redirects": "^3.9.2", + "@docusaurus/preset-classic": "3.9.2", + "@mdx-js/react": "^3.0.0", + "docusaurus-lunr-search": "^3.6.0", + "prism-react-renderer": "^2.3.0", + "react": "^19.0.0", + "react-dom": "^19.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "3.9.2", + "@docusaurus/tsconfig": "3.9.2", + "@docusaurus/types": "3.9.2", + "typescript": "~5.6.2" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@algolia/abtesting": { + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/@algolia/abtesting/-/abtesting-1.16.0.tgz", + "integrity": "sha512-alHFZ68/i9qLC/muEB07VQ9r7cB8AvCcGX6dVQi2PNHhc/ZQRmmFAv8KK1ay4UiseGSFr7f0nXBKsZ/jRg7e4g==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/autocomplete-core": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.19.2.tgz", + "integrity": "sha512-mKv7RyuAzXvwmq+0XRK8HqZXt9iZ5Kkm2huLjgn5JoCPtDy+oh9yxUMfDDaVCw0oyzZ1isdJBc7l9nuCyyR7Nw==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.19.2", + "@algolia/autocomplete-shared": "1.19.2" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.19.2.tgz", + "integrity": "sha512-TjxbcC/r4vwmnZaPwrHtkXNeqvlpdyR+oR9Wi2XyfORkiGkLTVhX2j+O9SaCCINbKoDfc+c2PB8NjfOnz7+oKg==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-shared": "1.19.2" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-shared": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.19.2.tgz", + "integrity": "sha512-jEazxZTVD2nLrC+wYlVHQgpBoBB5KPStrJxLzsIFl6Kqd1AlG9sIAGl39V5tECLpIQzB3Qa2T6ZPJ1ChkwMK/w==", + "license": "MIT", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/client-abtesting": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-abtesting/-/client-abtesting-5.50.0.tgz", + "integrity": "sha512-mfgUdLQNxOAvCZUGzPQxjahEWEPuQkKlV0ZtGmePOa9ZxIQZlk31vRBNbM6ScU8jTH41SCYE77G/lCifDr1SVw==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-5.50.0.tgz", + "integrity": "sha512-5mjokeKYyPaP3Q8IYJEnutI+O4dW/Ixxx5IgsSxT04pCfGqPXxTOH311hTQxyNpcGGEOGrMv8n8Z+UMTPamioQ==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.50.0.tgz", + "integrity": "sha512-emtOvR6dl3rX3sBJXXbofMNHU1qMQqQSWu319RMrNL5BWoBqyiq7y0Zn6cjJm7aGHV/Qbf+KCCYeWNKEMPI3BQ==", + "license": "MIT", + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-insights": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-insights/-/client-insights-5.50.0.tgz", + "integrity": "sha512-IerGH2/hcj/6bwkpQg/HHRqmlGN1XwygQWythAk0gZFBrghs9danJaYuSS3ShzLSVoIVth4jY5GDPX9Lbw5cgg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-5.50.0.tgz", + "integrity": "sha512-3idPJeXn5L0MmgP9jk9JJqblrQ/SguN93dNK9z9gfgyupBhHnJMOEjrRYcVgTIfvG13Y04wO+Q0FxE2Ut8PVbA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-query-suggestions": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-query-suggestions/-/client-query-suggestions-5.50.0.tgz", + "integrity": "sha512-q7qRoWrQK1a8m5EFQEmPlo7+pg9mVQ8X5jsChtChERre0uS2pdYEDixBBl0ydBSGkdGbLUDufcACIhH/077E4g==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.50.0.tgz", + "integrity": "sha512-Jc360x4yqb3eEg4OY4KEIdGePBxZogivKI+OGIU8aLXgAYPTECvzeOBc90312yHA1hr3AeRlAFl0rIc8lQaIrQ==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==", + "license": "MIT" + }, + "node_modules/@algolia/ingestion": { + "version": "1.50.0", + "resolved": "https://registry.npmjs.org/@algolia/ingestion/-/ingestion-1.50.0.tgz", + "integrity": "sha512-OS3/Viao+NPpyBbEY3tf6hLewppG+UclD+9i0ju56mq2DrdMJFCkEky6Sk9S5VPcbLzxzg3BqBX6u9Q35w19aQ==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/monitoring": { + "version": "1.50.0", + "resolved": "https://registry.npmjs.org/@algolia/monitoring/-/monitoring-1.50.0.tgz", + "integrity": "sha512-/znwgSiGufpbJVIoDmeQaHtTq+OMdDawFRbMSJVv+12n79hW+qdQXS8/Uu3BD3yn0BzgVFJEvrsHrCsInZKdhw==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/recommend": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-5.50.0.tgz", + "integrity": "sha512-dHjUfu4jfjdQiKDpCpAnM7LP5yfG0oNShtfpF5rMCel6/4HIoqJ4DC4h5GKDzgrvJYtgAhblo0AYBmOM00T+lQ==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.50.0.tgz", + "integrity": "sha512-bffIbUljAWnh/Ctu5uScORajuUavqmZ0ACYd1fQQeSSYA9NNN83ynO26pSc2dZRXpSK0fkc1//qSSFXMKGu+aw==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-fetch": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.50.0.tgz", + "integrity": "sha512-y0EwNvPGvkM+yTAqqO6Gpt9wVGm3CLDtpLvNEiB3VGvN3WzfkjZGtLUsG/ru2kVJIIU7QcV0puuYgEpBeFxcJg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-node-http": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.50.0.tgz", + "integrity": "sha512-xpwefe4fCOWnZgXCbkGpqQY6jgBSCf2hmgnySbyzZIccrv3SoashHKGPE4x6vVG+gdHrGciMTAcDo9HOZwH22Q==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz", + "integrity": "sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.3" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.28.6.tgz", + "integrity": "sha512-dTOdvsjnG3xNT9Y0AUg1wAl38y+4Rl4sf9caSQZOXdNqVn+H+HbbJ4IyyHaIqNR6SW9oJpA/RuRjsjCw2IdIow==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-member-expression-to-functions": "^7.28.5", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/helper-replace-supers": "^7.28.6", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/traverse": "^7.28.6", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.28.5.tgz", + "integrity": "sha512-N1EhvLtHzOvj7QQOUCCS3NrPJP8c5W6ZXCHDn7Yialuy1iu4r5EmIYkXlKNqT99Ciw+W0mDqWoR6HWMZlFP3hw==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "regexpu-core": "^6.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.8", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.8.tgz", + "integrity": "sha512-47UwBLPpQi1NoWzLuHNjRoHlYXMwIJoBf7MFou6viC/sIHWYygpvr0B6IAyh5sBdA2nr2LPIRww8lfaUVQINBA==", + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "debug": "^4.4.3", + "lodash.debounce": "^4.0.8", + "resolve": "^1.22.11" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.28.5.tgz", + "integrity": "sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz", + "integrity": "sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.27.1.tgz", + "integrity": "sha512-7fiA521aVw8lSPeI4ZOD3vRFkoqkJcS+z4hFo82bFSH/2tNd6eJ5qCVMS5OzDmZh/kaHQeBaeyxK6wljcPtveA==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-wrap-function": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.28.6.tgz", + "integrity": "sha512-mq8e+laIk94/yFec3DxSjCRD2Z0TAjhVbEJY3UQrlwVo15Lmt7C2wAUbK4bjnTs4APkwsYLTahXRraQXhb1WCg==", + "license": "MIT", + "dependencies": { + "@babel/helper-member-expression-to-functions": "^7.28.5", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz", + "integrity": "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.28.6.tgz", + "integrity": "sha512-z+PwLziMNBeSQJonizz2AGnndLsP2DeGHIxDAn+wdHOGuo4Fo1x1HBPPXeE9TAOPHNNWQKCSlA2VZyYyyibDnQ==", + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.28.5.tgz", + "integrity": "sha512-87GDMS3tsmMSi/3bWOte1UblL+YUTFMV8SZPZ2eSEL17s74Cw/l63rR6NmGVKMYW2GYi85nE+/d6Hw5N0bEk2Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-class-field-initializer-scope": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.27.1.tgz", + "integrity": "sha512-qNeq3bCKnGgLkEXUuFry6dPlGfCdQNZbn7yUAPCInwAJHMU7THJfrBSozkcWq5sNM6RcF3S8XyQL2A52KNR9IA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.27.1.tgz", + "integrity": "sha512-g4L7OYun04N1WyqMNjldFwlfPCLVkgB54A/YCXICZYBsvJJE3kByKv9c9+R/nAfmIfjl2rKYLNyMHboYbZaWaA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.27.1.tgz", + "integrity": "sha512-oO02gcONcD5O1iTLi/6frMJBIwWEHceWGSGqrpCmEL8nogiS6J9PBlE48CaK20/Jx1LuRml9aDftLgdjXT8+Cw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-transform-optional-chaining": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.28.6.tgz", + "integrity": "sha512-a0aBScVTlNaiUe35UtfxAN7A/tehvvG4/ByO6+46VPKTRSlfnAFsgKy0FUh+qAkQrDTmhDkT+IBOKlOoMUxQ0g==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.28.6.tgz", + "integrity": "sha512-pSJUpFHdx9z5nqTSirOCMtYVP2wFgoWhP0p3g8ONK/4IHhLIBd0B9NYqAvIUAhq+OkhO4VM1tENCt0cjlsNShw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.28.6.tgz", + "integrity": "sha512-jiLC0ma9XkQT3TKJ9uYvlakm66Pamywo+qwL+oL8HJOvc6TWdZXVfhqJr8CCzbSGUAbDOzlGHJC1U+vRfLQDvw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.28.6.tgz", + "integrity": "sha512-wgEmr06G6sIpqr8YDwA2dSRTE3bJ+V0IfpzfSY3Lfgd7YWOaAdlykvJi13ZKBt8cZHfgH1IXN+CL656W3uUa4w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.28.6.tgz", + "integrity": "sha512-+nDNmQye7nlnuuHDboPbGm00Vqg3oO8niRRL27/4LYHUsHYh0zJ1xWOz0uRwNFmM1Avzk8wZbc6rdiYhomzv/A==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.27.1.tgz", + "integrity": "sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.29.0.tgz", + "integrity": "sha512-va0VdWro4zlBr2JsXC+ofCPB2iG12wPtVGTWFx2WLDOM3nYQZZIGP82qku2eW/JR83sD+k2k+CsNtyEbUqhU6w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-remap-async-to-generator": "^7.27.1", + "@babel/traverse": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.28.6.tgz", + "integrity": "sha512-ilTRcmbuXjsMmcZ3HASTe4caH5Tpo93PkTxF9oG2VZsSWsahydmcEHhix9Ik122RcTnZnUzPbmux4wh1swfv7g==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-remap-async-to-generator": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.27.1.tgz", + "integrity": "sha512-cnqkuOtZLapWYZUYM5rVIdv1nXYuFVIltZ6ZJ7nIj585QsjKM5dhL2Fu/lICXZ1OyIAFc7Qy+bvDAtTXqGrlhg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.28.6.tgz", + "integrity": "sha512-tt/7wOtBmwHPNMPu7ax4pdPz6shjFrmHDghvNC+FG9Qvj7D6mJcoRQIF5dy4njmxR941l6rgtvfSB2zX3VlUIw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.28.6.tgz", + "integrity": "sha512-dY2wS3I2G7D697VHndN91TJr8/AAfXQNt5ynCTI/MpxMsSzHp+52uNivYT5wCPax3whc47DR8Ba7cmlQMg24bw==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.28.6.tgz", + "integrity": "sha512-rfQ++ghVwTWTqQ7w8qyDxL1XGihjBss4CmTgGRCTAC9RIbhVpyp4fOeZtta0Lbf+dTNIVJer6ych2ibHwkZqsQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.28.6.tgz", + "integrity": "sha512-EF5KONAqC5zAqT783iMGuM2ZtmEBy+mJMOKl2BCvPZ2lVrwvXnB6o+OBWCS+CoeCCpVRF2sA2RBKUxvT8tQT5Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-globals": "^7.28.0", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-replace-supers": "^7.28.6", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.28.6.tgz", + "integrity": "sha512-bcc3k0ijhHbc2lEfpFHgx7eYw9KNXqOerKWfzbxEHUGKnS3sz9C4CNL9OiFN1297bDNfUiSO7DaLzbvHQQQ1BQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/template": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.28.5.tgz", + "integrity": "sha512-Kl9Bc6D0zTUcFUvkNuQh4eGXPKKNDOJQXVyyM4ZAQPMveniJdxi8XMJwLo+xSoW3MIq81bD33lcUe9kZpl0MCw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.28.6.tgz", + "integrity": "sha512-SljjowuNKB7q5Oayv4FoPzeB74g3QgLt8IVJw9ADvWy3QnUb/01aw8I4AVv8wYnPvQz2GDDZ/g3GhcNyDBI4Bg==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.27.1.tgz", + "integrity": "sha512-MTyJk98sHvSs+cvZ4nOauwTTG1JeonDjSGvGGUNHreGQns+Mpt6WX/dVzWBHgg+dYZhkC4X+zTDfkTU+Vy9y7Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-named-capturing-groups-regex": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.29.0.tgz", + "integrity": "sha512-zBPcW2lFGxdiD8PUnPwJjag2J9otbcLQzvbiOzDxpYXyCuYX9agOwMPGn1prVH0a4qzhCKu24rlH4c1f7yA8rw==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.27.1.tgz", + "integrity": "sha512-MHzkWQcEmjzzVW9j2q8LGjwGWpG2mjwaaB0BNQwst3FIjqsg8Ct/mIZlvSPJvfi9y2AC8mi/ktxbFVL9pZ1I4A==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-explicit-resource-management": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-explicit-resource-management/-/plugin-transform-explicit-resource-management-7.28.6.tgz", + "integrity": "sha512-Iao5Konzx2b6g7EPqTy40UZbcdXE126tTxVFr/nAIj+WItNxjKSYTEw3RC+A2/ZetmdJsgueL1KhaMCQHkLPIg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/plugin-transform-destructuring": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.28.6.tgz", + "integrity": "sha512-WitabqiGjV/vJ0aPOLSFfNY1u9U3R7W36B03r5I2KoNix+a3sOhJ3pKFB3R5It9/UiK78NiO0KE9P21cMhlPkw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.27.1.tgz", + "integrity": "sha512-tQvHWSZ3/jH2xuq/vZDy0jNn+ZdXJeM8gHvX4lnJmsc3+50yPlWdZXIc5ay+umX+2/tJIqHqiEqcJvxlmIvRvQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.27.1.tgz", + "integrity": "sha512-BfbWFFEJFQzLCQ5N8VocnCtA8J1CLkNTe2Ms2wocj75dd6VpiqS5Z5quTYcUoo4Yq+DN0rtikODccuv7RU81sw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.27.1.tgz", + "integrity": "sha512-1bQeydJF9Nr1eBCMMbC+hdwmRlsv5XYOMu03YSWFwNs0HsAmtSxxF1fyuYPqemVldVyFmlCU7w8UE14LupUSZQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.28.6.tgz", + "integrity": "sha512-Nr+hEN+0geQkzhbdgQVPoqr47lZbm+5fCUmO70722xJZd0Mvb59+33QLImGj6F+DkK3xgDi1YVysP8whD6FQAw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.27.1.tgz", + "integrity": "sha512-0HCFSepIpLTkLcsi86GG3mTUzxV5jpmbv97hTETW3yzrAij8aqlD36toB1D0daVFJM8NK6GvKO0gslVQmm+zZA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.28.6.tgz", + "integrity": "sha512-+anKKair6gpi8VsM/95kmomGNMD0eLz1NQ8+Pfw5sAwWH9fGYXT50E55ZpV0pHUHWf6IUTWPM+f/7AAff+wr9A==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.27.1.tgz", + "integrity": "sha512-hqoBX4dcZ1I33jCSWcXrP+1Ku7kdqXf1oeah7ooKOIiAdKQ+uqftgCFNOSzA5AMS2XIHEYeGFg4cKRCdpxzVOQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.27.1.tgz", + "integrity": "sha512-iCsytMg/N9/oFq6n+gFTvUYDZQOMK5kEdeYxmxt91fcJGycfxVP9CnrxoliM0oumFERba2i8ZtwRUCMhvP1LnA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.28.6.tgz", + "integrity": "sha512-jppVbf8IV9iWWwWTQIxJMAJCWBuuKx71475wHwYytrRGQ2CWiDvYlADQno3tcYpS/T2UUWFQp3nVtYfK/YBQrA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.29.0.tgz", + "integrity": "sha512-PrujnVFbOdUpw4UHiVwKvKRLMMic8+eC0CuNlxjsyZUiBjhFdPsewdXCkveh2KqBA9/waD0W1b4hXSOBQJezpQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.27.1.tgz", + "integrity": "sha512-iQBE/xC5BV1OxJbp6WG7jq9IWiD+xxlZhLrdwpPkTX3ydmXdvoCpyfJN7acaIBZaOqTfr76pgzqBJflNbeRK+w==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.29.0.tgz", + "integrity": "sha512-1CZQA5KNAD6ZYQLPw7oi5ewtDNxH/2vuCh+6SmvgDfhumForvs8a1o9n0UrEoBD8HU4djO2yWngTQlXl1NDVEQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.27.1.tgz", + "integrity": "sha512-f6PiYeqXQ05lYq3TIfIDu/MtliKUbNwkGApPUvyo6+tc7uaR4cPjPe7DFPr15Uyycg2lZU6btZ575CuQoYh7MQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.28.6.tgz", + "integrity": "sha512-3wKbRgmzYbw24mDJXT7N+ADXw8BC/imU9yo9c9X9NKaLF1fW+e5H1U5QjMUBe4Qo4Ox/o++IyUkl1sVCLgevKg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.28.6.tgz", + "integrity": "sha512-SJR8hPynj8outz+SlStQSwvziMN4+Bq99it4tMIf5/Caq+3iOc0JtKyse8puvyXkk3eFRIA5ID/XfunGgO5i6w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.28.6.tgz", + "integrity": "sha512-5rh+JR4JBC4pGkXLAcYdLHZjXudVxWMXbB6u6+E9lRL5TrGVbHt1TjxGbZ8CkmYw9zjkB7jutzOROArsqtncEA==", + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/plugin-transform-destructuring": "^7.28.5", + "@babel/plugin-transform-parameters": "^7.27.7", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.27.1.tgz", + "integrity": "sha512-SFy8S9plRPbIcxlJ8A6mT/CxFdJx/c04JEctz4jf8YZaVS2px34j7NXRrlGlHkN/M2gnpL37ZpGRGVFLd3l8Ng==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.28.6.tgz", + "integrity": "sha512-R8ja/Pyrv0OGAvAXQhSTmWyPJPml+0TMqXlO5w+AsMEiwb2fg3WkOvob7UxFSL3OIttFSGSRFKQsOhJ/X6HQdQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.28.6.tgz", + "integrity": "sha512-A4zobikRGJTsX9uqVFdafzGkqD30t26ck2LmOzAuLL8b2x6k3TIqRiT2xVvA9fNmFeTX484VpsdgmKNA0bS23w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.27.7.tgz", + "integrity": "sha512-qBkYTYCb76RRxUM6CcZA5KRu8K4SM8ajzVeUgVdMVO9NN9uI/GaVmBg/WKJJGnNokV9SY8FxNOVWGXzqzUidBg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.28.6.tgz", + "integrity": "sha512-piiuapX9CRv7+0st8lmuUlRSmX6mBcVeNQ1b4AYzJxfCMuBfB0vBXDiGSmm03pKJw1v6cZ8KSeM+oUnM6yAExg==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.28.6.tgz", + "integrity": "sha512-b97jvNSOb5+ehyQmBpmhOCiUC5oVK4PMnpRvO7+ymFBoqYjeDHIU9jnrNUuwHOiL9RpGDoKBpSViarV+BU+eVA==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-create-class-features-plugin": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.27.1.tgz", + "integrity": "sha512-oThy3BCuCha8kDZ8ZkgOg2exvPYUlprMukKQXI1r1pJ47NCvxfkEy8vK+r/hT9nF0Aa4H1WUPZZjHTFtAhGfmQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.27.1.tgz", + "integrity": "sha512-edoidOjl/ZxvYo4lSBOQGDSyToYVkTAwyVoa2tkuYTSmjrB1+uAedoL5iROVLXkxH+vRgA7uP4tMg2pUJpZ3Ug==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.28.0.tgz", + "integrity": "sha512-D6Eujc2zMxKjfa4Zxl4GHMsmhKKZ9VpcqIchJLvwTxad9zWIYulwYItBovpDOoNLISpcZSXoDJ5gaGbQUDqViA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.28.6.tgz", + "integrity": "sha512-61bxqhiRfAACulXSLd/GxqmAedUSrRZIu/cbaT18T1CetkTmtDN15it7i80ru4DVqRK1WMxQhXs+Lf9kajm5Ow==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/plugin-syntax-jsx": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.27.1.tgz", + "integrity": "sha512-ykDdF5yI4f1WrAolLqeF3hmYU12j9ntLQl/AOG1HAS21jxyg1Q0/J/tpREuYLfatGdGmXp/3yS0ZA76kOlVq9Q==", + "license": "MIT", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.27.1.tgz", + "integrity": "sha512-JfuinvDOsD9FVMTHpzA/pBLisxpv1aSf+OIV8lgH3MuWrks19R27e6a6DipIg4aX1Zm9Wpb04p8wljfKrVSnPA==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.29.0.tgz", + "integrity": "sha512-FijqlqMA7DmRdg/aINBSs04y8XNTYw/lr1gJ2WsmBnnaNw1iS43EPkJW+zK7z65auG3AWRFXWj+NcTQwYptUog==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regexp-modifiers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.28.6.tgz", + "integrity": "sha512-QGWAepm9qxpaIs7UM9FvUSnCGlb8Ua1RhyM4/veAxLwt3gMat/LSGrZixyuj4I6+Kn9iwvqCyPTtbdxanYoWYg==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.27.1.tgz", + "integrity": "sha512-V2ABPHIJX4kC7HegLkYoDpfg9PVmuWy/i6vUM5eGK22bx4YVFD3M5F0QQnWQoDs6AGsUWTVOopBiMFQgHaSkVw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.29.0.tgz", + "integrity": "sha512-jlaRT5dJtMaMCV6fAuLbsQMSwz/QkvaHOHOSXRitGGwSpR1blCY4KUKoyP2tYO8vJcqYe8cEj96cqSztv3uF9w==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "babel-plugin-polyfill-corejs2": "^0.4.14", + "babel-plugin-polyfill-corejs3": "^0.13.0", + "babel-plugin-polyfill-regenerator": "^0.6.5", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.27.1.tgz", + "integrity": "sha512-N/wH1vcn4oYawbJ13Y/FxcQrWk63jhfNa7jef0ih7PHSIHX2LB7GWE1rkPrOnka9kwMxb6hMl19p7lidA+EHmQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.28.6.tgz", + "integrity": "sha512-9U4QObUC0FtJl05AsUcodau/RWDytrU6uKgkxu09mLR9HLDAtUMoPuuskm5huQsoktmsYpI+bGmq+iapDcriKA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.27.1.tgz", + "integrity": "sha512-lhInBO5bi/Kowe2/aLdBAawijx+q1pQzicSgnkB6dUPc1+RC8QmJHKf2OjvU+NZWitguJHEaEmbV6VWEouT58g==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.27.1.tgz", + "integrity": "sha512-fBJKiV7F2DxZUkg5EtHKXQdbsbURW3DZKQUWphDum0uRP6eHGGa/He9mc0mypL680pb+e/lDIthRohlv8NCHkg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.1.tgz", + "integrity": "sha512-RiSILC+nRJM7FY5srIyc4/fGIwUhyDuuBSdWn4y6yT6gm652DpCHZjIipgn6B7MQ1ITOUnAKWixEUjQRIBIcLw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.28.6.tgz", + "integrity": "sha512-0YWL2RFxOqEm9Efk5PvreamxPME8OyY0wM5wh5lHjF+VtVhdneCWGzZeSqzOfiobVqQaNCd2z0tQvnI9DaPWPw==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-create-class-features-plugin": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.27.1.tgz", + "integrity": "sha512-Ysg4v6AmF26k9vpfFuTZg8HRfVWzsh1kVfowA23y9j/Gu6dOuahdUVhkLqpObp3JIv27MLSii6noRnuKN8H0Mg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.28.6.tgz", + "integrity": "sha512-4Wlbdl/sIZjzi/8St0evF0gEZrgOswVO6aOzqxh1kDZOl9WmLrHq2HtGhnOJZmHZYKP8WZ1MDLCt5DAWwRo57A==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.27.1.tgz", + "integrity": "sha512-xvINq24TRojDuyt6JGtHmkVkrfVV3FPT16uytxImLeBZqW3/H52yN+kM1MGuyPkIQxrzKwPHs5U/MP3qKyzkGw==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.28.6.tgz", + "integrity": "sha512-/wHc/paTUmsDYN7SZkpWxogTOBNnlx7nBQYfy6JJlCT7G3mVhltk3e++N7zV0XfgGsrqBxd4rJQt9H16I21Y1Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.29.2.tgz", + "integrity": "sha512-DYD23veRYGvBFhcTY1iUvJnDNpuqNd/BzBwCvzOTKUnJjKg5kpUBh3/u9585Agdkgj+QuygG7jLfOPWMa2KVNw==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.28.5", + "@babel/plugin-bugfix-safari-class-field-initializer-scope": "^7.27.1", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.27.1", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.27.1", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.28.6", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-import-assertions": "^7.28.6", + "@babel/plugin-syntax-import-attributes": "^7.28.6", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.27.1", + "@babel/plugin-transform-async-generator-functions": "^7.29.0", + "@babel/plugin-transform-async-to-generator": "^7.28.6", + "@babel/plugin-transform-block-scoped-functions": "^7.27.1", + "@babel/plugin-transform-block-scoping": "^7.28.6", + "@babel/plugin-transform-class-properties": "^7.28.6", + "@babel/plugin-transform-class-static-block": "^7.28.6", + "@babel/plugin-transform-classes": "^7.28.6", + "@babel/plugin-transform-computed-properties": "^7.28.6", + "@babel/plugin-transform-destructuring": "^7.28.5", + "@babel/plugin-transform-dotall-regex": "^7.28.6", + "@babel/plugin-transform-duplicate-keys": "^7.27.1", + "@babel/plugin-transform-duplicate-named-capturing-groups-regex": "^7.29.0", + "@babel/plugin-transform-dynamic-import": "^7.27.1", + "@babel/plugin-transform-explicit-resource-management": "^7.28.6", + "@babel/plugin-transform-exponentiation-operator": "^7.28.6", + "@babel/plugin-transform-export-namespace-from": "^7.27.1", + "@babel/plugin-transform-for-of": "^7.27.1", + "@babel/plugin-transform-function-name": "^7.27.1", + "@babel/plugin-transform-json-strings": "^7.28.6", + "@babel/plugin-transform-literals": "^7.27.1", + "@babel/plugin-transform-logical-assignment-operators": "^7.28.6", + "@babel/plugin-transform-member-expression-literals": "^7.27.1", + "@babel/plugin-transform-modules-amd": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.28.6", + "@babel/plugin-transform-modules-systemjs": "^7.29.0", + "@babel/plugin-transform-modules-umd": "^7.27.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.29.0", + "@babel/plugin-transform-new-target": "^7.27.1", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.28.6", + "@babel/plugin-transform-numeric-separator": "^7.28.6", + "@babel/plugin-transform-object-rest-spread": "^7.28.6", + "@babel/plugin-transform-object-super": "^7.27.1", + "@babel/plugin-transform-optional-catch-binding": "^7.28.6", + "@babel/plugin-transform-optional-chaining": "^7.28.6", + "@babel/plugin-transform-parameters": "^7.27.7", + "@babel/plugin-transform-private-methods": "^7.28.6", + "@babel/plugin-transform-private-property-in-object": "^7.28.6", + "@babel/plugin-transform-property-literals": "^7.27.1", + "@babel/plugin-transform-regenerator": "^7.29.0", + "@babel/plugin-transform-regexp-modifiers": "^7.28.6", + "@babel/plugin-transform-reserved-words": "^7.27.1", + "@babel/plugin-transform-shorthand-properties": "^7.27.1", + "@babel/plugin-transform-spread": "^7.28.6", + "@babel/plugin-transform-sticky-regex": "^7.27.1", + "@babel/plugin-transform-template-literals": "^7.27.1", + "@babel/plugin-transform-typeof-symbol": "^7.27.1", + "@babel/plugin-transform-unicode-escapes": "^7.27.1", + "@babel/plugin-transform-unicode-property-regex": "^7.28.6", + "@babel/plugin-transform-unicode-regex": "^7.27.1", + "@babel/plugin-transform-unicode-sets-regex": "^7.28.6", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.15", + "babel-plugin-polyfill-corejs3": "^0.14.0", + "babel-plugin-polyfill-regenerator": "^0.6.6", + "core-js-compat": "^3.48.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.14.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.14.2.tgz", + "integrity": "sha512-coWpDLJ410R781Npmn/SIBZEsAetR4xVi0SxLMXPaMO4lSf1MwnkGYMtkFxew0Dn8B3/CpbpYxN0JCgg8mn67g==", + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.8", + "core-js-compat": "^3.48.0" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.28.5.tgz", + "integrity": "sha512-Z3J8vhRq7CeLjdC58jLv4lnZ5RKFUJWqH5emvxmv9Hv3BD1T9R/Im713R4MTKwvFaV74ejZ3sM01LyEKk4ugNQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-transform-react-display-name": "^7.28.0", + "@babel/plugin-transform-react-jsx": "^7.27.1", + "@babel/plugin-transform-react-jsx-development": "^7.27.1", + "@babel/plugin-transform-react-pure-annotations": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.28.5.tgz", + "integrity": "sha512-+bQy5WOI2V6LJZpPVxY+yp66XdZ2yifu0Mc1aP5CQKgjn4QM5IN2i5fAZ4xKop47pr8rpVhiAeu+nDQa12C8+g==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-typescript": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.29.2.tgz", + "integrity": "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.29.2.tgz", + "integrity": "sha512-Lc94FOD5+0aXhdb0Tdg3RUtqT6yWbI/BbFWvlaSJ3gAb9Ks+99nHRDKADVqC37er4eCB0fHyWT+y+K3QOvJKbw==", + "license": "MIT", + "dependencies": { + "core-js-pure": "^3.48.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@csstools/cascade-layer-name-parser": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-2.0.5.tgz", + "integrity": "sha512-p1ko5eHgV+MgXFVa4STPKpvPxr6ReS8oS2jzTukjR74i5zJNyWO1ZM1m8YKBXnzDKWfBN1ztLYlHxbVemDD88A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/color-helpers": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/media-query-list-parser": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-4.0.3.tgz", + "integrity": "sha512-HAYH7d3TLRHDOUQK4mZKf9k9Ph/m8Akstg66ywKR4SFAigjs3yBiUeZtFxywiTm5moZMAp/5W/ZuFnNXXYLuuQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/postcss-alpha-function": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-alpha-function/-/postcss-alpha-function-1.0.1.tgz", + "integrity": "sha512-isfLLwksH3yHkFXfCI2Gcaqg7wGGHZZwunoJzEZk0yKYIokgre6hYVFibKL3SYAoR1kBXova8LB+JoO5vZzi9w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-cascade-layers": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@csstools/postcss-cascade-layers/-/postcss-cascade-layers-5.0.2.tgz", + "integrity": "sha512-nWBE08nhO8uWl6kSAeCx4im7QfVko3zLrtgWZY4/bP87zrSPpSyN/3W3TDqz1jJuH+kbKOHXg5rJnK+ZVYcFFg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-cascade-layers/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/@csstools/postcss-cascade-layers/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@csstools/postcss-color-function": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-function/-/postcss-color-function-4.0.12.tgz", + "integrity": "sha512-yx3cljQKRaSBc2hfh8rMZFZzChaFgwmO2JfFgFr1vMcF3C/uyy5I4RFIBOIWGq1D+XbKCG789CGkG6zzkLpagA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-color-function-display-p3-linear": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-function-display-p3-linear/-/postcss-color-function-display-p3-linear-1.0.1.tgz", + "integrity": "sha512-E5qusdzhlmO1TztYzDIi8XPdPoYOjoTY6HBYBCYSj+Gn4gQRBlvjgPQXzfzuPQqt8EhkC/SzPKObg4Mbn8/xMg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-color-mix-function": { + "version": "3.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-function/-/postcss-color-mix-function-3.0.12.tgz", + "integrity": "sha512-4STERZfCP5Jcs13P1U5pTvI9SkgLgfMUMhdXW8IlJWkzOOOqhZIjcNhWtNJZes2nkBDsIKJ0CJtFtuaZ00moag==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-color-mix-variadic-function-arguments": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-variadic-function-arguments/-/postcss-color-mix-variadic-function-arguments-1.0.2.tgz", + "integrity": "sha512-rM67Gp9lRAkTo+X31DUqMEq+iK+EFqsidfecmhrteErxJZb6tUoJBVQca1Vn1GpDql1s1rD1pKcuYzMsg7Z1KQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-content-alt-text": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@csstools/postcss-content-alt-text/-/postcss-content-alt-text-2.0.8.tgz", + "integrity": "sha512-9SfEW9QCxEpTlNMnpSqFaHyzsiRpZ5J5+KqCu1u5/eEJAWsMhzT40qf0FIbeeglEvrGRMdDzAxMIz3wqoGSb+Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-contrast-color-function": { + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-contrast-color-function/-/postcss-contrast-color-function-2.0.12.tgz", + "integrity": "sha512-YbwWckjK3qwKjeYz/CijgcS7WDUCtKTd8ShLztm3/i5dhh4NaqzsbYnhm4bjrpFpnLZ31jVcbK8YL77z3GBPzA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-exponential-functions": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-exponential-functions/-/postcss-exponential-functions-2.0.9.tgz", + "integrity": "sha512-abg2W/PI3HXwS/CZshSa79kNWNZHdJPMBXeZNyPQFbbj8sKO3jXxOt/wF7juJVjyDTc6JrvaUZYFcSBZBhaxjw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-font-format-keywords": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-font-format-keywords/-/postcss-font-format-keywords-4.0.0.tgz", + "integrity": "sha512-usBzw9aCRDvchpok6C+4TXC57btc4bJtmKQWOHQxOVKen1ZfVqBUuCZ/wuqdX5GHsD0NRSr9XTP+5ID1ZZQBXw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-gamut-mapping": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@csstools/postcss-gamut-mapping/-/postcss-gamut-mapping-2.0.11.tgz", + "integrity": "sha512-fCpCUgZNE2piVJKC76zFsgVW1apF6dpYsqGyH8SIeCcM4pTEsRTWTLCaJIMKFEundsCKwY1rwfhtrio04RJ4Dw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-gradients-interpolation-method": { + "version": "5.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-gradients-interpolation-method/-/postcss-gradients-interpolation-method-5.0.12.tgz", + "integrity": "sha512-jugzjwkUY0wtNrZlFeyXzimUL3hN4xMvoPnIXxoZqxDvjZRiSh+itgHcVUWzJ2VwD/VAMEgCLvtaJHX+4Vj3Ow==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-hwb-function": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-hwb-function/-/postcss-hwb-function-4.0.12.tgz", + "integrity": "sha512-mL/+88Z53KrE4JdePYFJAQWFrcADEqsLprExCM04GDNgHIztwFzj0Mbhd/yxMBngq0NIlz58VVxjt5abNs1VhA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-ic-unit": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-ic-unit/-/postcss-ic-unit-4.0.4.tgz", + "integrity": "sha512-yQ4VmossuOAql65sCPppVO1yfb7hDscf4GseF0VCA/DTDaBc0Wtf8MTqVPfjGYlT5+2buokG0Gp7y0atYZpwjg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-initial": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-initial/-/postcss-initial-2.0.1.tgz", + "integrity": "sha512-L1wLVMSAZ4wovznquK0xmC7QSctzO4D0Is590bxpGqhqjboLXYA16dWZpfwImkdOgACdQ9PqXsuRroW6qPlEsg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-is-pseudo-class/-/postcss-is-pseudo-class-5.0.3.tgz", + "integrity": "sha512-jS/TY4SpG4gszAtIg7Qnf3AS2pjcUM5SzxpApOrlndMeGhIbaTzWBzzP/IApXoNWEW7OhcjkRT48jnAUIFXhAQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@csstools/postcss-light-dark-function": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@csstools/postcss-light-dark-function/-/postcss-light-dark-function-2.0.11.tgz", + "integrity": "sha512-fNJcKXJdPM3Lyrbmgw2OBbaioU7yuKZtiXClf4sGdQttitijYlZMD5K7HrC/eF83VRWRrYq6OZ0Lx92leV2LFA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-float-and-clear": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-float-and-clear/-/postcss-logical-float-and-clear-3.0.0.tgz", + "integrity": "sha512-SEmaHMszwakI2rqKRJgE+8rpotFfne1ZS6bZqBoQIicFyV+xT1UF42eORPxJkVJVrH9C0ctUgwMSn3BLOIZldQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-overflow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-overflow/-/postcss-logical-overflow-2.0.0.tgz", + "integrity": "sha512-spzR1MInxPuXKEX2csMamshR4LRaSZ3UXVaRGjeQxl70ySxOhMpP2252RAFsg8QyyBXBzuVOOdx1+bVO5bPIzA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-overscroll-behavior": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-overscroll-behavior/-/postcss-logical-overscroll-behavior-2.0.0.tgz", + "integrity": "sha512-e/webMjoGOSYfqLunyzByZj5KKe5oyVg/YSbie99VEaSDE2kimFm0q1f6t/6Jo+VVCQ/jbe2Xy+uX+C4xzWs4w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-resize": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-resize/-/postcss-logical-resize-3.0.0.tgz", + "integrity": "sha512-DFbHQOFW/+I+MY4Ycd/QN6Dg4Hcbb50elIJCfnwkRTCX05G11SwViI5BbBlg9iHRl4ytB7pmY5ieAFk3ws7yyg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-viewport-units": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-viewport-units/-/postcss-logical-viewport-units-3.0.4.tgz", + "integrity": "sha512-q+eHV1haXA4w9xBwZLKjVKAWn3W2CMqmpNpZUk5kRprvSiBEGMgrNH3/sJZ8UA3JgyHaOt3jwT9uFa4wLX4EqQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-media-minmax": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-media-minmax/-/postcss-media-minmax-2.0.9.tgz", + "integrity": "sha512-af9Qw3uS3JhYLnCbqtZ9crTvvkR+0Se+bBqSr7ykAnl9yKhk6895z9rf+2F4dClIDJWxgn0iZZ1PSdkhrbs2ig==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/media-query-list-parser": "^4.0.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-media-queries-aspect-ratio-number-values": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/postcss-media-queries-aspect-ratio-number-values/-/postcss-media-queries-aspect-ratio-number-values-3.0.5.tgz", + "integrity": "sha512-zhAe31xaaXOY2Px8IYfoVTB3wglbJUVigGphFLj6exb7cjZRH9A6adyE22XfFK3P2PzwRk0VDeTJmaxpluyrDg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/media-query-list-parser": "^4.0.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-nested-calc": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-nested-calc/-/postcss-nested-calc-4.0.0.tgz", + "integrity": "sha512-jMYDdqrQQxE7k9+KjstC3NbsmC063n1FTPLCgCRS2/qHUbHM0mNy9pIn4QIiQGs9I/Bg98vMqw7mJXBxa0N88A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-normalize-display-values": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.1.tgz", + "integrity": "sha512-TQUGBuRvxdc7TgNSTevYqrL8oItxiwPDixk20qCB5me/W8uF7BPbhRrAvFuhEoywQp/woRsUZ6SJ+sU5idZAIA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-oklab-function": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-oklab-function/-/postcss-oklab-function-4.0.12.tgz", + "integrity": "sha512-HhlSmnE1NKBhXsTnNGjxvhryKtO7tJd1w42DKOGFD6jSHtYOrsJTQDKPMwvOfrzUAk8t7GcpIfRyM7ssqHpFjg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-position-area-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-position-area-property/-/postcss-position-area-property-1.0.0.tgz", + "integrity": "sha512-fUP6KR8qV2NuUZV3Cw8itx0Ep90aRjAZxAEzC3vrl6yjFv+pFsQbR18UuQctEKmA72K9O27CoYiKEgXxkqjg8Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-progressive-custom-properties": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-4.2.1.tgz", + "integrity": "sha512-uPiiXf7IEKtUQXsxu6uWtOlRMXd2QWWy5fhxHDnPdXKCQckPP3E34ZgDoZ62r2iT+UOgWsSbM4NvHE5m3mAEdw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-property-rule-prelude-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-property-rule-prelude-list/-/postcss-property-rule-prelude-list-1.0.0.tgz", + "integrity": "sha512-IxuQjUXq19fobgmSSvUDO7fVwijDJaZMvWQugxfEUxmjBeDCVaDuMpsZ31MsTm5xbnhA+ElDi0+rQ7sQQGisFA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-random-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-random-function/-/postcss-random-function-2.0.1.tgz", + "integrity": "sha512-q+FQaNiRBhnoSNo+GzqGOIBKoHQ43lYz0ICrV+UudfWnEF6ksS6DsBIJSISKQT2Bvu3g4k6r7t0zYrk5pDlo8w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-relative-color-syntax": { + "version": "3.0.12", + "resolved": "https://registry.npmjs.org/@csstools/postcss-relative-color-syntax/-/postcss-relative-color-syntax-3.0.12.tgz", + "integrity": "sha512-0RLIeONxu/mtxRtf3o41Lq2ghLimw0w9ByLWnnEVuy89exmEEq8bynveBxNW3nyHqLAFEeNtVEmC1QK9MZ8Huw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-scope-pseudo-class": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-scope-pseudo-class/-/postcss-scope-pseudo-class-4.0.1.tgz", + "integrity": "sha512-IMi9FwtH6LMNuLea1bjVMQAsUhFxJnyLSgOp/cpv5hrzWmrUYU5fm0EguNDIIOHUqzXode8F/1qkC/tEo/qN8Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-scope-pseudo-class/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@csstools/postcss-sign-functions": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-sign-functions/-/postcss-sign-functions-1.1.4.tgz", + "integrity": "sha512-P97h1XqRPcfcJndFdG95Gv/6ZzxUBBISem0IDqPZ7WMvc/wlO+yU0c5D/OCpZ5TJoTt63Ok3knGk64N+o6L2Pg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-stepped-value-functions": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-stepped-value-functions/-/postcss-stepped-value-functions-4.0.9.tgz", + "integrity": "sha512-h9btycWrsex4dNLeQfyU3y3w40LMQooJWFMm/SK9lrKguHDcFl4VMkncKKoXi2z5rM9YGWbUQABI8BT2UydIcA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-syntax-descriptor-syntax-production": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-syntax-descriptor-syntax-production/-/postcss-syntax-descriptor-syntax-production-1.0.1.tgz", + "integrity": "sha512-GneqQWefjM//f4hJ/Kbox0C6f2T7+pi4/fqTqOFGTL3EjnvOReTqO1qUQ30CaUjkwjYq9qZ41hzarrAxCc4gow==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-system-ui-font-family": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-system-ui-font-family/-/postcss-system-ui-font-family-1.0.0.tgz", + "integrity": "sha512-s3xdBvfWYfoPSBsikDXbuorcMG1nN1M6GdU0qBsGfcmNR0A/qhloQZpTxjA3Xsyrk1VJvwb2pOfiOT3at/DuIQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-text-decoration-shorthand": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-text-decoration-shorthand/-/postcss-text-decoration-shorthand-4.0.3.tgz", + "integrity": "sha512-KSkGgZfx0kQjRIYnpsD7X2Om9BUXX/Kii77VBifQW9Ih929hK0KNjVngHDH0bFB9GmfWcR9vJYJJRvw/NQjkrA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-trigonometric-functions": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-trigonometric-functions/-/postcss-trigonometric-functions-4.0.9.tgz", + "integrity": "sha512-Hnh5zJUdpNrJqK9v1/E3BbrQhaDTj5YiX7P61TOvUhoDHnUmsNNxcDAgkQ32RrcWx9GVUvfUNPcUkn8R3vIX6A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-unset-value": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-unset-value/-/postcss-unset-value-4.0.0.tgz", + "integrity": "sha512-cBz3tOCI5Fw6NIFEwU3RiwK6mn3nKegjpJuzCndoGq3BZPkUjnsq7uQmIeMNeMbMk7YD2MfKcgCpZwX5jyXqCA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/utilities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@csstools/utilities/-/utilities-2.0.0.tgz", + "integrity": "sha512-5VdOr0Z71u+Yp3ozOx8T11N703wIFGVRgOWbOZMKgglPJsWA54MRIoMNVMa7shUToIhx5J8vX4sOZgD2XiihiQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/core": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@docsearch/core/-/core-4.6.2.tgz", + "integrity": "sha512-/S0e6Dj7Zcm8m9Rru49YEX49dhU11be68c+S/BCyN8zQsTTgkKzXlhRbVL5mV6lOLC2+ZRRryaTdcm070Ug2oA==", + "license": "MIT", + "peerDependencies": { + "@types/react": ">= 16.8.0 < 20.0.0", + "react": ">= 16.8.0 < 20.0.0", + "react-dom": ">= 16.8.0 < 20.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/@docsearch/css": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-4.6.2.tgz", + "integrity": "sha512-fH/cn8BjEEdM2nJdjNMHIvOVYupG6AIDtFVDgIZrNzdCSj4KXr9kd+hsehqsNGYjpUjObeKYKvgy/IwCb1jZYQ==", + "license": "MIT" + }, + "node_modules/@docsearch/react": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-4.6.2.tgz", + "integrity": "sha512-/BbtGFtqVOGwZx0dw/UfhN/0/DmMQYnulY4iv0tPRhC2JCXv0ka/+izwt3Jzo1ZxXS/2eMvv9zHsBJOK1I9f/w==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-core": "1.19.2", + "@docsearch/core": "4.6.2", + "@docsearch/css": "4.6.2" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 20.0.0", + "react": ">= 16.8.0 < 20.0.0", + "react-dom": ">= 16.8.0 < 20.0.0", + "search-insights": ">= 1 < 3" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "search-insights": { + "optional": true + } + } + }, + "node_modules/@docusaurus/babel": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/babel/-/babel-3.9.2.tgz", + "integrity": "sha512-GEANdi/SgER+L7Japs25YiGil/AUDnFFHaCGPBbundxoWtCkA2lmy7/tFmgED4y1htAy6Oi4wkJEQdGssnw9MA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.25.9", + "@babel/generator": "^7.25.9", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.25.9", + "@babel/preset-env": "^7.25.9", + "@babel/preset-react": "^7.25.9", + "@babel/preset-typescript": "^7.25.9", + "@babel/runtime": "^7.25.9", + "@babel/runtime-corejs3": "^7.25.9", + "@babel/traverse": "^7.25.9", + "@docusaurus/logger": "3.9.2", + "@docusaurus/utils": "3.9.2", + "babel-plugin-dynamic-import-node": "^2.3.3", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/bundler": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/bundler/-/bundler-3.9.2.tgz", + "integrity": "sha512-ZOVi6GYgTcsZcUzjblpzk3wH1Fya2VNpd5jtHoCCFcJlMQ1EYXZetfAnRHLcyiFeBABaI1ltTYbOBtH/gahGVA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.25.9", + "@docusaurus/babel": "3.9.2", + "@docusaurus/cssnano-preset": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "babel-loader": "^9.2.1", + "clean-css": "^5.3.3", + "copy-webpack-plugin": "^11.0.0", + "css-loader": "^6.11.0", + "css-minimizer-webpack-plugin": "^5.0.1", + "cssnano": "^6.1.2", + "file-loader": "^6.2.0", + "html-minifier-terser": "^7.2.0", + "mini-css-extract-plugin": "^2.9.2", + "null-loader": "^4.0.1", + "postcss": "^8.5.4", + "postcss-loader": "^7.3.4", + "postcss-preset-env": "^10.2.1", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "webpack": "^5.95.0", + "webpackbar": "^6.0.1" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "@docusaurus/faster": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/faster": { + "optional": true + } + } + }, + "node_modules/@docusaurus/core": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.9.2.tgz", + "integrity": "sha512-HbjwKeC+pHUFBfLMNzuSjqFE/58+rLVKmOU3lxQrpsxLBOGosYco/Q0GduBb0/jEMRiyEqjNT/01rRdOMWq5pw==", + "license": "MIT", + "dependencies": { + "@docusaurus/babel": "3.9.2", + "@docusaurus/bundler": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/mdx-loader": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "core-js": "^3.31.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "eval": "^0.1.8", + "execa": "5.1.1", + "fs-extra": "^11.1.1", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.6.0", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "open": "^8.4.0", + "p-map": "^4.0.0", + "prompts": "^2.4.2", + "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.6", + "tinypool": "^1.0.2", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "webpack": "^5.95.0", + "webpack-bundle-analyzer": "^4.10.2", + "webpack-dev-server": "^5.2.2", + "webpack-merge": "^6.0.1" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "@mdx-js/react": "^3.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.9.2.tgz", + "integrity": "sha512-8gBKup94aGttRduABsj7bpPFTX7kbwu+xh3K9NMCF5K4bWBqTFYW+REKHF6iBVDHRJ4grZdIPbvkiHd/XNKRMQ==", + "license": "MIT", + "dependencies": { + "cssnano-preset-advanced": "^6.1.2", + "postcss": "^8.5.4", + "postcss-sort-media-queries": "^5.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.9.2.tgz", + "integrity": "sha512-/SVCc57ByARzGSU60c50rMyQlBuMIJCjcsJlkphxY6B0GV4UH3tcA1994N8fFfbJ9kX3jIBe/xg3XP5qBtGDbA==", + "license": "MIT", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.9.2.tgz", + "integrity": "sha512-wiYoGwF9gdd6rev62xDU8AAM8JuLI/hlwOtCzMmYcspEkzecKrP8J8X+KpYnTlACBUUtXNJpSoCwFWJhLRevzQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/logger": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "@mdx-js/mdx": "^3.0.0", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^3.0.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "image-size": "^2.0.2", + "mdast-util-mdx": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "rehype-raw": "^7.0.0", + "remark-directive": "^3.0.0", + "remark-emoji": "^4.0.0", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.0", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0", + "url-loader": "^4.1.1", + "vfile": "^6.0.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.9.2.tgz", + "integrity": "sha512-8qVe2QA9hVLzvnxP46ysuofJUIc/yYQ82tvA/rBTrnpXtCjNSFLxEZfd5U8cYZuJIVlkPxamsIgwd5tGZXfvew==", + "license": "MIT", + "dependencies": { + "@docusaurus/types": "3.9.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-client-redirects": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.9.2.tgz", + "integrity": "sha512-lUgMArI9vyOYMzLRBUILcg9vcPTCyyI2aiuXq/4npcMVqOr6GfmwtmBYWSbNMlIUM0147smm4WhpXD0KFboffw==", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.9.2.tgz", + "integrity": "sha512-3I2HXy3L1QcjLJLGAoTvoBnpOwa6DPUa3Q0dMK19UTY9mhPkKQg/DYhAGTiBUKcTR0f08iw7kLPqOhIgdV3eVQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/mdx-loader": "3.9.2", + "@docusaurus/theme-common": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "cheerio": "1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "schema-dts": "^1.1.2", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^5.0.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.9.2.tgz", + "integrity": "sha512-C5wZsGuKTY8jEYsqdxhhFOe1ZDjH0uIYJ9T/jebHwkyxqnr4wW0jTkB72OMqNjsoQRcb0JN3PcSeTwFlVgzCZg==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/mdx-loader": "3.9.2", + "@docusaurus/module-type-aliases": "3.9.2", + "@docusaurus/theme-common": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "schema-dts": "^1.1.2", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.9.2.tgz", + "integrity": "sha512-s4849w/p4noXUrGpPUF0BPqIAfdAe76BLaRGAGKZ1gTDNiGxGcpsLcwJ9OTi1/V8A+AzvsmI9pkjie2zjIQZKA==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/mdx-loader": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-css-cascade-layers": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-css-cascade-layers/-/plugin-css-cascade-layers-3.9.2.tgz", + "integrity": "sha512-w1s3+Ss+eOQbscGM4cfIFBlVg/QKxyYgj26k5AnakuHkKxH6004ZtuLe5awMBotIYF2bbGDoDhpgQ4r/kcj4rQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/plugin-debug": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.9.2.tgz", + "integrity": "sha512-j7a5hWuAFxyQAkilZwhsQ/b3T7FfHZ+0dub6j/GxKNFJp2h9qk/P1Bp7vrGASnvA9KNQBBL1ZXTe7jlh4VdPdA==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "fs-extra": "^11.1.1", + "react-json-view-lite": "^2.3.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.9.2.tgz", + "integrity": "sha512-mAwwQJ1Us9jL/lVjXtErXto4p4/iaLlweC54yDUK1a97WfkC6Z2k5/769JsFgwOwOP+n5mUQGACXOEQ0XDuVUw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.9.2.tgz", + "integrity": "sha512-YJ4lDCphabBtw19ooSlc1MnxtYGpjFV9rEdzjLsUnBCeis2djUyCozZaFhCg6NGEwOn7HDDyMh0yzcdRpnuIvA==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.9.2.tgz", + "integrity": "sha512-LJtIrkZN/tuHD8NqDAW1Tnw0ekOwRTfobWPsdO15YxcicBo2ykKF0/D6n0vVBfd3srwr9Z6rzrIWYrMzBGrvNw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.9.2.tgz", + "integrity": "sha512-WLh7ymgDXjG8oPoM/T4/zUP7KcSuFYRZAUTl8vR6VzYkfc18GBM4xLhcT+AKOwun6kBivYKUJf+vlqYJkm+RHw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-svgr": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-svgr/-/plugin-svgr-3.9.2.tgz", + "integrity": "sha512-n+1DE+5b3Lnf27TgVU5jM1d4x5tUh2oW5LTsBxJX4PsAPV0JGcmI6p3yLYtEY0LRVEIJh+8RsdQmRE66wSV8mw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "@svgr/core": "8.1.0", + "@svgr/webpack": "^8.1.0", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.9.2.tgz", + "integrity": "sha512-IgyYO2Gvaigi21LuDIe+nvmN/dfGXAiMcV/murFqcpjnZc7jxFAxW+9LEjdPt61uZLxG4ByW/oUmX/DDK9t/8w==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/plugin-content-blog": "3.9.2", + "@docusaurus/plugin-content-docs": "3.9.2", + "@docusaurus/plugin-content-pages": "3.9.2", + "@docusaurus/plugin-css-cascade-layers": "3.9.2", + "@docusaurus/plugin-debug": "3.9.2", + "@docusaurus/plugin-google-analytics": "3.9.2", + "@docusaurus/plugin-google-gtag": "3.9.2", + "@docusaurus/plugin-google-tag-manager": "3.9.2", + "@docusaurus/plugin-sitemap": "3.9.2", + "@docusaurus/plugin-svgr": "3.9.2", + "@docusaurus/theme-classic": "3.9.2", + "@docusaurus/theme-common": "3.9.2", + "@docusaurus/theme-search-algolia": "3.9.2", + "@docusaurus/types": "3.9.2" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.9.2.tgz", + "integrity": "sha512-IGUsArG5hhekXd7RDb11v94ycpJpFdJPkLnt10fFQWOVxAtq5/D7hT6lzc2fhyQKaaCE62qVajOMKL7OiAFAIA==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/mdx-loader": "3.9.2", + "@docusaurus/module-type-aliases": "3.9.2", + "@docusaurus/plugin-content-blog": "3.9.2", + "@docusaurus/plugin-content-docs": "3.9.2", + "@docusaurus/plugin-content-pages": "3.9.2", + "@docusaurus/theme-common": "3.9.2", + "@docusaurus/theme-translations": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "infima": "0.2.0-alpha.45", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.5.4", + "prism-react-renderer": "^2.3.0", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.9.2.tgz", + "integrity": "sha512-6c4DAbR6n6nPbnZhY2V3tzpnKnGL+6aOsLvFL26VRqhlczli9eWG0VDUNoCQEPnGwDMhPS42UhSAnz5pThm5Ag==", + "license": "MIT", + "dependencies": { + "@docusaurus/mdx-loader": "3.9.2", + "@docusaurus/module-type-aliases": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^2.0.0", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^2.3.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.9.2.tgz", + "integrity": "sha512-GBDSFNwjnh5/LdkxCKQHkgO2pIMX1447BxYUBG2wBiajS21uj64a+gH/qlbQjDLxmGrbrllBrtJkUHxIsiwRnw==", + "license": "MIT", + "dependencies": { + "@docsearch/react": "^3.9.0 || ^4.1.0", + "@docusaurus/core": "3.9.2", + "@docusaurus/logger": "3.9.2", + "@docusaurus/plugin-content-docs": "3.9.2", + "@docusaurus/theme-common": "3.9.2", + "@docusaurus/theme-translations": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-validation": "3.9.2", + "algoliasearch": "^5.37.0", + "algoliasearch-helper": "^3.26.0", + "clsx": "^2.0.0", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=20.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.9.2.tgz", + "integrity": "sha512-vIryvpP18ON9T9rjgMRFLr2xJVDpw1rtagEGf8Ccce4CkTrvM/fRB8N2nyWYOW5u3DdjkwKw5fBa+3tbn9P4PA==", + "license": "MIT", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/tsconfig": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.9.2.tgz", + "integrity": "sha512-j6/Fp4Rlpxsc632cnRnl5HpOWeb6ZKssDj6/XzzAzVGXXfm9Eptx3rxCC+fDzySn9fHTS+CWJjPineCR1bB5WQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@docusaurus/types": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.9.2.tgz", + "integrity": "sha512-Ux1JUNswg+EfUEmajJjyhIohKceitY/yzjRUpu04WXgvVz+fbhVC0p+R0JhvEu4ytw8zIAys2hrdpQPBHRIa8Q==", + "license": "MIT", + "dependencies": { + "@mdx-js/mdx": "^3.0.0", + "@types/history": "^4.7.11", + "@types/mdast": "^4.0.2", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.95.0", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/types/node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "license": "MIT", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.9.2.tgz", + "integrity": "sha512-lBSBiRruFurFKXr5Hbsl2thmGweAPmddhF3jb99U4EMDA5L+e5Y1rAkOS07Nvrup7HUMBDrCV45meaxZnt28nQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/logger": "3.9.2", + "@docusaurus/types": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "escape-string-regexp": "^4.0.0", + "execa": "5.1.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "jiti": "^1.20.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "p-queue": "^6.6.2", + "prompts": "^2.4.2", + "resolve-pathname": "^3.0.0", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.9.2.tgz", + "integrity": "sha512-I53UC1QctruA6SWLvbjbhCpAw7+X7PePoe5pYcwTOEXD/PxeP8LnECAhTHHwWCblyUX5bMi4QLRkxvyZ+IT8Aw==", + "license": "MIT", + "dependencies": { + "@docusaurus/types": "3.9.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.9.2.tgz", + "integrity": "sha512-l7yk3X5VnNmATbwijJkexdhulNsQaNDwoagiwujXoxFbWLcxHQqNQ+c/IAlzrfMMOfa/8xSBZ7KEKDesE/2J7A==", + "license": "MIT", + "dependencies": { + "@docusaurus/logger": "3.9.2", + "@docusaurus/utils": "3.9.2", + "@docusaurus/utils-common": "3.9.2", + "fs-extra": "^11.2.0", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=20.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.11.tgz", + "integrity": "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@jsonjoy.com/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-q6XAnWQDIMA3+FTiOYajoYqySkO+JSat0ytXGSuRdq9uXE7o92gzuQwQM14xaCRlBLGq3v5miDGC4vkVTn54xA==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/buffers": { + "version": "17.67.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/buffers/-/buffers-17.67.0.tgz", + "integrity": "sha512-tfExRpYxBvi32vPs9ZHaTjSP4fHAfzSmcahOfNxtvGHcyJel+aibkPlGeBB+7AoC6hL7lXIE++8okecBxx7lcw==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/codegen": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/codegen/-/codegen-1.0.0.tgz", + "integrity": "sha512-E8Oy+08cmCf0EK/NMxpaJZmOxPqM+6iSe2S4nlSBrPZOORoDJILxtbSUEDKQyTamm/BVAhIGllOBNU79/dwf0g==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-core": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-core/-/fs-core-4.57.1.tgz", + "integrity": "sha512-YrEi/ZPmgc+GfdO0esBF04qv8boK9Dg9WpRQw/+vM8Qt3nnVIJWIa8HwZ/LXVZ0DB11XUROM8El/7yYTJX+WtA==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-node-builtins": "4.57.1", + "@jsonjoy.com/fs-node-utils": "4.57.1", + "thingies": "^2.5.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-fsa": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-fsa/-/fs-fsa-4.57.1.tgz", + "integrity": "sha512-ooEPvSW/HQDivPDPZMibHGKZf/QS4WRir1czGZmXmp3MsQqLECZEpN0JobrD8iV9BzsuwdIv+PxtWX9WpPLsIA==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-core": "4.57.1", + "@jsonjoy.com/fs-node-builtins": "4.57.1", + "@jsonjoy.com/fs-node-utils": "4.57.1", + "thingies": "^2.5.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-node": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-node/-/fs-node-4.57.1.tgz", + "integrity": "sha512-3YaKhP8gXEKN+2O49GLNfNb5l2gbnCFHyAaybbA2JkkbQP3dpdef7WcUaHAulg/c5Dg4VncHsA3NWAUSZMR5KQ==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-core": "4.57.1", + "@jsonjoy.com/fs-node-builtins": "4.57.1", + "@jsonjoy.com/fs-node-utils": "4.57.1", + "@jsonjoy.com/fs-print": "4.57.1", + "@jsonjoy.com/fs-snapshot": "4.57.1", + "glob-to-regex.js": "^1.0.0", + "thingies": "^2.5.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-node-builtins": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-node-builtins/-/fs-node-builtins-4.57.1.tgz", + "integrity": "sha512-XHkFKQ5GSH3uxm8c3ZYXVrexGdscpWKIcMWKFQpMpMJc8gA3AwOMBJXJlgpdJqmrhPyQXxaY9nbkNeYpacC0Og==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-node-to-fsa": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-node-to-fsa/-/fs-node-to-fsa-4.57.1.tgz", + "integrity": "sha512-pqGHyWWzNck4jRfaGV39hkqpY5QjRUQ/nRbNT7FYbBa0xf4bDG+TE1Gt2KWZrSkrkZZDE3qZUjYMbjwSliX6pg==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-fsa": "4.57.1", + "@jsonjoy.com/fs-node-builtins": "4.57.1", + "@jsonjoy.com/fs-node-utils": "4.57.1" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-node-utils": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-node-utils/-/fs-node-utils-4.57.1.tgz", + "integrity": "sha512-vp+7ZzIB8v43G+GLXTS4oDUSQmhAsRz532QmmWBbdYA20s465JvwhkSFvX9cVTqRRAQg+vZ7zWDaIEh0lFe2gw==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-node-builtins": "4.57.1" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-print": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-print/-/fs-print-4.57.1.tgz", + "integrity": "sha512-Ynct7ZJmfk6qoXDOKfpovNA36ITUx8rChLmRQtW08J73VOiuNsU8PB6d/Xs7fxJC2ohWR3a5AqyjmLojfrw5yw==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-node-utils": "4.57.1", + "tree-dump": "^1.1.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-snapshot": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/fs-snapshot/-/fs-snapshot-4.57.1.tgz", + "integrity": "sha512-/oG8xBNFMbDXTq9J7vepSA1kerS5vpgd3p5QZSPd+nX59uwodGJftI51gDYyHRpP57P3WCQf7LHtBYPqwUg2Bg==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/buffers": "^17.65.0", + "@jsonjoy.com/fs-node-utils": "4.57.1", + "@jsonjoy.com/json-pack": "^17.65.0", + "@jsonjoy.com/util": "^17.65.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-snapshot/node_modules/@jsonjoy.com/base64": { + "version": "17.67.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/base64/-/base64-17.67.0.tgz", + "integrity": "sha512-5SEsJGsm15aP8TQGkDfJvz9axgPwAEm98S5DxOuYe8e1EbfajcDmgeXXzccEjh+mLnjqEKrkBdjHWS5vFNwDdw==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-snapshot/node_modules/@jsonjoy.com/codegen": { + "version": "17.67.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/codegen/-/codegen-17.67.0.tgz", + "integrity": "sha512-idnkUplROpdBOV0HMcwhsCUS5TRUi9poagdGs70A6S4ux9+/aPuKbh8+UYRTLYQHtXvAdNfQWXDqZEx5k4Dj2Q==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-snapshot/node_modules/@jsonjoy.com/json-pack": { + "version": "17.67.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-17.67.0.tgz", + "integrity": "sha512-t0ejURcGaZsn1ClbJ/3kFqSOjlryd92eQY465IYrezsXmPcfHPE/av4twRSxf6WE+TkZgLY+71vCZbiIiFKA/w==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/base64": "17.67.0", + "@jsonjoy.com/buffers": "17.67.0", + "@jsonjoy.com/codegen": "17.67.0", + "@jsonjoy.com/json-pointer": "17.67.0", + "@jsonjoy.com/util": "17.67.0", + "hyperdyperid": "^1.2.0", + "thingies": "^2.5.0", + "tree-dump": "^1.1.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-snapshot/node_modules/@jsonjoy.com/json-pointer": { + "version": "17.67.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pointer/-/json-pointer-17.67.0.tgz", + "integrity": "sha512-+iqOFInH+QZGmSuaybBUNdh7yvNrXvqR+h3wjXm0N/3JK1EyyFAeGJvqnmQL61d1ARLlk/wJdFKSL+LHJ1eaUA==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/util": "17.67.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/fs-snapshot/node_modules/@jsonjoy.com/util": { + "version": "17.67.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/util/-/util-17.67.0.tgz", + "integrity": "sha512-6+8xBaz1rLSohlGh68D1pdw3AwDi9xydm8QNlAFkvnavCJYSze+pxoW2VKP8p308jtlMRLs5NTHfPlZLd4w7ew==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/buffers": "17.67.0", + "@jsonjoy.com/codegen": "17.67.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/json-pack": { + "version": "1.21.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.21.0.tgz", + "integrity": "sha512-+AKG+R2cfZMShzrF2uQw34v3zbeDYUqnQ+jg7ORic3BGtfw9p/+N6RJbq/kkV8JmYZaINknaEQ2m0/f693ZPpg==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/base64": "^1.1.2", + "@jsonjoy.com/buffers": "^1.2.0", + "@jsonjoy.com/codegen": "^1.0.0", + "@jsonjoy.com/json-pointer": "^1.0.2", + "@jsonjoy.com/util": "^1.9.0", + "hyperdyperid": "^1.2.0", + "thingies": "^2.5.0", + "tree-dump": "^1.1.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/json-pack/node_modules/@jsonjoy.com/buffers": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/buffers/-/buffers-1.2.1.tgz", + "integrity": "sha512-12cdlDwX4RUM3QxmUbVJWqZ/mrK6dFQH4Zxq6+r1YXKXYBNgZXndx2qbCJwh3+WWkCSn67IjnlG3XYTvmvYtgA==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/json-pointer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pointer/-/json-pointer-1.0.2.tgz", + "integrity": "sha512-Fsn6wM2zlDzY1U+v4Nc8bo3bVqgfNTGcn6dMgs6FjrEnt4ZCe60o6ByKRjOGlI2gow0aE/Q41QOigdTqkyK5fg==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/codegen": "^1.0.0", + "@jsonjoy.com/util": "^1.9.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/util": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/util/-/util-1.9.0.tgz", + "integrity": "sha512-pLuQo+VPRnN8hfPqUTLTHk126wuYdXVxE6aDmjSeV4NCAgyxWbiOIeNJVtID3h1Vzpoi9m4jXezf73I6LgabgQ==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/buffers": "^1.0.0", + "@jsonjoy.com/codegen": "^1.0.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/util/node_modules/@jsonjoy.com/buffers": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/buffers/-/buffers-1.2.1.tgz", + "integrity": "sha512-12cdlDwX4RUM3QxmUbVJWqZ/mrK6dFQH4Zxq6+r1YXKXYBNgZXndx2qbCJwh3+WWkCSn67IjnlG3XYTvmvYtgA==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", + "license": "MIT" + }, + "node_modules/@mdx-js/mdx": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.1.tgz", + "integrity": "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "acorn": "^8.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "recma-build-jsx": "^1.0.0", + "recma-jsx": "^1.0.0", + "recma-stringify": "^1.0.0", + "rehype-recma": "^1.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.1.tgz", + "integrity": "sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==", + "license": "MIT", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@noble/hashes": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.4.0.tgz", + "integrity": "sha512-V1JJ1WTRUqHHrOSh597hURcMqVKVGL/ea3kv0gSnEdsEZ0/+VyPghM1lMNGc00z7CIQorSvbKpuJkxvuHbvdbg==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@peculiar/asn1-cms": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-cms/-/asn1-cms-2.6.1.tgz", + "integrity": "sha512-vdG4fBF6Lkirkcl53q6eOdn3XYKt+kJTG59edgRZORlg/3atWWEReRCx5rYE1ZzTTX6vLK5zDMjHh7vbrcXGtw==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "@peculiar/asn1-x509-attr": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-csr": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-csr/-/asn1-csr-2.6.1.tgz", + "integrity": "sha512-WRWnKfIocHyzFYQTka8O/tXCiBquAPSrRjXbOkHbO4qdmS6loffCEGs+rby6WxxGdJCuunnhS2duHURhjyio6w==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-ecc": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-ecc/-/asn1-ecc-2.6.1.tgz", + "integrity": "sha512-+Vqw8WFxrtDIN5ehUdvlN2m73exS2JVG0UAyfVB31gIfor3zWEAQPD+K9ydCxaj3MLen9k0JhKpu9LqviuCE1g==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-pfx": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-pfx/-/asn1-pfx-2.6.1.tgz", + "integrity": "sha512-nB5jVQy3MAAWvq0KY0R2JUZG8bO/bTLpnwyOzXyEh/e54ynGTatAR+csOnXkkVD9AFZ2uL8Z7EV918+qB1qDvw==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-cms": "^2.6.1", + "@peculiar/asn1-pkcs8": "^2.6.1", + "@peculiar/asn1-rsa": "^2.6.1", + "@peculiar/asn1-schema": "^2.6.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-pkcs8": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-pkcs8/-/asn1-pkcs8-2.6.1.tgz", + "integrity": "sha512-JB5iQ9Izn5yGMw3ZG4Nw3Xn/hb/G38GYF3lf7WmJb8JZUydhVGEjK/ZlFSWhnlB7K/4oqEs8HnfFIKklhR58Tw==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-pkcs9": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-pkcs9/-/asn1-pkcs9-2.6.1.tgz", + "integrity": "sha512-5EV8nZoMSxeWmcxWmmcolg22ojZRgJg+Y9MX2fnE2bGRo5KQLqV5IL9kdSQDZxlHz95tHvIq9F//bvL1OeNILw==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-cms": "^2.6.1", + "@peculiar/asn1-pfx": "^2.6.1", + "@peculiar/asn1-pkcs8": "^2.6.1", + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "@peculiar/asn1-x509-attr": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-rsa": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-rsa/-/asn1-rsa-2.6.1.tgz", + "integrity": "sha512-1nVMEh46SElUt5CB3RUTV4EG/z7iYc7EoaDY5ECwganibQPkZ/Y2eMsTKB/LeyrUJ+W/tKoD9WUqIy8vB+CEdA==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-schema": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-schema/-/asn1-schema-2.6.0.tgz", + "integrity": "sha512-xNLYLBFTBKkCzEZIw842BxytQQATQv+lDTCEMZ8C196iJcJJMBUZxrhSTxLaohMyKK8QlzRNTRkUmanucnDSqg==", + "license": "MIT", + "dependencies": { + "asn1js": "^3.0.6", + "pvtsutils": "^1.3.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-x509": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-x509/-/asn1-x509-2.6.1.tgz", + "integrity": "sha512-O9jT5F1A2+t3r7C4VT7LYGXqkGLK7Kj1xFpz7U0isPrubwU5PbDoyYtx6MiGst29yq7pXN5vZbQFKRCP+lLZlA==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "asn1js": "^3.0.6", + "pvtsutils": "^1.3.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-x509-attr": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-x509-attr/-/asn1-x509-attr-2.6.1.tgz", + "integrity": "sha512-tlW6cxoHwgcQghnJwv3YS+9OO1737zgPogZ+CgWRUK4roEwIPzRH4JEiG770xe5HX2ATfCpmX60gurfWIF9dcQ==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.1", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/x509": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/@peculiar/x509/-/x509-1.14.3.tgz", + "integrity": "sha512-C2Xj8FZ0uHWeCXXqX5B4/gVFQmtSkiuOolzAgutjTfseNOHT3pUjljDZsTSxXFGgio54bCzVFqmEOUrIVk8RDA==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-cms": "^2.6.0", + "@peculiar/asn1-csr": "^2.6.0", + "@peculiar/asn1-ecc": "^2.6.0", + "@peculiar/asn1-pkcs9": "^2.6.0", + "@peculiar/asn1-rsa": "^2.6.0", + "@peculiar/asn1-schema": "^2.6.0", + "@peculiar/asn1-x509": "^2.6.0", + "pvtsutils": "^1.3.6", + "reflect-metadata": "^0.2.2", + "tslib": "^2.8.1", + "tsyringe": "^4.10.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + "integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "license": "MIT", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "license": "MIT", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", + "license": "ISC" + }, + "node_modules/@pnpm/npm-conf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-3.0.2.tgz", + "integrity": "sha512-h104Kh26rR8tm+a3Qkc5S4VLYint3FE48as7+/5oCEcKR2idC/pF1G6AhIXKI+eHPJa/3J9i5z0Al47IeGHPkA==", + "license": "MIT", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.29", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.29.tgz", + "integrity": "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==", + "license": "MIT" + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", + "license": "BSD-3-Clause" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.10", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.10.tgz", + "integrity": "sha512-MTBk/3jGLNB2tVxv6uLlFh1iu64iYOQ2PbdOSK3NW8JZsmlaOh2q6sdtKowBhfw8QFLmYNzTW4/oK4uATIi6ZA==", + "license": "MIT" + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", + "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", + "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", + "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", + "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", + "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", + "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", + "license": "MIT", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", + "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", + "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", + "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", + "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", + "@svgr/babel-plugin-transform-svg-component": "8.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", + "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "camelcase": "^6.2.0", + "cosmiconfig": "^8.1.3", + "snake-case": "^3.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", + "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.21.3", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", + "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "@svgr/hast-util-to-babel-ast": "8.0.0", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", + "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", + "license": "MIT", + "dependencies": { + "cosmiconfig": "^8.1.3", + "deepmerge": "^4.3.1", + "svgo": "^3.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", + "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.3", + "@babel/plugin-transform-react-constant-elements": "^7.21.3", + "@babel/preset-env": "^7.20.2", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.21.0", + "@svgr/core": "8.1.0", + "@svgr/plugin-jsx": "8.1.0", + "@svgr/plugin-svgo": "8.1.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "license": "MIT", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.6", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz", + "integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==", + "license": "MIT", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "license": "MIT", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.13", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.13.tgz", + "integrity": "sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "license": "MIT", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "license": "MIT", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.25", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.25.tgz", + "integrity": "sha512-dVd04UKsfpINUnK0yBoYHDF3xu7xVH4BuDotC/xGuycx4CgbP48X/KF/586bcObxT0HENHXEU8Nqtu6NR+eKhw==", + "license": "MIT", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "^1" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.8", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.8.tgz", + "integrity": "sha512-02S5fmqeoKzVZCHPZid4b8JH2eM5HzQLZWN2FohQEy/0eXTq8VXZfSN6Pcr3F6N9R/vNrj7cpgbhjie6m/1tCA==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==", + "license": "MIT" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==", + "license": "MIT" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-L3LgimLHXtGkWikKnsPg0/VFx9OGZaC+eN1u4r+OB1XRqH3meBIAVC2zr1WdMH+RHmnRkqliQAOHNJ/E0j/e0Q==", + "license": "MIT" + }, + "node_modules/@types/http-errors": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz", + "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", + "license": "MIT" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.17", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.17.tgz", + "integrity": "sha512-ED6LB+Z1AVylNTu7hdzuBqOgMnvG/ld6wGCG8wFnAzKX5uyW2K3WD52v0gnLCTK/VLpXtKckgWuyScYK6cSPaw==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "license": "MIT" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "25.5.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.5.0.tgz", + "integrity": "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.18.0" + } + }, + "node_modules/@types/parse5": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz", + "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==" + }, + "node_modules/@types/prismjs": { + "version": "1.26.6", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.6.tgz", + "integrity": "sha512-vqlvI7qlMvcCBbVe0AKAb4f97//Hy0EBTaiW8AalRnG/xAN5zOiWWyrNqNXeq8+KAuvRewjCVY1+IPxk4RdNYw==", + "license": "MIT" + }, + "node_modules/@types/qs": { + "version": "6.15.0", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.15.0.tgz", + "integrity": "sha512-JawvT8iBVWpzTrz3EGw9BTQFg3BQNmwERdKE22vlTxawwtbyUSlMppvZYKLZzB5zgACXdXxbD3m1bXaMqP/9ow==", + "license": "MIT" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "license": "MIT", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-config": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", + "license": "MIT", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "license": "MIT", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.2", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.2.tgz", + "integrity": "sha512-XISRgDJ2Tc5q4TRqvgJtzsRkFYNJzZrhTdtMoGVBttwzzQJkPnS3WWTFc7kuDRoPtPakl+T+OfdEUjYJj7Jbow==", + "license": "MIT" + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz", + "integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "license": "MIT", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.10", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.10.tgz", + "integrity": "sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw==", + "license": "MIT", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "<1" + } + }, + "node_modules/@types/serve-static/node_modules/@types/send": { + "version": "0.17.6", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.6.tgz", + "integrity": "sha512-Uqt8rPBE8SY0RK8JB1EzVOIZ32uqy8HwdxCnoCOsYrvnswqmFZ/k+9Ikidlk/ImhsdvBsloHbAlewb2IEBV/Og==", + "license": "MIT", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.35", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz", + "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==", + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "license": "MIT", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "license": "Apache-2.0", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "license": "BSD-3-Clause" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "license": "Apache-2.0" + }, + "node_modules/abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-phases": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/acorn-import-phases/-/acorn-import-phases-1.0.4.tgz", + "integrity": "sha512-wKmbr/DDiIXzEOiWrTTUcDm24kQ2vGfZQvM2fwg2vXqR5uW6aapr7ObPtj1th32b9u90/Pf4AItvdTh42fBmVQ==", + "license": "MIT", + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "acorn": "^8.14.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.5", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.5.tgz", + "integrity": "sha512-HEHNfbars9v4pgpW6SO1KSPkfoS0xVOM/9UzkJltjlsHZmJasxg8aXkuZa7SMf8vKGIBhpUsPluQSqhJFCqebw==", + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz", + "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "5.50.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-5.50.0.tgz", + "integrity": "sha512-yE5I83Q2s8euVou8Y3feXK08wyZInJWLYXgWO6Xti9jBUEZAGUahyeQ7wSZWkifLWVnQVKEz5RAmBlXG5nqxog==", + "license": "MIT", + "dependencies": { + "@algolia/abtesting": "1.16.0", + "@algolia/client-abtesting": "5.50.0", + "@algolia/client-analytics": "5.50.0", + "@algolia/client-common": "5.50.0", + "@algolia/client-insights": "5.50.0", + "@algolia/client-personalization": "5.50.0", + "@algolia/client-query-suggestions": "5.50.0", + "@algolia/client-search": "5.50.0", + "@algolia/ingestion": "1.50.0", + "@algolia/monitoring": "1.50.0", + "@algolia/recommend": "5.50.0", + "@algolia/requester-browser-xhr": "5.50.0", + "@algolia/requester-fetch": "5.50.0", + "@algolia/requester-node-http": "5.50.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.28.1", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.28.1.tgz", + "integrity": "sha512-6iXpbkkrAI5HFpCWXlNmIDSBuoN/U1XnEvb2yJAoWfqrZ+DrybI7MQ5P5mthFaprmocq+zbi6HxnR28xnZAYBw==", + "license": "MIT", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "license": "ISC", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "license": "Apache-2.0", + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/aproba": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.1.0.tgz", + "integrity": "sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew==" + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asn1js": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/asn1js/-/asn1js-3.0.7.tgz", + "integrity": "sha512-uLvq6KJu04qoQM6gvBfKFjlh6Gl0vOKQuR5cJMDHQkmwfMOQeN3F3SHCv9SNYSL+CRoHvOGFfllDlVz03GQjvQ==", + "license": "BSD-3-Clause", + "dependencies": { + "pvtsutils": "^1.3.6", + "pvutils": "^1.1.3", + "tslib": "^2.8.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "license": "MIT", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/autocomplete.js": { + "version": "0.37.1", + "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.37.1.tgz", + "integrity": "sha512-PgSe9fHYhZEsm/9jggbjtVsGXJkPLvd+9mC7gZJ662vVL5CRWEtm/mIrrzCx0MrNxHVwxD5d00UOn6NsmL2LUQ==", + "dependencies": { + "immediate": "^3.2.3" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.27", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.27.tgz", + "integrity": "sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001774", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/babel-loader": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.2.1.tgz", + "integrity": "sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==", + "license": "MIT", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "license": "MIT", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.17", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.17.tgz", + "integrity": "sha512-aTyf30K/rqAsNwN76zYrdtx8obu0E4KoUME29B1xj+B3WxgvWkp943vYQ+z8Mv3lw9xHXMHpvSPOBxzAkIa94w==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-define-polyfill-provider": "^0.6.8", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.13.0.tgz", + "integrity": "sha512-U+GNwMdSFgzVmfhNm8GJUX88AadB3uo9KpJqS3FaqNIPKgySuvMb+bHPsOmmuWyIcuqZj/pzt1RUIUZns4y2+A==", + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.5", + "core-js-compat": "^3.43.0" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.8", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.8.tgz", + "integrity": "sha512-M762rNHfSF1EV3SLtnCJXFoQbbIIz0OyRwnCmV0KPC7qosSfCO0QLTSuJX3ayAebubhE6oYBAYPrBA5ljowaZg==", + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.8" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.11", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.11.tgz", + "integrity": "sha512-DAKrHphkJyiGuau/cFieRYhcTFeK/lBuD++C7cZ6KZHbMhBrisoi+EvhQ5RZrIfV5qwsW8kgQ07JIC+MDJRAhg==", + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", + "license": "MIT" + }, + "node_modules/bcp-47-match": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bcp-47-match/-/bcp-47-match-1.0.3.tgz", + "integrity": "sha512-LggQ4YTdjWQSKELZF5JwchnBa1u0pIQSZf5lSdOHEdbVP55h0qICA/FUp3+W99q0xqxYa1ZQizTUH87gecII5w==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/body-parser": { + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz", + "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "~1.2.0", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "on-finished": "~2.4.1", + "qs": "~6.14.0", + "raw-body": "~2.5.3", + "type-is": "~1.6.18", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/bonjour-service": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.3.0.tgz", + "integrity": "sha512-3YuAUiSkWykd+2Azjgyxei8OWf8thdn8AITIog2M4UICzoqfjlqr64WIjEXZllf/W6vK1goqleSR6brGomxQqA==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "license": "ISC" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", + "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "license": "MIT" + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/bytestreamjs": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/bytestreamjs/-/bytestreamjs-2.0.1.tgz", + "integrity": "sha512-U1Z/ob71V/bXfVABvNr/Kumf5VyeQRBEm6Txb0PQ6S7V5GpBM3w4Cbqz/xPDicR5tN0uvDifng8C+5qECeGwyQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "license": "MIT", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "license": "MIT", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "license": "MIT", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001781", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001781.tgz", + "integrity": "sha512-RdwNCyMsNBftLjW6w01z8bKEvT6e/5tpPVEgtn22TiLGlstHOVecsX2KHFkD5e/vRnIE4EGzpuIODb3mtswtkw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "license": "MIT", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "license": "MIT", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "license": "MIT", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "license": "MIT", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "license": "MIT", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "bin": { + "color-support": "bin.js" + } + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "license": "MIT" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "license": "MIT" + }, + "node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==", + "license": "ISC" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "license": "MIT", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.8.1.tgz", + "integrity": "sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "compressible": "~2.0.18", + "debug": "2.6.9", + "negotiator": "~0.6.4", + "on-headers": "~1.1.0", + "safe-buffer": "5.2.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "license": "MIT" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "license": "MIT", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/config-chain/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC" + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "license": "BSD-2-Clause", + "dependencies": { + "dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "license": "MIT" + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz", + "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==", + "license": "MIT" + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "license": "MIT", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "license": "MIT", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.49.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.49.0.tgz", + "integrity": "sha512-es1U2+YTtzpwkxVLwAFdSpaIMyQaq0PBgm3YD1W3Qpsn1NAmO3KSgZfu+oGSWVu6NvLHoHCV/aYcsE5wiB7ALg==", + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.49.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.49.0.tgz", + "integrity": "sha512-VQXt1jr9cBz03b331DFDCCP90b3fanciLkgiOoy8SBHy06gNf+vQ1A3WFLqG7I8TipYIKeYK9wxd0tUrvHcOZA==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.49.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.49.0.tgz", + "integrity": "sha512-XM4RFka59xATyJv/cS3O3Kml72hQXUeGRuuTmMYFxwzc9/7C8OYTaIR/Ji+Yt8DXzsFLNhat15cE/JP15HrCgw==", + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "license": "MIT" + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "license": "MIT", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "license": "MIT", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-blank-pseudo": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-7.0.1.tgz", + "integrity": "sha512-jf+twWGDf6LDoXDUode+nc7ZlrqfaNphrBIBrcmeP3D8yw1uPaix1gCC8LUQUGQ6CycuK2opkbFFWFuq/a94ag==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-blank-pseudo/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/css-declaration-sorter": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.3.1.tgz", + "integrity": "sha512-gz6x+KkgNCjxq3Var03pRYLhyNfwhkKF1g/yoLgDNtFvVu0/fOLV9C8fFEZRjACp/XQLumjAYo7JVjzH3wLbxA==", + "license": "ISC", + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-has-pseudo": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/css-has-pseudo/-/css-has-pseudo-7.0.3.tgz", + "integrity": "sha512-oG+vKuGyqe/xvEMoxAQrhi7uY16deJR3i7wwhBerVrGQKSqUC5GiOVxTpM9F9B9hw0J+eKeOWLH7E9gZ1Dr5rA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-has-pseudo/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/css-has-pseudo/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/css-loader": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", + "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", + "license": "MIT", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.33", + "postcss-modules-extract-imports": "^3.1.0", + "postcss-modules-local-by-default": "^4.0.5", + "postcss-modules-scope": "^3.2.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "cssnano": "^6.0.1", + "jest-worker": "^29.4.3", + "postcss": "^8.4.24", + "schema-utils": "^4.0.1", + "serialize-javascript": "^6.0.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + "node_modules/css-prefers-color-scheme": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/css-prefers-color-scheme/-/css-prefers-color-scheme-10.0.0.tgz", + "integrity": "sha512-VCtXZAWivRglTZditUfB4StnsWr6YVZ2PRtuxQLKTNRdtAf8tpzaVPE9zXIF3VaSc7O70iK/j1+NXxyQCqdPjQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-select": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.2.2.tgz", + "integrity": "sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-selector-parser": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/css-selector-parser/-/css-selector-parser-1.4.1.tgz", + "integrity": "sha512-HYPSb7y/Z7BNDCOrakL4raGO2zltZkbeXyAd6Tg9obzix6QhzxCotdBl6VT0Dv4vZfJGVz3WL/xaEI9Ly3ul0g==" + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css-what": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.2.2.tgz", + "integrity": "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssdb": { + "version": "8.8.0", + "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-8.8.0.tgz", + "integrity": "sha512-QbLeyz2Bgso1iRlh7IpWk6OKa3lLNGXsujVjDMPl9rOZpxKeiG69icLpbLCFxeURwmcdIfZqQyhlooKJYM4f8Q==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + } + ], + "license": "MIT-0" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", + "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", + "license": "MIT", + "dependencies": { + "cssnano-preset-default": "^6.1.2", + "lilconfig": "^3.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", + "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", + "license": "MIT", + "dependencies": { + "autoprefixer": "^10.4.19", + "browserslist": "^4.23.0", + "cssnano-preset-default": "^6.1.2", + "postcss-discard-unused": "^6.0.5", + "postcss-merge-idents": "^6.0.3", + "postcss-reduce-idents": "^6.0.3", + "postcss-zindex": "^6.0.2" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-default": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", + "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "css-declaration-sorter": "^7.2.0", + "cssnano-utils": "^4.0.2", + "postcss-calc": "^9.0.1", + "postcss-colormin": "^6.1.0", + "postcss-convert-values": "^6.1.0", + "postcss-discard-comments": "^6.0.2", + "postcss-discard-duplicates": "^6.0.3", + "postcss-discard-empty": "^6.0.3", + "postcss-discard-overridden": "^6.0.2", + "postcss-merge-longhand": "^6.0.5", + "postcss-merge-rules": "^6.1.1", + "postcss-minify-font-values": "^6.1.0", + "postcss-minify-gradients": "^6.0.3", + "postcss-minify-params": "^6.1.0", + "postcss-minify-selectors": "^6.0.4", + "postcss-normalize-charset": "^6.0.2", + "postcss-normalize-display-values": "^6.0.2", + "postcss-normalize-positions": "^6.0.2", + "postcss-normalize-repeat-style": "^6.0.2", + "postcss-normalize-string": "^6.0.2", + "postcss-normalize-timing-functions": "^6.0.2", + "postcss-normalize-unicode": "^6.1.0", + "postcss-normalize-url": "^6.0.2", + "postcss-normalize-whitespace": "^6.0.2", + "postcss-ordered-values": "^6.0.2", + "postcss-reduce-initial": "^6.1.0", + "postcss-reduce-transforms": "^6.0.2", + "postcss-svgo": "^6.0.3", + "postcss-unique-selectors": "^6.0.4" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-utils": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", + "integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/csso": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", + "license": "MIT", + "dependencies": { + "css-tree": "~2.2.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==", + "license": "CC0-1.0" + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-browser": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.5.0.tgz", + "integrity": "sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw==", + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.1.tgz", + "integrity": "sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", + "license": "MIT" + }, + "node_modules/detect-port": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", + "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", + "license": "MIT", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/direction": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/direction/-/direction-1.0.4.tgz", + "integrity": "sha512-GYqKi1aH7PJXxdhTeZBFrg8vUBeKXi+cNprXsC1kpJcbcVnV9wBsrOu1cQEdG0WeQwlfHiy3XvnKfIrJ2R0NzQ==", + "bin": { + "direction": "cli.js" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "license": "MIT", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/docusaurus-lunr-search": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/docusaurus-lunr-search/-/docusaurus-lunr-search-3.6.0.tgz", + "integrity": "sha512-CCEAnj5e67sUZmIb2hOl4xb4nDN07fb0fvRDDmdWlYpUvyS1CSKbw4lsGInLyUFEEEBzxQmT6zaVQdF/8Zretg==", + "dependencies": { + "autocomplete.js": "^0.37.1", + "clsx": "^2.1.1", + "gauge": "^3.0.2", + "hast-util-select": "^4.0.2", + "hast-util-to-text": "^2.0.1", + "hogan.js": "^3.0.2", + "lunr": "^2.3.9", + "lunr-languages": "^1.4.0", + "mark.js": "^8.11.1", + "minimatch": "^3.1.2", + "rehype-parse": "^7.0.1", + "to-vfile": "^6.1.0", + "unified": "^9.2.2", + "unist-util-is": "^4.1.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "peerDependencies": { + "@docusaurus/core": "^2.0.0-alpha.60 || ^2.0.0 || ^3.0.0", + "react": "^16.8.4 || ^17 || ^18 || ^19", + "react-dom": "^16.8.4 || ^17 || ^18 || ^19" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/docusaurus-lunr-search/node_modules/bail": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", + "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/trough": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", + "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/unified": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/docusaurus-lunr-search/node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "license": "MIT", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "BSD-2-Clause" + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz", + "integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==", + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "license": "MIT", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "license": "MIT", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", + "license": "MIT" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.328", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.328.tgz", + "integrity": "sha512-QNQ5l45DzYytThO21403XN3FvK0hOkWDG8viNf6jqS42msJ8I4tGDSpBCgvDRRPnkffafiwAym2X2eHeGD2V0w==", + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==", + "license": "MIT" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.20.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.1.tgz", + "integrity": "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.3.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", + "integrity": "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==", + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esast-util-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", + "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esast-util-from-js": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz", + "integrity": "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "acorn": "^8.0.0", + "esast-util-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-scope": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz", + "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.5.0.tgz", + "integrity": "sha512-aMV56R27Gv3QmfmF1MY12GWkGzzeAezAX+UplqHVASfjc9wNzI/X6hC0S9oxq61WT4aQesLGslWP9tKk6ghRZQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz", + "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "~1.20.3", + "content-disposition": "~0.5.4", + "content-type": "~1.0.4", + "cookie": "~0.7.1", + "cookie-signature": "~1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "~1.3.1", + "fresh": "~0.5.2", + "http-errors": "~2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "~2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "~0.1.12", + "proxy-addr": "~2.0.7", + "qs": "~6.14.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "~0.19.0", + "serve-static": "~1.16.2", + "setprototypeof": "1.2.0", + "statuses": "~2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz", + "integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==", + "license": "MIT" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "license": "MIT", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "license": "MIT", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "license": "Apache-2.0", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "license": "MIT", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "license": "MIT" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz", + "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "~2.4.1", + "parseurl": "~1.3.3", + "statuses": "~2.0.2", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "license": "MIT", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "license": "MIT", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "license": "MIT", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.3.4", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.4.tgz", + "integrity": "sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gauge": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-3.0.2.tgz", + "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", + "deprecated": "This package is no longer supported.", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.2", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.1", + "object-assign": "^4.1.1", + "signal-exit": "^3.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/gauge/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/gauge/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==", + "license": "ISC" + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==", + "license": "ISC" + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regex.js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/glob-to-regex.js/-/glob-to-regex.js-1.2.0.tgz", + "integrity": "sha512-QMwlOQKU/IzqMUOAZWubUOT8Qft+Y0KQWnX9nK3ch0CJg0tTp4TvGZsTfudYKv2NzoQSyPcnA6TYeIQ3jGichQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "license": "BSD-2-Clause" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "license": "MIT", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "license": "MIT", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "license": "MIT", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==" + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz", + "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^9.0.0", + "property-information": "^7.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-has-property": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/hast-util-has-property/-/hast-util-has-property-1.0.4.tgz", + "integrity": "sha512-ghHup2voGfgFoHMGnaLHOjbYFACKrRh9KFttdCzMCbFoBMJXiNi2+XTrPP8+q6cDJM/RSqlCfVWrjp1H201rZg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-element": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-1.1.0.tgz", + "integrity": "sha512-oUmNua0bFbdrD/ELDSSEadRVtWZOf3iF6Lbv81naqsIV99RnSCieTbWuWCY8BAeEfKJTKl0gRdokv+dELutHGQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", + "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hast-util-select/-/hast-util-select-4.0.2.tgz", + "integrity": "sha512-8EEG2//bN5rrzboPWD2HdS3ugLijNioS1pqOTIolXNf67xxShYw4SQEmVXd3imiBG+U2bC2nVTySr/iRAA7Cjg==", + "dependencies": { + "bcp-47-match": "^1.0.0", + "comma-separated-tokens": "^1.0.0", + "css-selector-parser": "^1.0.0", + "direction": "^1.0.0", + "hast-util-has-property": "^1.0.0", + "hast-util-is-element": "^1.0.0", + "hast-util-to-string": "^1.0.0", + "hast-util-whitespace": "^1.0.0", + "not": "^0.1.0", + "nth-check": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0", + "unist-util-visit": "^2.0.0", + "zwitch": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/hast-util-select/node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hast-util-select/node_modules/hast-util-whitespace": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-1.0.4.tgz", + "integrity": "sha512-I5GTdSfhYfAPNztx2xJRQpG8cuDSNt599/7YUn7Gx/WxNMsG+a835k97TDkFgk123cwjfwINaZknkKkphx/f2A==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select/node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "dependencies": { + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hast-util-select/node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hast-util-select/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select/node_modules/unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select/node_modules/unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select/node_modules/zwitch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", + "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz", + "integrity": "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.1.tgz", + "integrity": "sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-string": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-1.0.4.tgz", + "integrity": "sha512-eK0MxRX47AV2eZ+Lyr18DCpQgodvaS3fAQO2+b9Two9F5HEoRPhiUMNzoXArMJfZi2yieFzUBMRl3HNJ3Jus3w==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-text": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-2.0.1.tgz", + "integrity": "sha512-8nsgCARfs6VkwH2jJU9b8LNTuR4700na+0h3PqCaEk4MAnMDeu5P0tP8mjk9LLNGxIeQRLbiDbZVw6rku+pYsQ==", + "dependencies": { + "hast-util-is-element": "^1.0.0", + "repeat-string": "^1.0.0", + "unist-util-find-after": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hogan.js": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz", + "integrity": "sha512-RqGs4wavGYJWE07t35JQccByczmNUXQT0E12ZYV1VKYu5UiAU9lsos/yBAcf840+zrUQQxgVduCR5/B8nNtibg==", + "dependencies": { + "mkdirp": "0.3.0", + "nopt": "1.0.10" + }, + "bin": { + "hulk": "bin/hulk" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "license": "BSD-3-Clause", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "license": "MIT" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "license": "MIT" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "license": "MIT" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "license": "MIT", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.6", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.6.tgz", + "integrity": "sha512-bLjW01UTrvoWTJQL5LsMRo1SypHW80FTm12OJRSnr3v6YHNhfe+1r0MYUZJMACxnCHURVnBWRwAsWs2yPU9Ezw==", + "license": "MIT", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "license": "MIT", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", + "license": "BSD-2-Clause" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==", + "license": "MIT" + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/http-parser-js": { + "version": "0.5.10", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.10.tgz", + "integrity": "sha512-Pysuw9XpUq5dVc/2SMHpuTY01RFl8fttgcyunjL7eEMhGM3cI4eOmiCycJDVCo/7O7ClfQD3SaI6ftDzqOXYMA==", + "license": "MIT" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz", + "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==", + "license": "MIT", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "license": "MIT", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/hyperdyperid": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/hyperdyperid/-/hyperdyperid-1.2.0.tgz", + "integrity": "sha512-Y93lCzHYgGWdrJ66yIktxiaGULYc6oGiABxhcO5AufBeOyoIdZF7bIfLaOrbM0iGIOXQQgxxRrFEnb+Y6w1n4A==", + "license": "MIT", + "engines": { + "node": ">=10.18" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "license": "ISC", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-2.0.2.tgz", + "integrity": "sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==", + "license": "MIT", + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=16.x" + } + }, + "node_modules/immediate": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", + "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.45", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.45.tgz", + "integrity": "sha512-uyH0zfr1erU1OohLk0fT4Rrb94AOhguWNOcD9uGrSpRvNB+6gZXUoJX5J0NtvzBO10YZ9PgvA4NFgt+fYg8ojw==", + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.3.0.tgz", + "integrity": "sha512-Zv/pA+ciVFbCSBBjGfaKUya/CcGmUHzTydLMaTwrUUEM2DIEO3iZvueGxmacvmN50fGpGVKeTXpb2LcYQxeVdg==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "engines": { + "node": ">=4" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "license": "MIT", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-inside-container/node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "license": "MIT", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-network-error": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/is-network-error/-/is-network-error-1.3.1.tgz", + "integrity": "sha512-6QCxa49rQbmUWLfk0nuGqzql9U8uaV2H6279bRErPBHe/109hCzsLUBUHfbEtvLIHBd6hyXbgedBSHevm43Edw==", + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.1.0.tgz", + "integrity": "sha512-O2z4/kNgyjhQwVR1Wpkbfc19JIhggF97NZNCpWTnjH7kVcZMUrnut9XSN7txI7VdyIYk5ZatOq3zvSuWpU8hoA==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "license": "MIT", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", + "license": "MIT" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==", + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "license": "MIT", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.13.2", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.13.2.tgz", + "integrity": "sha512-4VVDnbOpLXy/s8rdRCSXb+zfMeFR0WlJWpET1iA9CQdlZDfwyLjUuGQzXU4VeOoey6AicSAluWan7Etga6Kcmg==", + "license": "MIT", + "dependencies": { + "picocolors": "^1.1.1", + "shell-quote": "^1.8.3" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/loader-runner": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.1.tgz", + "integrity": "sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==", + "license": "MIT", + "engines": { + "node": ">=6.11.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "license": "MIT", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "license": "MIT", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "license": "MIT" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "license": "MIT" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==" + }, + "node_modules/lunr-languages": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", + "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==" + }, + "node_modules/mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==" + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.1.0.tgz", + "integrity": "sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.3.tgz", + "integrity": "sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "license": "CC0-1.0" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-4.57.1.tgz", + "integrity": "sha512-WvzrWPwMQT+PtbX2Et64R4qXKK0fj/8pO85MrUCzymX3twwCiJCdvntW3HdhG1teLJcHDDLIKx5+c3HckWYZtQ==", + "license": "Apache-2.0", + "dependencies": { + "@jsonjoy.com/fs-core": "4.57.1", + "@jsonjoy.com/fs-fsa": "4.57.1", + "@jsonjoy.com/fs-node": "4.57.1", + "@jsonjoy.com/fs-node-builtins": "4.57.1", + "@jsonjoy.com/fs-node-to-fsa": "4.57.1", + "@jsonjoy.com/fs-node-utils": "4.57.1", + "@jsonjoy.com/fs-print": "4.57.1", + "@jsonjoy.com/fs-snapshot": "4.57.1", + "@jsonjoy.com/json-pack": "^1.11.0", + "@jsonjoy.com/util": "^1.9.0", + "glob-to-regex.js": "^1.0.1", + "thingies": "^2.5.0", + "tree-dump": "^1.0.3", + "tslib": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", + "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-frontmatter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", + "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + "license": "MIT", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz", + "integrity": "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz", + "integrity": "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "license": "MIT", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz", + "integrity": "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-space/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz", + "integrity": "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "license": "MIT", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.10.2", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.10.2.tgz", + "integrity": "sha512-AOSS0IdEB95ayVkxn5oGzNQwqAi2J0Jb/kKm43t7H73s8+f5873g0yuj0PNvK4dO75mu5DHg4nlgp4k6Kga8eg==", + "license": "MIT", + "dependencies": { + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", + "license": "ISC" + }, + "node_modules/minimatch": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", + "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz", + "integrity": "sha512-OHsdUcVAQ6pOtg5JYWpCBo9W/GySVuwvP9hueRMW7UqshC0tbfzLv8wjySTPm3tfUZ/21CE9E1pJagOA91Pxew==", + "deprecated": "Legacy versions of mkdirp are no longer supported. Please update to mkdirp 1.x. (Note that the API surface has changed to use Promises in 1.x.)", + "engines": { + "node": "*" + } + }, + "node_modules/mrmime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", + "integrity": "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "license": "MIT", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "license": "MIT" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "license": "MIT", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.2.0.tgz", + "integrity": "sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw==", + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-releases": { + "version": "2.0.36", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.36.tgz", + "integrity": "sha512-TdC8FSgHz8Mwtw9g5L4gR/Sh9XhSP/0DEkQxfEFXOpiul5IiHgHan2VhYYb6agDSfp4KuvltmGApc8HMgUrIkA==", + "license": "MIT" + }, + "node_modules/nopt": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", + "integrity": "sha512-NWmpvLSqUrgrAC9HCuxEvb+PSloHpqVu+FqcO4eeF2h5qYRhA7ev6KvelyQAKtegUbC6RypJnlEOhd8vloNKYg==", + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.1.1.tgz", + "integrity": "sha512-JYc0DPlpGWB40kH5g07gGTrYuMqV653k3uBKY6uITPWds3M0ov3GaWGp9lbE3Bzngx8+XkfzgvASb9vk9JDFXQ==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/not": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/not/-/not-0.1.0.tgz", + "integrity": "sha512-5PDmaAsVfnWUgTUbJ3ERwn7u79Z0dYxN9ErxCpVJJqe2RK0PJ3z+iFUxuqjwtlDDegXvtWoxD/3Fzxox7tFGWA==" + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==", + "license": "MIT" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/null-loader": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/null-loader/-/null-loader-4.0.1.tgz", + "integrity": "sha512-pxqVbi4U6N26lq+LmgIbB5XATP0VdZKOG25DhHi8btMmJJefGArFyDg1yc4U3hWCJbMqSrw0qyrz1UQX+qYXqg==", + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/null-loader/node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/null-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/null-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "license": "MIT" + }, + "node_modules/null-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", + "license": "MIT" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz", + "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "license": "MIT", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "license": "MIT", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "license": "MIT", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "license": "MIT", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-queue": { + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", + "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.4", + "p-timeout": "^3.2.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-6.2.1.tgz", + "integrity": "sha512-hEt02O4hUct5wtwg4H4KcWgDdm+l1bOaEy/hWzd8xtXB9BqxTWBBhb+2ImAtH4Cv4rPjV76xN3Zumqk3k3AhhQ==", + "license": "MIT", + "dependencies": { + "@types/retry": "0.12.2", + "is-network-error": "^1.0.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-timeout": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", + "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", + "license": "MIT", + "dependencies": { + "p-finally": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "license": "MIT", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "license": "MIT", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==", + "license": "ISC" + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz", + "integrity": "sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==", + "license": "MIT", + "dependencies": { + "domhandler": "^5.0.3", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5/node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "license": "MIT", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", + "license": "(WTFPL OR MIT)" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/path-to-regexp": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz", + "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==", + "license": "MIT", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "license": "MIT", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkijs": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/pkijs/-/pkijs-3.4.0.tgz", + "integrity": "sha512-emEcLuomt2j03vxD54giVB4SxTjnsqkU692xZOZXHDVoYyypEm+b3jpiTcc+Cf+myooc+/Ly0z01jqeNHVgJGw==", + "license": "BSD-3-Clause", + "dependencies": { + "@noble/hashes": "1.4.0", + "asn1js": "^3.0.6", + "bytestreamjs": "^2.0.1", + "pvtsutils": "^1.3.6", + "pvutils": "^1.1.3", + "tslib": "^2.8.1" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-attribute-case-insensitive": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-7.0.1.tgz", + "integrity": "sha512-Uai+SupNSqzlschRyNx3kbCTWgY/2hcwtHEI/ej2LJWc9JJ77qKgGptd8DHwY1mXtZ7Aoh4z4yxfwMBue9eNgw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-attribute-case-insensitive/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-calc": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", + "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-clamp": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-clamp/-/postcss-clamp-4.1.0.tgz", + "integrity": "sha512-ry4b1Llo/9zz+PKC+030KUnPITTJAHeOwjfAyyB60eT0AorGLdzp52s31OsPRHRf8NchkgFoG2y6fCfn1IV1Ow==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=7.6.0" + }, + "peerDependencies": { + "postcss": "^8.4.6" + } + }, + "node_modules/postcss-color-functional-notation": { + "version": "7.0.12", + "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-7.0.12.tgz", + "integrity": "sha512-TLCW9fN5kvO/u38/uesdpbx3e8AkTYhMvDZYa9JpmImWuTE99bDQ7GU7hdOADIZsiI9/zuxfAJxny/khknp1Zw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-color-hex-alpha": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-10.0.0.tgz", + "integrity": "sha512-1kervM2cnlgPs2a8Vt/Qbe5cQ++N7rkYo/2rz2BkqJZIHQwaVuJgQH38REHrAi4uM0b1fqxMkWYmese94iMp3w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-color-rebeccapurple": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-10.0.0.tgz", + "integrity": "sha512-JFta737jSP+hdAIEhk1Vs0q0YF5P8fFcj+09pweS8ktuGuZ8pPlykHsk6mPxZ8awDl4TrcxUqJo9l1IhVr/OjQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-colormin": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", + "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "colord": "^2.9.3", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-convert-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", + "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-custom-media": { + "version": "11.0.6", + "resolved": "https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-11.0.6.tgz", + "integrity": "sha512-C4lD4b7mUIw+RZhtY7qUbf4eADmb7Ey8BFA2px9jUbwg7pjTZDl4KY4bvlUV+/vXQvzQRfiGEVJyAbtOsCMInw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/cascade-layer-name-parser": "^2.0.5", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/media-query-list-parser": "^4.0.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-custom-properties": { + "version": "14.0.6", + "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-14.0.6.tgz", + "integrity": "sha512-fTYSp3xuk4BUeVhxCSJdIPhDLpJfNakZKoiTDx7yRGCdlZrSJR7mWKVOBS4sBF+5poPQFMj2YdXx1VHItBGihQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/cascade-layer-name-parser": "^2.0.5", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-custom-selectors": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-8.0.5.tgz", + "integrity": "sha512-9PGmckHQswiB2usSO6XMSswO2yFWVoCAuih1yl9FVcwkscLjRKjwsjM3t+NIWpSU2Jx3eOiK2+t4vVTQaoCHHg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/cascade-layer-name-parser": "^2.0.5", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-custom-selectors/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-dir-pseudo-class": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-9.0.1.tgz", + "integrity": "sha512-tRBEK0MHYvcMUrAuYMEOa0zg9APqirBcgzi6P21OhxtJyJADo/SWBwY1CAwEohQ/6HDaa9jCjLRG7K3PVQYHEA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-dir-pseudo-class/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-discard-comments": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", + "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", + "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-empty": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", + "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", + "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-unused": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", + "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-double-position-gradients": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-6.0.4.tgz", + "integrity": "sha512-m6IKmxo7FxSP5nF2l63QbCC3r+bWpFUWmZXZf096WxG0m7Vl1Q1+ruFOhpdDRmKrRS+S3Jtk+TVk/7z0+BVK6g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-visible": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/postcss-focus-visible/-/postcss-focus-visible-10.0.1.tgz", + "integrity": "sha512-U58wyjS/I1GZgjRok33aE8juW9qQgQUNwTSdxQGuShHzwuYdcklnvK/+qOWX1Q9kr7ysbraQ6ht6r+udansalA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-visible/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-focus-within": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-focus-within/-/postcss-focus-within-9.0.1.tgz", + "integrity": "sha512-fzNUyS1yOYa7mOjpci/bR+u+ESvdar6hk8XNK/TRR0fiGTp2QT5N+ducP0n3rfH/m9I7H/EQU6lsa2BrgxkEjw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-within/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-font-variant": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/postcss-font-variant/-/postcss-font-variant-5.0.0.tgz", + "integrity": "sha512-1fmkBaCALD72CK2a9i468mA/+tr9/1cBxRRMXOUaZqO43oWPR5imcyPjXwuv7PXbCid4ndlP5zWhidQVVa3hmA==", + "license": "MIT", + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-gap-properties": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/postcss-gap-properties/-/postcss-gap-properties-6.0.0.tgz", + "integrity": "sha512-Om0WPjEwiM9Ru+VhfEDPZJAKWUd0mV1HmNXqp2C29z80aQ2uP9UVhLc7e3aYMIor/S5cVhoPgYQ7RtfeZpYTRw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-image-set-function": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/postcss-image-set-function/-/postcss-image-set-function-7.0.0.tgz", + "integrity": "sha512-QL7W7QNlZuzOwBTeXEmbVckNt1FSmhQtbMRvGGqqU4Nf4xk6KUEQhAoWuMzwbSv5jxiRiSZ5Tv7eiDB9U87znA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-lab-function": { + "version": "7.0.12", + "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-7.0.12.tgz", + "integrity": "sha512-tUcyRk1ZTPec3OuKFsqtRzW2Go5lehW29XA21lZ65XmzQkz43VY2tyWEC202F7W3mILOjw0voOiuxRGTsN+J9w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", + "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", + "license": "MIT", + "dependencies": { + "cosmiconfig": "^8.3.5", + "jiti": "^1.20.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-logical": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-8.1.0.tgz", + "integrity": "sha512-pL1hXFQ2fEXNKiNiAgtfA005T9FBxky5zkX6s4GZM2D8RkVgRqz3f4g1JUoq925zXv495qk8UNldDwh8uGEDoA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-merge-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", + "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", + "license": "MIT", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", + "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^6.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-rules": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", + "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^4.0.2", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", + "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", + "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", + "license": "MIT", + "dependencies": { + "colord": "^2.9.3", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-params": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", + "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", + "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", + "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", + "license": "ISC", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.2.0.tgz", + "integrity": "sha512-5kcJm/zk+GJDSfw+V/42fJ5fhjL5YbFDl8nVdXkJPLLW+Vf9mTD5Xe0wqIaDnLuL2U6cDNpTr+UQ+v2HWIBhzw==", + "license": "MIT", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^7.0.0", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.1.tgz", + "integrity": "sha512-m9jZstCVaqGjTAuny8MdgE88scJnCiQSlSrOWcTQgM2t32UBe+MUmFSO5t7VMSfAf/FJKImAxBav8ooCHJXCJA==", + "license": "ISC", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "license": "ISC", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-nesting": { + "version": "13.0.2", + "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-13.0.2.tgz", + "integrity": "sha512-1YCI290TX+VP0U/K/aFxzHzQWHWURL+CtHMSbex1lCdpXD1SoR2sYuxDu5aNI9lPoXpKTCggFZiDJbwylU0LEQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-resolve-nested": "^3.1.0", + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-nesting/node_modules/@csstools/selector-resolve-nested": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-resolve-nested/-/selector-resolve-nested-3.1.0.tgz", + "integrity": "sha512-mf1LEW0tJLKfWyvn5KdDrhpxHyuxpbNwTIwOYLIvsTffeyOf85j5oIzfG0yosxDgx/sswlqBnESYUcQH0vgZ0g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/postcss-nesting/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/postcss-nesting/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", + "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", + "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", + "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", + "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-string": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", + "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", + "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", + "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-url": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", + "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", + "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-opacity-percentage": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-opacity-percentage/-/postcss-opacity-percentage-3.0.0.tgz", + "integrity": "sha512-K6HGVzyxUxd/VgZdX04DCtdwWJ4NGLG212US4/LA1TLAbHgmAsTWVR86o+gGIbFtnTkfOpb9sCRBx8K7HO66qQ==", + "funding": [ + { + "type": "kofi", + "url": "https://ko-fi.com/mrcgrtz" + }, + { + "type": "liberapay", + "url": "https://liberapay.com/mrcgrtz" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-ordered-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", + "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", + "license": "MIT", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-overflow-shorthand": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/postcss-overflow-shorthand/-/postcss-overflow-shorthand-6.0.0.tgz", + "integrity": "sha512-BdDl/AbVkDjoTofzDQnwDdm/Ym6oS9KgmO7Gr+LHYjNWJ6ExORe4+3pcLQsLA9gIROMkiGVjjwZNoL/mpXHd5Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-page-break": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/postcss-page-break/-/postcss-page-break-3.0.4.tgz", + "integrity": "sha512-1JGu8oCjVXLa9q9rFTo4MbeeA5FMe00/9C7lN4va606Rdb+HkxXtXsmEDrIraQ11fGz/WvKWa8gMuCKkrXpTsQ==", + "license": "MIT", + "peerDependencies": { + "postcss": "^8" + } + }, + "node_modules/postcss-place": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/postcss-place/-/postcss-place-10.0.0.tgz", + "integrity": "sha512-5EBrMzat2pPAxQNWYavwAfoKfYcTADJ8AXGVPcUZ2UkNloUTWzJQExgrzrDkh3EKzmAx1evfTAzF9I8NGcc+qw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-preset-env": { + "version": "10.6.1", + "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-10.6.1.tgz", + "integrity": "sha512-yrk74d9EvY+W7+lO9Aj1QmjWY9q5NsKjK2V9drkOPZB/X6KZ0B3igKsHUYakb7oYVhnioWypQX3xGuePf89f3g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/postcss-alpha-function": "^1.0.1", + "@csstools/postcss-cascade-layers": "^5.0.2", + "@csstools/postcss-color-function": "^4.0.12", + "@csstools/postcss-color-function-display-p3-linear": "^1.0.1", + "@csstools/postcss-color-mix-function": "^3.0.12", + "@csstools/postcss-color-mix-variadic-function-arguments": "^1.0.2", + "@csstools/postcss-content-alt-text": "^2.0.8", + "@csstools/postcss-contrast-color-function": "^2.0.12", + "@csstools/postcss-exponential-functions": "^2.0.9", + "@csstools/postcss-font-format-keywords": "^4.0.0", + "@csstools/postcss-gamut-mapping": "^2.0.11", + "@csstools/postcss-gradients-interpolation-method": "^5.0.12", + "@csstools/postcss-hwb-function": "^4.0.12", + "@csstools/postcss-ic-unit": "^4.0.4", + "@csstools/postcss-initial": "^2.0.1", + "@csstools/postcss-is-pseudo-class": "^5.0.3", + "@csstools/postcss-light-dark-function": "^2.0.11", + "@csstools/postcss-logical-float-and-clear": "^3.0.0", + "@csstools/postcss-logical-overflow": "^2.0.0", + "@csstools/postcss-logical-overscroll-behavior": "^2.0.0", + "@csstools/postcss-logical-resize": "^3.0.0", + "@csstools/postcss-logical-viewport-units": "^3.0.4", + "@csstools/postcss-media-minmax": "^2.0.9", + "@csstools/postcss-media-queries-aspect-ratio-number-values": "^3.0.5", + "@csstools/postcss-nested-calc": "^4.0.0", + "@csstools/postcss-normalize-display-values": "^4.0.1", + "@csstools/postcss-oklab-function": "^4.0.12", + "@csstools/postcss-position-area-property": "^1.0.0", + "@csstools/postcss-progressive-custom-properties": "^4.2.1", + "@csstools/postcss-property-rule-prelude-list": "^1.0.0", + "@csstools/postcss-random-function": "^2.0.1", + "@csstools/postcss-relative-color-syntax": "^3.0.12", + "@csstools/postcss-scope-pseudo-class": "^4.0.1", + "@csstools/postcss-sign-functions": "^1.1.4", + "@csstools/postcss-stepped-value-functions": "^4.0.9", + "@csstools/postcss-syntax-descriptor-syntax-production": "^1.0.1", + "@csstools/postcss-system-ui-font-family": "^1.0.0", + "@csstools/postcss-text-decoration-shorthand": "^4.0.3", + "@csstools/postcss-trigonometric-functions": "^4.0.9", + "@csstools/postcss-unset-value": "^4.0.0", + "autoprefixer": "^10.4.23", + "browserslist": "^4.28.1", + "css-blank-pseudo": "^7.0.1", + "css-has-pseudo": "^7.0.3", + "css-prefers-color-scheme": "^10.0.0", + "cssdb": "^8.6.0", + "postcss-attribute-case-insensitive": "^7.0.1", + "postcss-clamp": "^4.1.0", + "postcss-color-functional-notation": "^7.0.12", + "postcss-color-hex-alpha": "^10.0.0", + "postcss-color-rebeccapurple": "^10.0.0", + "postcss-custom-media": "^11.0.6", + "postcss-custom-properties": "^14.0.6", + "postcss-custom-selectors": "^8.0.5", + "postcss-dir-pseudo-class": "^9.0.1", + "postcss-double-position-gradients": "^6.0.4", + "postcss-focus-visible": "^10.0.1", + "postcss-focus-within": "^9.0.1", + "postcss-font-variant": "^5.0.0", + "postcss-gap-properties": "^6.0.0", + "postcss-image-set-function": "^7.0.0", + "postcss-lab-function": "^7.0.12", + "postcss-logical": "^8.1.0", + "postcss-nesting": "^13.0.2", + "postcss-opacity-percentage": "^3.0.0", + "postcss-overflow-shorthand": "^6.0.0", + "postcss-page-break": "^3.0.4", + "postcss-place": "^10.0.0", + "postcss-pseudo-class-any-link": "^10.0.1", + "postcss-replace-overflow-wrap": "^4.0.0", + "postcss-selector-not": "^8.0.1" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-pseudo-class-any-link": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-10.0.1.tgz", + "integrity": "sha512-3el9rXlBOqTFaMFkWDOkHUTQekFIYnaQY55Rsp8As8QQkpiSgIYEcF/6Ond93oHiDsGb4kad8zjt+NPlOC1H0Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-pseudo-class-any-link/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", + "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", + "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", + "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-replace-overflow-wrap": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-4.0.0.tgz", + "integrity": "sha512-KmF7SBPphT4gPPcKZc7aDkweHiKEEO8cla/GjcBK+ckKxiZslIu3C4GCRW3DNfL0o7yW7kMQu9xlZ1kXRXLXtw==", + "license": "MIT", + "peerDependencies": { + "postcss": "^8.0.3" + } + }, + "node_modules/postcss-selector-not": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-8.0.1.tgz", + "integrity": "sha512-kmVy/5PYVb2UOhy0+LqUYAhKj7DUGDpSWa5LZqlkWJaaAV+dxxsOG3+St0yNLu6vsKD7Dmqx+nWQt0iil89+WA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-selector-not/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", + "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", + "license": "MIT", + "dependencies": { + "sort-css-media-queries": "2.2.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.23" + } + }, + "node_modules/postcss-svgo": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", + "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^3.2.0" + }, + "engines": { + "node": "^14 || ^16 || >= 18" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", + "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "license": "MIT" + }, + "node_modules/postcss-zindex": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", + "integrity": "sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.4.1.tgz", + "integrity": "sha512-ey8Ls/+Di31eqzUxC46h8MksNuGx/n0AAC8uKpwFau4RPDYLuE3EXTp8N8G2vX2N7UC/+IXeNUnlWBGGcAG+Ig==", + "license": "MIT", + "dependencies": { + "@types/prismjs": "^1.26.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.0.0" + } + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "license": "MIT" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", + "license": "ISC" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pupa": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.3.0.tgz", + "integrity": "sha512-LjgDO2zPtoXP2wJpDjZrGdojii1uqO0cnwKoIoUzkfS98HDmbeiGmYiXo3lXeFlq2xvne1QFQhwYXSUCLKtEuA==", + "license": "MIT", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pvtsutils": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/pvtsutils/-/pvtsutils-1.3.6.tgz", + "integrity": "sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.8.1" + } + }, + "node_modules/pvutils": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/pvutils/-/pvutils-1.1.5.tgz", + "integrity": "sha512-KTqnxsgGiQ6ZAzZCVlJH5eOjSnvlyEgx1m8bkRJfOhmGRqfo5KLvmAlACQkrjEtOQ4B7wF9TdSLIs9O90MX9xA==", + "license": "MIT", + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/qs": { + "version": "6.14.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", + "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz", + "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC" + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==", + "license": "MIT" + }, + "node_modules/react-helmet-async": { + "name": "@slorber/react-helmet-async", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@slorber/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-e9/OK8VhwUSc67diWI8Rb3I0YgI9/SBQtnhe9aEuK6MhZm7ntZZimXgwXnd8W96YTmSOb9M4d8LwhRZyhWr/1A==", + "license": "Apache-2.0", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/react-json-view-lite": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-2.5.0.tgz", + "integrity": "sha512-tk7o7QG9oYyELWHL8xiMQ8x4WzjCzbWNyig3uexmkLb54r8jO0yH3WCWx8UZS0c49eSA4QUmG5caiRJ8fAn58g==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", + "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", + "license": "MIT", + "dependencies": { + "@types/react": "*" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.3.tgz", + "integrity": "sha512-GXfh9VLwB5ERaCsU6RULh7tkemeX15aNh6wuMEBtfdyMa7fFG8TXrhXlx1SoEK2Ty/l6XIkzzYIQmyaWW3JgdQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/recma-build-jsx": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz", + "integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-jsx": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.1.tgz", + "integrity": "sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==", + "license": "MIT", + "dependencies": { + "acorn-jsx": "^5.0.0", + "estree-util-to-js": "^2.0.0", + "recma-parse": "^1.0.0", + "recma-stringify": "^1.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/recma-parse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz", + "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "esast-util-from-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-stringify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz", + "integrity": "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-to-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==", + "license": "Apache-2.0" + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "license": "MIT" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.2.tgz", + "integrity": "sha512-m03P+zhBeQd1RGnYxrGyDAPpWX/epKirLrp8e3qevZdVkKtnCrjjWczIbYc8+xd6vcTStVlqfycTx1KR4LOr0g==", + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regexpu-core": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-6.4.0.tgz", + "integrity": "sha512-0ghuzq67LI9bLXpOX/ISfve/Mq33a4aFRzoQYhnnok1JOFpmE/A2TBGkNVenOGEeSBCjIiWcc6MVOG5HEQv0sA==", + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.2.2", + "regjsgen": "^0.8.0", + "regjsparser": "^0.13.0", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.2.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.1.1.tgz", + "integrity": "sha512-P7B4+jq8DeD2nMsAcdfaqHbssgHtZ7Z5+++a5ask90fvmJ8p5je4mOa+wzu+DB4vQ5tdJV/xywY+UnVFeQLV5Q==", + "license": "MIT", + "dependencies": { + "@pnpm/npm-conf": "^3.0.2" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "license": "MIT", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==", + "license": "MIT" + }, + "node_modules/regjsparser": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.13.0.tgz", + "integrity": "sha512-NZQZdC5wOE/H3UT28fVGL+ikOZcEzfMGk/c3iN9UGxzWHMa1op7274oyiUVrAG4B2EuFhus8SvkaYnhvW92p9Q==", + "license": "BSD-2-Clause", + "dependencies": { + "jsesc": "~3.1.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/rehype-parse": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-7.0.1.tgz", + "integrity": "sha512-fOiR9a9xH+Le19i4fGzIEowAbwG7idy2Jzs4mOrFWBSJ0sNUgy0ev871dwWnbOo371SjgjG4pwzrbgSVrKxecw==", + "dependencies": { + "hast-util-from-parse5": "^6.0.0", + "parse5": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/@types/hast": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/rehype-parse/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/rehype-parse/node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/rehype-parse/node_modules/hast-util-from-parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz", + "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==", + "dependencies": { + "@types/parse5": "^5.0.0", + "hastscript": "^6.0.0", + "property-information": "^5.0.0", + "vfile": "^4.0.0", + "vfile-location": "^3.2.0", + "web-namespaces": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/hast-util-parse-selector": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", + "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/hastscript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", + "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^1.0.0", + "hast-util-parse-selector": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" + }, + "node_modules/rehype-parse/node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "dependencies": { + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/rehype-parse/node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/rehype-parse/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/vfile-location": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", + "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse/node_modules/web-namespaces": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", + "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-recma": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz", + "integrity": "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "hast-util-to-estree": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.1.tgz", + "integrity": "sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", + "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.2", + "emoticon": "^4.0.1", + "mdast-util-find-and-replace": "^3.0.1", + "node-emoji": "^2.1.0", + "unified": "^11.0.4" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/remark-frontmatter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", + "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.1.tgz", + "integrity": "sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==", + "license": "MIT", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "license": "MIT", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "license": "BSD-2-Clause", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "license": "MIT" + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==", + "license": "MIT" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "license": "MIT", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rtlcss": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", + "license": "MIT", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-applescript": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.1.0.tgz", + "integrity": "sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/sax": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.6.0.tgz", + "integrity": "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA==", + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=11.0.0" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/schema-dts": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/schema-dts/-/schema-dts-1.1.5.tgz", + "integrity": "sha512-RJr9EaCmsLzBX2NDiO5Z3ux2BVosNZN5jo0gWgsyKvxKIUL5R3swNvoorulAeL9kLB0iTSX7V6aokhla2m7xbg==", + "license": "Apache-2.0" + }, + "node_modules/schema-utils": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/search-insights": { + "version": "2.17.3", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.3.tgz", + "integrity": "sha512-RQPdCYTa8A68uM2jwxoY842xDhvx3E5LFL1LxvxCNMev4o5mLuokczhzjAgGwUZBAmOKZknArSxLKmXtIi2AxQ==", + "license": "MIT", + "peer": true + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "license": "MIT", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==", + "license": "MIT" + }, + "node_modules/selfsigned": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-5.5.0.tgz", + "integrity": "sha512-ftnu3TW4+3eBfLRFnDEkzGxSF/10BJBkaLJuBHZX0kiPS7bRdlpZGu6YGt4KngMkdTwJE6MbjavFpqHvqVt+Ew==", + "license": "MIT", + "dependencies": { + "@peculiar/x509": "^1.14.2", + "pkijs": "^3.3.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/send": { + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz", + "integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "~0.5.2", + "http-errors": "~2.0.1", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "~2.4.1", + "range-parser": "~1.2.1", + "statuses": "~2.0.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.7", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.7.tgz", + "integrity": "sha512-CinAq1xWb0vR3twAv9evEU8cNWkXCb9kd5ePAHUKJBkOsUpR1wt/CvGdeca7vqumL1U5cSaeVQ6zZMxiJ3yWsg==", + "license": "MIT", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "mime-types": "2.1.18", + "minimatch": "3.1.5", + "path-is-inside": "1.0.2", + "path-to-regexp": "3.3.0", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", + "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", + "license": "MIT" + }, + "node_modules/serve-index": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.2.tgz", + "integrity": "sha512-KDj11HScOaLmrPxl70KYNW1PksP4Nb/CLL2yvC+Qd2kHMPEEpfc4Re2e4FOay+bC/+XQl/7zAcWON3JVo5v3KQ==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.8.0", + "mime-types": "~2.1.35", + "parseurl": "~1.3.3" + }, + "engines": { + "node": ">= 0.8.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.8.1.tgz", + "integrity": "sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==", + "license": "MIT", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": ">= 1.5.0 < 2", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.16.3", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz", + "integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "~0.19.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "license": "MIT", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==", + "license": "MIT" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz", + "integrity": "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + "license": "MIT", + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "license": "MIT" + }, + "node_modules/sitemap": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.3.tgz", + "integrity": "sha512-tAjEd+wt/YwnEbfNB2ht51ybBJxbEWwe5ki/Z//Wh0rpBFTCUSj46GnxUKEWzhfuJTsee8x3lybHxFgUMig2hw==", + "license": "MIT", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==", + "license": "MIT" + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "license": "MIT", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/snake-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", + "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "license": "MIT", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "license": "MIT", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", + "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", + "license": "MIT", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">= 12" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "license": "BSD-3-Clause" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "license": "MIT" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "license": "BSD-2-Clause", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/stylehacks": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", + "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==", + "license": "MIT" + }, + "node_modules/svgo": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.3.tgz", + "integrity": "sha512-+wn7I4p7YgJhHs38k2TNjy1vCfPIfLIJWR5MnCStsN8WuuTcBnRKcMHQLMM2ijxGZmDoZwNv8ipl5aTTen62ng==", + "license": "MIT", + "dependencies": { + "commander": "^7.2.0", + "css-select": "^5.1.0", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.0.0", + "sax": "^1.5.0" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/tapable": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.2.tgz", + "integrity": "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==", + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser": { + "version": "5.46.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.46.1.tgz", + "integrity": "sha512-vzCjQO/rgUuK9sf8VJZvjqiqiHFaZLnOiimmUuOKODxWL8mm/xua7viT7aqX7dgPY60otQjUotzFMmCB4VdmqQ==", + "license": "BSD-2-Clause", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.15.0", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.4.0.tgz", + "integrity": "sha512-Bn5vxm48flOIfkdl5CaD2+1CiUVbonWQ3KQPyP7/EuIl9Gbzq/gQFOzaMFUEgVjB1396tcK0SG8XcNJ/2kDH8g==", + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "jest-worker": "^27.4.5", + "schema-utils": "^4.3.0", + "terser": "^5.31.1" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "license": "MIT" + }, + "node_modules/thingies": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/thingies/-/thingies-2.6.0.tgz", + "integrity": "sha512-rMHRjmlFLM1R96UYPvpmnc3LYtdFrT33JIB7L9hetGue1qAPfn1N2LJeEjxUSidu1Iku+haLZXDuEXUHNGO/lg==", + "license": "MIT", + "engines": { + "node": ">=10.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "^2" + } + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", + "license": "MIT" + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "license": "MIT" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", + "license": "MIT" + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/to-vfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-6.1.0.tgz", + "integrity": "sha512-BxX8EkCxOAZe+D/ToHdDsJcVI4HqQfmw0tCkp31zf3dNP/XWIAjU4CmeuSwsSoOzOTqHPOL0KUzyZqJplkD0Qw==", + "dependencies": { + "is-buffer": "^2.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/to-vfile/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/to-vfile/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/to-vfile/node_modules/vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/to-vfile/node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/tree-dump": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/tree-dump/-/tree-dump-1.1.0.tgz", + "integrity": "sha512-rMuvhU4MCDbcbnleZTFezWsaZXRFemSqAM+7jPnzUl1fo9w3YEKOxAeui0fz3OI4EU4hf23iyA7uQRVko+UaBA==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/tsyringe": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/tsyringe/-/tsyringe-4.10.0.tgz", + "integrity": "sha512-axr3IdNuVIxnaK5XGEUFTu3YmAQ6lllgrvqfEoR16g/HGnYY/6We4oWENtAnzK6/LpJ2ur9PAb80RBt7/U4ugw==", + "license": "MIT", + "dependencies": { + "tslib": "^1.9.3" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/tsyringe/node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "license": "0BSD" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "license": "MIT", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", + "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", + "devOptional": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "7.18.2", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz", + "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==", + "license": "MIT" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz", + "integrity": "sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "license": "MIT", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.1.tgz", + "integrity": "sha512-JQ84qTuMg4nVkx8ga4A16a1epI9H6uTXAknqxkGF/aFfRLw1xC/Bp24HNLaZhHSkWd3+84t8iXnp1J0kYcZHhg==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.2.0.tgz", + "integrity": "sha512-hpbDzxUY9BFwX+UeBnxv3Sh1q7HFxj48DTmXchNgRa46lO8uj3/1iEn3MiNUYTg1g9ctIqXCCERn8gYZhHC5lQ==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "license": "MIT", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-find-after": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-3.0.0.tgz", + "integrity": "sha512-ojlBqfsBftYXExNu3+hHLfJQ/X1jYY/9vdm4yZWjIbf0VuWF6CRufci1ZyoD/wV2TYMKxXUoNuoqwy+CkgzAiQ==", + "dependencies": { + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-find-after/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "license": "BSD-2-Clause", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + "node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "license": "MIT" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==", + "license": "MIT" + }, + "node_modules/utility-types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", + "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==", + "license": "MIT" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/watchpack": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.5.1.tgz", + "integrity": "sha512-Zn5uXdcFNIA1+1Ei5McRd+iRzfhENPCe7LeABkJtNulSxjma+l7ltNx55BWZkRlwRnpOgHqxnjyaDgJnNXnqzg==", + "license": "MIT", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "license": "MIT", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.105.4", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.105.4.tgz", + "integrity": "sha512-jTywjboN9aHxFlToqb0K0Zs9SbBoW4zRUlGzI2tYNxVYcEi/IPpn+Xi4ye5jTLvX2YeLuic/IvxNot+Q1jMoOw==", + "license": "MIT", + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.8", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.16.0", + "acorn-import-phases": "^1.0.3", + "browserslist": "^4.28.1", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.20.0", + "es-module-lexer": "^2.0.0", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.3.1", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^4.3.3", + "tapable": "^2.3.0", + "terser-webpack-plugin": "^5.3.17", + "watchpack": "^2.5.1", + "webpack-sources": "^3.3.4" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.2", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "license": "MIT", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "7.4.5", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-7.4.5.tgz", + "integrity": "sha512-uxQ6YqGdE4hgDKNf7hUiPXOdtkXvBJXrfEGYSx7P7LC8hnUYGK70X6xQXUvXeNyBDDcsiQXpG2m3G9vxowaEuA==", + "license": "MIT", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^4.43.1", + "mime-types": "^3.0.1", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 18.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + } + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-5.2.3.tgz", + "integrity": "sha512-9Gyu2F7+bg4Vv+pjbovuYDhHX+mqdqITykfzdM9UyKqKHlsE5aAjRhR+oOEfXW5vBeu8tarzlJFIZva4ZjAdrQ==", + "license": "MIT", + "dependencies": { + "@types/bonjour": "^3.5.13", + "@types/connect-history-api-fallback": "^1.5.4", + "@types/express": "^4.17.25", + "@types/express-serve-static-core": "^4.17.21", + "@types/serve-index": "^1.9.4", + "@types/serve-static": "^1.15.5", + "@types/sockjs": "^0.3.36", + "@types/ws": "^8.5.10", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.2.1", + "chokidar": "^3.6.0", + "colorette": "^2.0.10", + "compression": "^1.8.1", + "connect-history-api-fallback": "^2.0.0", + "express": "^4.22.1", + "graceful-fs": "^4.2.6", + "http-proxy-middleware": "^2.0.9", + "ipaddr.js": "^2.1.0", + "launch-editor": "^2.6.1", + "open": "^10.0.3", + "p-retry": "^6.2.0", + "schema-utils": "^4.2.0", + "selfsigned": "^5.5.0", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^7.4.2", + "ws": "^8.18.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 18.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/webpack-dev-server/node_modules/open": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/open/-/open-10.2.0.tgz", + "integrity": "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==", + "license": "MIT", + "dependencies": { + "default-browser": "^5.2.1", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "wsl-utils": "^0.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-6.0.1.tgz", + "integrity": "sha512-hXXvrjtx2PLYx4qruKl+kyRSLc52V+cCvMxRjmKwoA+CBbbF5GfIBtR6kCvl0fYGqTUPKB+1ktVmTHqMOzgCBg==", + "license": "MIT", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.1" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.3.4.tgz", + "integrity": "sha512-7tP1PdV4vF+lYPnkMR0jMY5/la2ub5Fc/8VQrrU+lXkiM6C4TjVfGw7iKfyhnTQOsD+6Q/iKw0eFciziRgD58Q==", + "license": "MIT", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpackbar": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-6.0.1.tgz", + "integrity": "sha512-TnErZpmuKdwWBdMoexjio3KKX6ZtoKHRVvLIU0A47R0VVBDtx3ZyOJDktgYixhoJokZTYTt1Z37OkO9pnGJa9Q==", + "license": "MIT", + "dependencies": { + "ansi-escapes": "^4.3.2", + "chalk": "^4.1.2", + "consola": "^3.2.3", + "figures": "^3.2.0", + "markdown-table": "^2.0.0", + "pretty-time": "^1.1.0", + "std-env": "^3.7.0", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/webpackbar/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/webpackbar/node_modules/markdown-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-2.0.0.tgz", + "integrity": "sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A==", + "license": "MIT", + "dependencies": { + "repeat-string": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpackbar/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/webpackbar/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "license": "Apache-2.0", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, + "node_modules/wide-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/wide-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "license": "MIT", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==", + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "license": "MIT", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/wsl-utils": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/wsl-utils/-/wsl-utils-0.1.0.tgz", + "integrity": "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==", + "license": "MIT", + "dependencies": { + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wsl-utils/node_modules/is-wsl": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.1.tgz", + "integrity": "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==", + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "license": "MIT", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + "xml-js": "bin/cli.js" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.2.2.tgz", + "integrity": "sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==", + "license": "MIT", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/docs/package.json b/docs/package.json new file mode 100644 index 0000000000..5d8eb21e26 --- /dev/null +++ b/docs/package.json @@ -0,0 +1,50 @@ +{ + "name": "octobot-docs", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "npm run collect && docusaurus start", + "prebuild": "npm run collect", + "build": "docusaurus build", + "collect": "node scripts/collect-tentacles.mjs && node scripts/sync-root-docs.mjs && node scripts/generate-llms-txt.mjs", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids", + "typecheck": "tsc" + }, + "dependencies": { + "@docusaurus/core": "3.9.2", + "@docusaurus/plugin-client-redirects": "^3.9.2", + "@docusaurus/preset-classic": "3.9.2", + "@mdx-js/react": "^3.0.0", + "docusaurus-lunr-search": "^3.6.0", + "prism-react-renderer": "^2.3.0", + "react": "^19.0.0", + "react-dom": "^19.0.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "3.9.2", + "@docusaurus/tsconfig": "3.9.2", + "@docusaurus/types": "3.9.2", + "typescript": "~5.6.2" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 3 chrome version", + "last 3 firefox version", + "last 5 safari version" + ] + }, + "engines": { + "node": ">=20.0" + } +} diff --git a/docs/scripts/add-truncate-markers.mjs b/docs/scripts/add-truncate-markers.mjs new file mode 100644 index 0000000000..19af926d87 --- /dev/null +++ b/docs/scripts/add-truncate-markers.mjs @@ -0,0 +1,237 @@ +#!/usr/bin/env node +/** + * Adds <!--truncate--> markers to blog posts that don't have one. + * + * Usage: + * node scripts/add-truncate-markers.mjs # apply changes + * node scripts/add-truncate-markers.mjs --dry-run # preview only + */ + +import { readdir, readFile, writeFile } from 'node:fs/promises'; +import { join } from 'node:path'; + +const BLOG_DIR = new URL('../blog', import.meta.url).pathname; +const DRY_RUN = process.argv.includes('--dry-run'); + +const TRUNCATE_MARKER = '<!--truncate-->'; + +async function main() { + const files = (await readdir(BLOG_DIR)) + .filter(f => f.endsWith('.md') || f.endsWith('.mdx')) + .sort(); + + let modified = 0; + let skipped = 0; + + for (const file of files) { + const filepath = join(BLOG_DIR, file); + const content = await readFile(filepath, 'utf-8'); + + if (content.includes(TRUNCATE_MARKER)) { + skipped++; + continue; + } + + const result = insertTruncateMarker(content, file); + if (!result) { + console.warn(`⚠ Could not find insertion point: ${file}`); + continue; + } + + modified++; + if (DRY_RUN) { + const lines = result.split('\n'); + const idx = lines.findIndex(l => l.trim() === TRUNCATE_MARKER); + const start = Math.max(0, idx - 2); + const end = Math.min(lines.length, idx + 3); + console.log(`\n📝 ${file} (line ${idx + 1}):`); + for (let i = start; i < end; i++) { + const prefix = i === idx ? '>>>' : ' '; + console.log(` ${prefix} ${lines[i]}`); + } + } else { + await writeFile(filepath, result, 'utf-8'); + console.log(`✅ ${file}`); + } + } + + console.log(`\n${DRY_RUN ? '[DRY RUN] ' : ''}Done: ${modified} modified, ${skipped} already had marker, ${files.length} total`); +} + +function isBlank(line) { + return line?.trim() === ''; +} + +function isHeading(line) { + return /^#{1,6}\s/.test(line?.trim() ?? ''); +} + +function isImage(line) { + return /^!\[.*\]\(.*\)/.test(line?.trim() ?? ''); +} + +function isImport(line) { + return /^import\s/.test(line?.trim() ?? ''); +} + +function isJsxSelfClosing(line) { + return /^<\w+[\s/].*\/>/.test(line?.trim() ?? ''); +} + +function isDivOpen(line) { + return /<div[\s>]/.test(line?.trim() ?? ''); +} + +function isBlockquote(line) { + return /^>/.test(line?.trim() ?? ''); +} + +function isList(line) { + return /^[-*\d]/.test(line?.trim() ?? ''); +} + +function isHr(line) { + return /^---+\s*$/.test(line?.trim() ?? ''); +} + +function isFrontmatterLike(line) { + // Lines that look like YAML keys: "key: value" or "key:" — part of a double frontmatter block + return /^\w[\w_]*\s*:/.test(line?.trim() ?? ''); +} + +/** + * Insert <!--truncate--> after the first real content paragraph. + */ +function insertTruncateMarker(content, filename) { + const lines = content.split('\n'); + let i = 0; + + // 1. Skip primary frontmatter block + if (lines[i]?.trim() === '---') { + i++; + while (i < lines.length && lines[i]?.trim() !== '---') i++; + i++; // past closing --- + } + + // 2. Skip blank lines + while (i < lines.length && isBlank(lines[i])) i++; + + // 3. Handle double frontmatter (legacy posts like 2024-01-01-*) + // If the next non-blank line is another `---`, skip the entire second block + if (i < lines.length && isHr(lines[i])) { + i++; // past opening --- + while (i < lines.length && lines[i]?.trim() !== '---') i++; + if (i < lines.length) i++; // past closing --- + while (i < lines.length && isBlank(lines[i])) i++; + } + + // 4. Skip broken/partial imports (like the lucide-react destructuring without `import {`) + // These are lines that are part of a JS destructuring before `from 'module'` + if (i < lines.length && /^\s*\w+,?\s*$/.test(lines[i]) && !isHeading(lines[i])) { + // Look ahead to see if there's a `from '...'` line nearby + let j = i; + while (j < lines.length && j < i + 15) { + if (/}\s*from\s+['"]/.test(lines[j]) || /from\s+['"]/.test(lines[j])) { + i = j + 1; + while (i < lines.length && isBlank(lines[i])) i++; + break; + } + j++; + } + } + + // 5. Collect all import statement positions (for MDX — must place truncate after ALL imports) + let lastImportEnd = -1; + for (let j = i; j < lines.length; j++) { + if (isImport(lines[j])) { + lastImportEnd = j + 1; + // Handle multi-line imports + if (!lines[j].includes('from') || lines[j].trim().endsWith('{')) { + while (j + 1 < lines.length && !lines[j].includes('from')) j++; + lastImportEnd = j + 1; + } + } + } + + // Now walk through the content to find the first good truncation point + // We want to find the first real text paragraph after preamble + while (i < lines.length) { + const line = lines[i]; + const trimmed = line?.trim() ?? ''; + + // Skip blank lines + if (isBlank(line)) { i++; continue; } + + // Skip headings (# Title, ## Section, etc.) + if (isHeading(line)) { i++; continue; } + + // Skip standalone images + if (isImage(line)) { i++; continue; } + + // Skip import statements + if (isImport(line)) { + i++; + // Skip multi-line imports + while (i < lines.length && !lines[i - 1].includes('from')) i++; + continue; + } + + // Skip self-closing JSX components (<YouTube .../>, etc.) + if (isJsxSelfClosing(line)) { i++; continue; } + + // Skip div blocks (cover images in divs) + if (isDivOpen(line)) { + while (i < lines.length && !lines[i].includes('</div>')) i++; + if (i < lines.length) i++; // past </div> + continue; + } + + // Skip italic captions like _"Not your keys..."_ + if (/^_[""'"].*[""'"]_$/.test(trimmed)) { i++; continue; } + + // Skip horizontal rules + if (isHr(line)) { i++; continue; } + + // Skip leftover YAML-like lines from double frontmatter + if (isFrontmatterLike(line) && i < 25) { i++; continue; } + + // Found a real content line — this is the start of a paragraph + break; + } + + if (i >= lines.length) return null; + + // Find the end of this paragraph + const paraStart = i; + while (i < lines.length && !isBlank(lines[i]) && !isHeading(lines[i])) { + // Stop at block elements that aren't continuation of the paragraph + const trimmed = lines[i].trim(); + if (i > paraStart && (isDivOpen(lines[i]) || isImage(lines[i]) || isList(lines[i]))) break; + i++; + } + + if (i === paraStart) return null; + + // Ensure marker is placed AFTER all imports (MDX requirement) + let insertAt = i; + if (lastImportEnd > insertAt) { + // Find first paragraph after last import + insertAt = lastImportEnd; + while (insertAt < lines.length && isBlank(lines[insertAt])) insertAt++; + // Find end of that paragraph + const paraStart2 = insertAt; + while (insertAt < lines.length && !isBlank(lines[insertAt]) && !isHeading(lines[insertAt])) { + if (insertAt > paraStart2 && (isDivOpen(lines[insertAt]) || isImage(lines[insertAt]))) break; + insertAt++; + } + if (insertAt === paraStart2) return null; + } + + lines.splice(insertAt, 0, '', TRUNCATE_MARKER); + return lines.join('\n'); +} + +main().catch(err => { + console.error(err); + process.exit(1); +}); diff --git a/docs/scripts/collect-tentacles.mjs b/docs/scripts/collect-tentacles.mjs new file mode 100644 index 0000000000..4c08c00714 --- /dev/null +++ b/docs/scripts/collect-tentacles.mjs @@ -0,0 +1,180 @@ +/** + * collect-tentacles.mjs + * + * Walks packages/tentacles/ and collects resource .md files into + * audience-based content directories. + */ + +import {readFile, writeFile, mkdir, stat} from 'node:fs/promises'; +import {join, basename, dirname, relative} from 'node:path'; +import {fileURLToPath} from 'node:url'; +import {extractDescription, escapeYaml, toSlug, stripFrontmatter, findMdFiles} from './shared.mjs'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const ROOT = join(__dirname, '..', '..'); +const TENTACLES_DIR = join(ROOT, 'packages', 'tentacles'); +const CONTENT_DIR = join(__dirname, '..', 'content'); + +const CATEGORY_MAP = { + 'Trading/Mode': { + audience: 'guides/strategies', + dir: 'trading-modes', + label: 'Trading Modes', + description: 'Trading strategies and modes available in OctoBot', + keywords: ['trading modes', 'strategies', 'octobot'], + slugBase: 'guides/strategies/trading-modes', + position: 2, + }, + 'Trading/Exchange': { + audience: 'guides', + dir: 'exchanges', + label: 'Exchanges', + description: 'Supported cryptocurrency exchanges for OctoBot trading', + keywords: ['exchanges', 'crypto', 'octobot', 'connectors'], + slugBase: 'guides/exchanges', + position: 4, + }, + 'Evaluator/TA': { + audience: 'guides/strategies', + dir: 'evaluators/ta', + label: 'Technical Analysis', + description: 'Technical analysis evaluators for OctoBot trading signals', + keywords: ['technical analysis', 'evaluators', 'indicators', 'octobot'], + slugBase: 'guides/strategies/evaluators/ta', + position: 4, + }, + 'Evaluator/Social': { + audience: 'guides/strategies', + dir: 'evaluators/social', + label: 'Social Evaluators', + description: 'Social signal evaluators including news, sentiment, and trends', + keywords: ['social evaluators', 'sentiment', 'news', 'octobot'], + slugBase: 'guides/strategies/evaluators/social', + position: 5, + }, + 'Evaluator/RealTime': { + audience: 'guides/strategies', + dir: 'evaluators/realtime', + label: 'Real-Time Evaluators', + description: 'Real-time market evaluators for instant signal detection', + keywords: ['realtime evaluators', 'instant', 'octobot'], + slugBase: 'guides/strategies/evaluators/realtime', + position: 6, + }, + 'Evaluator/Strategies': { + audience: 'guides/strategies', + dir: 'evaluators/strategies', + label: 'Strategy Evaluators', + description: 'Strategy evaluators that combine multiple signals into trading decisions', + keywords: ['strategy evaluators', 'combined signals', 'octobot'], + slugBase: 'guides/strategies/evaluators/strategies', + position: 7, + }, +}; + +/** Generate a human-readable title from a filename. e.g. "GridTradingMode" -> "Grid Trading Mode" */ +function toTitle(name) { + return name + .replace(/([a-z])([A-Z])/g, '$1 $2') + .replace(/[_]+/g, ' ') + .trim(); +} + +function getCategory(filePath) { + const rel = relative(TENTACLES_DIR, filePath); + for (const [prefix, config] of Object.entries(CATEGORY_MAP)) { + if (rel.startsWith(prefix)) { + return {prefix, ...config}; + } + } + return null; +} + +async function processFile(filePath, category) { + const rawContent = await readFile(filePath, 'utf-8'); + const name = basename(filePath, '.md'); + const slug = toSlug(name); + const title = toTitle(name); + const content = stripFrontmatter(rawContent); + + const description = extractDescription(content, `${title} - OctoBot tentacle documentation and configuration guide`); + + const frontmatter = `--- +title: "${escapeYaml(title)}" +description: "${escapeYaml(description)}" +keywords: [${category.keywords.map(k => `"${k}"`).join(', ')}, "${slug}"] +slug: /${category.slugBase}/${slug} +format: md +---`; + + const output = `${frontmatter}\n\n${content}\n`; + const outputDir = join(CONTENT_DIR, category.audience, category.dir); + await mkdir(outputDir, {recursive: true}); + await writeFile(join(outputDir, `${slug}.md`), output, 'utf-8'); +} + +async function writeCategoryJson(dir, label, position) { + await mkdir(dir, {recursive: true}); + const json = JSON.stringify( + {label, position, link: {type: 'generated-index', description: `${label} available in OctoBot.`}}, + null, + 2 + ); + await writeFile(join(dir, '_category_.json'), json, 'utf-8'); +} + +async function main() { + console.log('Collecting tentacle documentation...'); + + // Check if tentacles directory exists + try { + await stat(TENTACLES_DIR); + } catch { + console.warn(`Warning: ${TENTACLES_DIR} not found. Skipping tentacle collection.`); + return; + } + + const mdFiles = await findMdFiles(TENTACLES_DIR, p => relative(TENTACLES_DIR, p).includes('/resources/')); + console.log(`Found ${mdFiles.length} tentacle resource files.`); + + for (const config of Object.values(CATEGORY_MAP)) { + const dir = join(CONTENT_DIR, config.audience, config.dir); + await writeCategoryJson(dir, config.label, config.position); + } + + // Also create evaluators parent category under creators + await writeCategoryJson(join(CONTENT_DIR, 'creators', 'evaluators'), 'Evaluators', 3); + + // Process all files + let processed = 0; + const errors = []; + + for (const filePath of mdFiles) { + const category = getCategory(filePath); + if (!category) { + console.warn(` Skipping (no category): ${relative(TENTACLES_DIR, filePath)}`); + continue; + } + + try { + await processFile(filePath, category); + processed++; + } catch (err) { + errors.push({file: filePath, error: err.message}); + } + } + + console.log(`Processed ${processed} tentacle docs.`); + if (errors.length > 0) { + console.error(`Errors (${errors.length}):`); + for (const {file, error} of errors) { + console.error(` ${relative(ROOT, file)}: ${error}`); + } + process.exit(1); + } +} + +main().catch(err => { + console.error('Failed to collect tentacles:', err); + process.exit(1); +}); diff --git a/docs/scripts/generate-llms-txt.mjs b/docs/scripts/generate-llms-txt.mjs new file mode 100644 index 0000000000..5ac4f22194 --- /dev/null +++ b/docs/scripts/generate-llms-txt.mjs @@ -0,0 +1,127 @@ +/** + * generate-llms-txt.mjs + * + * Auto-generates docs/static/llms.txt from the content directory. + * Must run AFTER collect-tentacles and sync-root-docs. + */ + +import {readFile, writeFile} from 'node:fs/promises'; +import {join, dirname} from 'node:path'; +import {fileURLToPath} from 'node:url'; +import {BASE_URL, findMdFiles} from './shared.mjs'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const CONTENT_DIR = join(__dirname, '..', 'content'); +const OUTPUT_PATH = join(__dirname, '..', 'static', 'llms.txt'); + +const SECTIONS = [ + {dir: 'guides', label: 'Guides'}, + {dir: 'investing', label: 'OctoBot Cloud'}, + {dir: 'creators', label: 'Creators'}, + {dir: 'developers', label: 'Developers'}, +]; + +const HEADER = `# OctoBot + +> OctoBot is a free, open-source cryptocurrency trading bot that supports automated trading strategies across 15+ exchanges. + +OctoBot is developed by [Drakkar-Software](https://github.com/Drakkar-Software) and is designed to be multi-strategy, multi-exchange, and multi-cryptocurrency. It features a plugin system called "tentacles" that allows extending functionality without modifying core code. + +## Documentation + +This documentation is organized into three sections: + +- [Open Source Guides](${BASE_URL}/guides/octobot): Install, configure, and run OctoBot +- [OctoBot Cloud](${BASE_URL}/investing/introduction): Invest with OctoBot Cloud strategies +- [Creators Guide](${BASE_URL}/creators/getting-started): Build and customize trading strategies +- [Developers Guide](${BASE_URL}/developers/getting-started): Contribute to the OctoBot codebase + +## Key Concepts + +### Architecture +OctoBot is a Python monorepo organized into packages under \`packages/\`. The build system is Pants. Key packages include Trading (order engine), Evaluators (signal analysis), Commons (shared utilities), and Async Channel (event system). + +### Tentacle System +Tentacles are OctoBot's plugin system. They enable adding trading modes, evaluators, and exchange connectors without modifying core code. Tentacles are categorized as: +- **Trading Modes**: Complete trading strategies (Grid, DCA, Daily Trading, AI Trading) +- **Evaluators**: Market analysis tools (Technical Analysis, Social, Real-Time, AI) +- **Exchange Connectors**: Integrations with cryptocurrency exchanges + +### Data Flow +1. Exchange data arrives via websocket/REST +2. Async channels distribute data to evaluators +3. Evaluators produce signals (-1 to +1 scale) +4. Strategies combine signals into decisions +5. Trading modes execute orders on exchanges + +## Links + +- Source Code: https://github.com/Drakkar-Software/OctoBot +- Website: https://www.octobot.cloud +- Discord: https://discord.gg/vHkcb8W +- Telegram: https://t.me/OctoBot_Project`; + +function parseFrontmatter(content, filePath) { + if (!content.startsWith('---')) return null; + const endIdx = content.indexOf('---', 3); + if (endIdx === -1) return null; + + const fm = content.substring(3, endIdx); + // Handle both quoted and unquoted title/description + const title = fm.match(/^title:\s*"(.+)"/m)?.[1] + || fm.match(/^title:\s*(.+)/m)?.[1]?.trim(); + const description = fm.match(/^description:\s*"(.+)"/m)?.[1] + || fm.match(/^description:\s*(.+)/m)?.[1]?.trim(); + let slug = fm.match(/^slug:\s*(.+)/m)?.[1]?.trim(); + + if (!title) return null; + + // Derive slug from file path if not in frontmatter + if (!slug && filePath) { + const relative = filePath.replace(CONTENT_DIR, '').replace(/\.md$/, '').replace(/\/index$/, ''); + slug = relative.startsWith('/') ? relative : `/${relative}`; + } + + if (!slug) return null; + return {title, description: description || '', slug}; +} + +async function main() { + console.log('Generating llms.txt...'); + + const sectionBlocks = []; + + for (const {dir, label} of SECTIONS) { + const sectionDir = join(CONTENT_DIR, dir); + const mdFiles = await findMdFiles(sectionDir); + const pages = []; + + for (const filePath of mdFiles) { + const content = await readFile(filePath, 'utf-8'); + const fm = parseFrontmatter(content, filePath); + if (!fm) continue; + pages.push(fm); + } + + // Sort: getting-started first, then alphabetically by slug + pages.sort((a, b) => { + if (a.slug.endsWith('/getting-started')) return -1; + if (b.slug.endsWith('/getting-started')) return 1; + return a.slug.localeCompare(b.slug); + }); + + const lines = pages.map(p => `- ${BASE_URL}${p.slug}: ${p.description || p.title}`); + sectionBlocks.push(`### ${label}\n${lines.join('\n')}`); + } + + const output = `${HEADER}\n\n## Optional\n\n${sectionBlocks.join('\n\n')}\n`; + await writeFile(OUTPUT_PATH, output, 'utf-8'); + + const totalPages = sectionBlocks.reduce((sum, block) => sum + (block.match(/^- /gm) || []).length, 0); + console.log(`Generated llms.txt with ${totalPages} pages.`); +} + +main().catch(err => { + console.error('Failed to generate llms.txt:', err); + process.exit(1); +}); diff --git a/docs/scripts/shared.mjs b/docs/scripts/shared.mjs new file mode 100644 index 0000000000..c45df9eede --- /dev/null +++ b/docs/scripts/shared.mjs @@ -0,0 +1,96 @@ +/** + * Shared constants and utilities used by all docs collection scripts. + */ + +import {readdir} from 'node:fs/promises'; +import {join} from 'node:path'; + +export const BASE_URL = 'https://docs.octobot.cloud'; +export const DEFAULT_BRANCH = 'dev'; + +/** + * Extract the first contiguous paragraph from markdown content, + * truncated to max 160 characters for SEO meta descriptions. + */ +export function extractDescription(content, fallback) { + const paragraphLines = []; + let started = false; + for (const line of content.split('\n')) { + const trimmed = line.trim(); + if (!started) { + if (trimmed && !trimmed.startsWith('#') && !trimmed.startsWith('---')) { + started = true; + paragraphLines.push(trimmed); + } + } else { + if (!trimmed || trimmed.startsWith('#')) break; + paragraphLines.push(trimmed); + } + } + const firstParagraph = paragraphLines.join(' ').replace(/\s+/g, ' ').trim(); + + if (firstParagraph.length > 10 && firstParagraph.length <= 160) { + return firstParagraph; + } + if (firstParagraph.length > 160) { + const truncated = firstParagraph.substring(0, 157); + const lastSpace = truncated.lastIndexOf(' '); + return (lastSpace > 100 ? truncated.substring(0, lastSpace) : truncated) + '...'; + } + return fallback; +} + +/** Escape special characters for safe inclusion in YAML frontmatter values. */ +export function escapeYaml(value) { + return value + .replace(/\\/g, '\\\\') + .replace(/"/g, '\\"') + .replace(/\n/g, ' ') + .replace(/\r/g, ''); +} + +/** Convert a PascalCase/camelCase name to a URL-friendly slug. */ +export function toSlug(name) { + return name + .replace(/([a-z])([A-Z])/g, '$1-$2') + .replace(/[_\s]+/g, '-') + .toLowerCase(); +} + +/** Strip leading YAML frontmatter (--- ... ---) from markdown content. */ +export function stripFrontmatter(content) { + if (!content.startsWith('---')) return content; + const endIdx = content.indexOf('---', 3); + if (endIdx === -1) return content; + return content.substring(endIdx + 3).trim(); +} + +/** + * Recursively find all .md files under a directory. + * Optional filter function receives relative path from baseDir. + */ +export async function findMdFiles(dir, filter) { + const results = []; + + async function walk(current) { + let entries; + try { + entries = await readdir(current, {withFileTypes: true}); + } catch { + return; + } + for (const entry of entries) { + const fullPath = join(current, entry.name); + if (entry.isDirectory()) { + await walk(fullPath); + } else if (entry.isFile() && entry.name.endsWith('.md')) { + if (!filter || filter(fullPath)) { + results.push(fullPath); + } + } + } + } + + await walk(dir); + return results; +} diff --git a/docs/scripts/sync-root-docs.mjs b/docs/scripts/sync-root-docs.mjs new file mode 100644 index 0000000000..2cdcd0ab06 --- /dev/null +++ b/docs/scripts/sync-root-docs.mjs @@ -0,0 +1,96 @@ +/** + * sync-root-docs.mjs + * + * Copies root-level documentation files (CONTRIBUTING.md, CHANGELOG.md) + * into the docs/content/ directory with Docusaurus frontmatter. + */ + +import {readFile, writeFile, mkdir} from 'node:fs/promises'; +import {join, dirname} from 'node:path'; +import {fileURLToPath} from 'node:url'; +import {DEFAULT_BRANCH, escapeYaml, stripFrontmatter} from './shared.mjs'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const ROOT = join(__dirname, '..', '..'); +const OUTPUT_DIR = join(__dirname, '..', 'content', 'developers'); + +const FILES = [ + { + src: 'CONTRIBUTING.md', + dest: 'contributing.md', + frontmatter: { + title: 'Contributing', + description: 'How to contribute to OctoBot. Development guidelines, coding style, and pull request process.', + keywords: ['contributing', 'development', 'octobot', 'pull request', 'guidelines'], + slug: '/developers/contributing', + sidebar_position: 90, + }, + }, + { + src: 'CHANGELOG.md', + dest: 'changelog.md', + frontmatter: { + title: 'Changelog', + description: 'OctoBot release history and changelog. Track new features, improvements, and bug fixes.', + keywords: ['changelog', 'releases', 'octobot', 'updates', 'version history'], + slug: '/developers/changelog', + sidebar_position: 91, + }, + }, +]; + +function buildFrontmatter(meta) { + const lines = ['---']; + lines.push(`title: "${escapeYaml(meta.title)}"`); + lines.push(`description: "${escapeYaml(meta.description)}"`); + lines.push(`keywords: [${meta.keywords.map(k => `"${k}"`).join(', ')}]`); + lines.push(`slug: ${meta.slug}`); + lines.push(`sidebar_position: ${meta.sidebar_position}`); + lines.push('format: md'); + lines.push('---'); + return lines.join('\n'); +} + +/** Adapt relative links to work within the docs site or fall back to GitHub URLs. */ +function adaptLinks(content) { + return content + // Known root-level docs → docs site paths + .replace(/\[([^\]]+)\]\(\.\/packages\/README\.md\)/g, '[$1](/developers/packages/overview)') + .replace(/\[([^\]]+)\]\(\.\/CONTRIBUTING\.md\)/g, '[$1](/developers/contributing)') + .replace(/\[([^\]]+)\]\(\.\/CHANGELOG\.md\)/g, '[$1](/developers/changelog)') + // Remaining relative .md links → GitHub source URLs + .replace(/\[([^\]]+)\]\(\.\/((?!http)[^)]+\.md)\)/g, + `[$1](https://github.com/Drakkar-Software/OctoBot/blob/${DEFAULT_BRANCH}/$2)`); +} + +async function main() { + console.log('Syncing root documentation files...'); + + await mkdir(OUTPUT_DIR, {recursive: true}); + + for (const file of FILES) { + const srcPath = join(ROOT, file.src); + try { + let content = stripFrontmatter(await readFile(srcPath, 'utf-8')); + content = adaptLinks(content); + + const output = `${buildFrontmatter(file.frontmatter)}\n\n${content}\n`; + const destPath = join(OUTPUT_DIR, file.dest); + await writeFile(destPath, output, 'utf-8'); + console.log(` ${file.src} → content/developers/${file.dest}`); + } catch (err) { + if (err.code === 'ENOENT') { + console.warn(` Warning: ${file.src} not found, skipping.`); + } else { + throw err; + } + } + } + + console.log('Done syncing root docs.'); +} + +main().catch(err => { + console.error('Failed to sync root docs:', err); + process.exit(1); +}); diff --git a/docs/sidebars.ts b/docs/sidebars.ts new file mode 100644 index 0000000000..61b19b9dd2 --- /dev/null +++ b/docs/sidebars.ts @@ -0,0 +1,50 @@ +import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; + +const sidebars: SidebarsConfig = { + guides: [ + { + type: 'autogenerated', + dirName: 'guides', + }, + ], + investing: [ + { + type: 'autogenerated', + dirName: 'investing', + }, + ], + 'octobot-script': [ + { + type: 'autogenerated', + dirName: 'octobot-script', + }, + ], + developers: [ + 'developers/getting-started', + { + type: 'category', + label: 'Architecture', + items: [{type: 'autogenerated', dirName: 'developers/architecture'}], + }, + { + type: 'category', + label: 'Developer Environment', + items: [{type: 'autogenerated', dirName: 'developers/environment'}], + }, + { + type: 'category', + label: 'Tentacles Development', + items: [{type: 'autogenerated', dirName: 'developers/tentacles-dev'}], + }, + { + type: 'category', + label: 'Packages', + link: {type: 'doc', id: 'developers/packages/overview'}, + items: [{type: 'autogenerated', dirName: 'developers/packages'}], + }, +'developers/contributing', + 'developers/changelog', + ], +}; + +export default sidebars; diff --git a/docs/src/components/HomepageFeatures/index.tsx b/docs/src/components/HomepageFeatures/index.tsx new file mode 100644 index 0000000000..e0eb53be61 --- /dev/null +++ b/docs/src/components/HomepageFeatures/index.tsx @@ -0,0 +1,84 @@ +import type {ReactNode} from 'react'; +import Link from '@docusaurus/Link'; +import Heading from '@theme/Heading'; +import styles from './styles.module.css'; + +type AudienceItem = { + title: string; + icon: string; + description: ReactNode; + link: string; + linkLabel: string; +}; + +const AudienceList: AudienceItem[] = [ + { + title: 'Open Source', + icon: '🚀', + description: ( + <> + Install, configure, and run OctoBot. Connect exchanges, set up trading + pairs, manage updates, and monitor your bot through the web interface. + </> + ), + link: '/guides/octobot', + linkLabel: 'Get Started', + }, + { + title: 'OctoBot Cloud', + icon: '☁️', + description: ( + <> + Invest with OctoBot Cloud automated strategies. Follow strategies, + connect exchanges, and automate TradingView alerts. + </> + ), + link: '/investing/introduction', + linkLabel: 'Explore Cloud', + }, + { + title: 'Developers', + icon: '🛠️', + description: ( + <> + Contribute to the OctoBot codebase. Understand the architecture, + explore packages, set up your dev environment, and submit pull requests. + </> + ), + link: '/developers/getting-started', + linkLabel: 'Start Contributing', + }, +]; + +function AudienceCard({title, icon, description, link, linkLabel}: AudienceItem) { + return ( + <div className="col col--4"> + <div className={`text--center padding-horiz--md ${styles.featureCard}`}> + <div className={styles.featureIcon} aria-hidden="true">{icon}</div> + <Heading as="h3">{title}</Heading> + <p>{description}</p> + <Link className="button button--primary button--md" to={link}> + {linkLabel} + </Link> + </div> + </div> + ); +} + +export default function HomepageFeatures(): ReactNode { + return ( + <section className={styles.features}> + <div className="container"> + <div className={`text--center ${styles.sectionHeader}`}> + <Heading as="h2">Choose your path</Heading> + <p>OctoBot documentation is organized by audience. Pick the guide that fits your needs.</p> + </div> + <div className="row"> + {AudienceList.map((props) => ( + <AudienceCard key={props.title} {...props} /> + ))} + </div> + </div> + </section> + ); +} diff --git a/docs/src/components/HomepageFeatures/styles.module.css b/docs/src/components/HomepageFeatures/styles.module.css new file mode 100644 index 0000000000..56e078b35e --- /dev/null +++ b/docs/src/components/HomepageFeatures/styles.module.css @@ -0,0 +1,36 @@ +.features { + display: flex; + align-items: center; + padding: 2rem 0 4rem; + width: 100%; +} + +.sectionHeader { + margin-bottom: 2rem; +} + +.featureCard { + padding: 2rem 1.5rem; + border-radius: 8px; + border: 1px solid var(--ifm-color-emphasis-200); + margin: 1rem 0; + transition: box-shadow 0.2s ease, transform 0.2s ease; + display: flex; + flex-direction: column; + align-items: center; + height: 100%; +} + +.featureCard:hover { + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.08); + transform: translateY(-2px); +} + +[data-theme='dark'] .featureCard:hover { + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.4); +} + +.featureIcon { + font-size: 3rem; + margin-bottom: 1rem; +} diff --git a/docs/src/components/YouTube.tsx b/docs/src/components/YouTube.tsx new file mode 100644 index 0000000000..c237bdbc24 --- /dev/null +++ b/docs/src/components/YouTube.tsx @@ -0,0 +1,23 @@ +import type {ReactNode} from 'react'; + +interface YouTubeProps { + id: string; + title?: string; +} + +export default function YouTube({id, title = 'Video'}: YouTubeProps): ReactNode { + return ( + <div style={{textAlign: 'center', margin: '1.5rem 0'}}> + <iframe + width="100%" + height="400" + style={{maxWidth: 640, borderRadius: 8}} + src={`https://www.youtube.com/embed/${id}`} + title={title} + frameBorder="0" + allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" + allowFullScreen + /> + </div> + ); +} diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css new file mode 100644 index 0000000000..3414a01272 --- /dev/null +++ b/docs/src/css/custom.css @@ -0,0 +1,171 @@ +@import url('https://fonts.googleapis.com/css2?family=DM+Sans:wght@400;500;700&display=swap'); + +:root { + --ifm-color-primary: #2563eb; + --ifm-color-primary-dark: #1d4ed8; + --ifm-color-primary-darker: #1e40af; + --ifm-color-primary-darkest: #1e3a8a; + --ifm-color-primary-light: #3b82f6; + --ifm-color-primary-lighter: #60a5fa; + --ifm-color-primary-lightest: #93bbfd; + --ifm-code-font-size: 95%; + --ifm-font-family-base: 'DM Sans', system-ui, -apple-system, 'Segoe UI', Roboto, sans-serif; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); + letter-spacing: -0.3px; +} + +[data-theme='dark'] { + --ifm-color-primary: #60a5fa; + --ifm-color-primary-dark: #3b82f6; + --ifm-color-primary-darker: #2563eb; + --ifm-color-primary-darkest: #1d4ed8; + --ifm-color-primary-light: #93bbfd; + --ifm-color-primary-lighter: #bfdbfe; + --ifm-color-primary-lightest: #dbeafe; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); +} + +/* Hero section */ +.hero--primary { + --ifm-hero-background-color: var(--ifm-color-primary); + --ifm-hero-text-color: #ffffff; +} + +/* Sidebar improvements */ +.menu__link { + font-size: 0.9rem; +} + +/* Footer */ +.footer--dark { + --ifm-footer-background-color: #0f172a; +} + +/* ===== Blog list page — card grid ===== */ + +/* Grid container: 3 cols desktop, 1 col mobile */ +.blog-list-page .row > main { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 1.25rem; + max-width: 100%; + flex-basis: 100%; +} + +@media (max-width: 768px) { + .blog-list-page .row > main { + grid-template-columns: 1fr; + } +} + +/* Card wrapper */ +.blog-post-card { + border: 1px solid var(--ifm-color-emphasis-200); + border-radius: 8px; + overflow: hidden; + transition: box-shadow 0.2s ease, transform 0.2s ease; + background: var(--ifm-background-color); + display: flex; + flex-direction: column; +} + +.blog-post-card:hover { + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.08); + transform: translateY(-2px); +} + +[data-theme='dark'] .blog-post-card:hover { + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.4); +} + +/* Cover image */ +.blog-post-cover-link { + display: block; + overflow: hidden; + aspect-ratio: 16 / 9; + background: var(--ifm-color-emphasis-100); + flex-shrink: 0; +} + +.blog-post-cover-image { + width: 100%; + height: 100%; + object-fit: cover; + transition: transform 0.3s ease; +} + +.blog-post-card:hover .blog-post-cover-image { + transform: scale(1.03); +} + +/* Card content */ +.blog-post-card > article, +.blog-post-card > div:last-child > article { + padding: 0.5rem 0.75rem 0.75rem; + margin-bottom: 0; + flex: 1; + display: flex; + flex-direction: column; +} + +/* Title */ +.blog-list-page article header h2 { + font-size: 0.95rem; + line-height: 1.3; + margin-bottom: 0.15rem; +} + +.blog-list-page article header h2 a { + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + overflow: hidden; +} + +/* Meta line (date + reading time) */ +.blog-list-page article header .margin-vert--md { + margin-top: 0 !important; + margin-bottom: 0.4rem !important; + font-size: 0.75rem; + color: var(--ifm-color-emphasis-600); +} + +/* Excerpt text — clamp to 2 lines */ +.blog-list-page article .markdown { + font-size: 0.8rem; + line-height: 1.45; + color: var(--ifm-color-emphasis-700); + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + overflow: hidden; + flex: 1; +} + +.blog-list-page article .markdown > *:first-child { + margin-top: 0; +} + +/* Footer (tags + read more) */ +.blog-list-page article footer.row { + margin-top: auto !important; + padding-top: 0.4rem; + border-top: 1px solid var(--ifm-color-emphasis-200); + font-size: 0.75rem; +} + +/* Hide duplicate H1/H2 in list excerpts */ +.blog-list-page article .markdown > h1:first-child, +.blog-list-page article .markdown > h2:first-child { + display: none; +} + +/* Remove default bottom margin on list items (grid handles spacing) */ +.blog-list-page .row > main > .margin-bottom--xl { + margin-bottom: 0 !important; +} + +/* Pagination — span full grid width */ +.blog-list-page nav.pagination-nav { + grid-column: 1 / -1; +} diff --git a/docs/src/pages/index.module.css b/docs/src/pages/index.module.css new file mode 100644 index 0000000000..7e1d944c57 --- /dev/null +++ b/docs/src/pages/index.module.css @@ -0,0 +1,20 @@ +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; + flex-wrap: wrap; + gap: 0.75rem; +} diff --git a/docs/src/pages/index.tsx b/docs/src/pages/index.tsx new file mode 100644 index 0000000000..44e3a1a8a0 --- /dev/null +++ b/docs/src/pages/index.tsx @@ -0,0 +1,52 @@ +import type {ReactNode} from 'react'; +import Link from '@docusaurus/Link'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import Layout from '@theme/Layout'; +import HomepageFeatures from '@site/src/components/HomepageFeatures'; +import Heading from '@theme/Heading'; + +import styles from './index.module.css'; + +function HomepageHeader() { + const {siteConfig} = useDocusaurusContext(); + return ( + <header className={`hero hero--primary ${styles.heroBanner}`}> + <div className="container"> + <Heading as="h1" className="hero__title"> + {siteConfig.title} + </Heading> + <p className="hero__subtitle">{siteConfig.tagline}</p> + <div className={styles.buttons}> + <Link + className="button button--secondary button--lg" + to="/guides/octobot"> + I want to use OctoBot + </Link> + <Link + className="button button--outline button--secondary button--lg" + to="/investing/introduction"> + I want to use OctoBot Cloud + </Link> + <Link + className="button button--outline button--secondary button--lg" + to="/developers/getting-started"> + I want to contribute + </Link> + </div> + </div> + </header> + ); +} + +export default function Home(): ReactNode { + return ( + <Layout + title="Home" + description="OctoBot is an open-source cryptocurrency trading bot. Guides for users, creators, and developers."> + <HomepageHeader /> + <main> + <HomepageFeatures /> + </main> + </Layout> + ); +} diff --git a/docs/src/pages/terms/about.md b/docs/src/pages/terms/about.md new file mode 100644 index 0000000000..55029f28b6 --- /dev/null +++ b/docs/src/pages/terms/about.md @@ -0,0 +1,46 @@ +--- +title: "About us" +description: "Who is behind OctoBot ? How to contact the team ? Meet the OctoBot team and learn more about the OctoBot community." +--- + + + +# About us + +## Who are we ? + +### OctoBot is made by a team of 2. + +Since 2018, OctoBot has been developed without external funding by [Paul](https://github.com/Drakkar-Software) and [Guillaume](https://github.com/Drakkar-Software). + +Both have a developer background +that allowed them to create OctoBot and the elements of the ecosystem +based on their ideas and strong user feedbacks. + +All this time allowed to develop a very strong code base while +in the mean time being able to +focus of user need and make OctoBot evolve in the right direction. + +### OctoBot is also shaped by its community + +The OctoBot team is also made of the community of its users, some of them are part +of the community for years now. It is thanks to this community that OctoBot keeps +evolving to answer best the users' needs and is growing towards the most useful features. + +For this we are extremely grateful and we thank the OctoBot community for +its help and amazing ideas over the past years. + +## Contact Us + +- Feel free to reach out at contact@octobot.cloud + +## Ecosystem status + +- OctoBot's ecosystem status is available at + <a href="https://status.octobot.online" rel="nofollow">status.octobot.online</a> + +## Thanks + +- <a href="https://www.jetbrains.com/opensource/" rel="nofollow">JetBrains</a> full package license for open source project. + +_OctoBot is entirely developed with the help of Pycharm IDE !_ diff --git a/docs/src/pages/terms/affiliate.md b/docs/src/pages/terms/affiliate.md new file mode 100644 index 0000000000..9f05e1f6a9 --- /dev/null +++ b/docs/src/pages/terms/affiliate.md @@ -0,0 +1,201 @@ +--- +title: "Affiliate terms" +description: "Know everything about OctoBot cloud Affiliate Terms. Earn passive income as an affiliate when your referenced users trade or subscribe to paid plans." +--- + +# Affiliate terms + +We welcome you to OctoBot cloud, a SaaS service provided by Drakkar-Software that hosts OctoBot software, a bot for cryptocurrency trading. The Octobot software, the www.octobot.cloud website, mobile applications, and application program interfaces are collectively referred to as the "Software". + +**By creating or sharing an affiliate link, you acknowledge and consent to being bound by these Affiliate Terms.** + +For matters not addressed in the Affiliate Terms, the Terms of Use will apply. If there is a discrepancy between the Affiliate Terms and the Terms of Use, the Affiliate Terms will prevail. Drakkar-Software reserves the right to modify the Affiliate Terms as outlined in Section 1 of the Terms of Use. + +For additional information, please visit our website at www.octobot.cloud or email us at contact@drakkar.software. + +## Definitions + +- A "**Affiliate**" is an individual or legal entity with a software user account, + without any bans, blocks, sanctions, or limitations, who has accepted + the Affiliate Terms and has expressed interest in participating in + OctoBot's affiliate program by attracting new clients via the + Affiliate Link. Affiliates must be manually approved by + Drakkar-Software to become an Affiliate. +- A "**Qualified Client**" is an individual or legal entity who has (a) successfully registered + on the Software using the Affiliate Link, (b) successfully initiated a + paid or free plan on the Software, and (c) is not the Affiliate. +- A "**Affiliate Link**" is a personalized hyperlink to the Software registration page, + generated automatically for the Affiliate to share with potential + Qualified Clients. + +## Affiliate's Responsibilities + +To become an Affiliate, you must: + +- Be a registered Client of the Software with a complete profile + that includes all requested data, including contact details. +- If representing a legal entity, apply to become an Affiliate by + contacting Drakkar-Software and receiving approval. +- Obtain an Affiliate Link in the affiliate section of the Software. + +The Affiliate can involve any person to become a Qualified Client of the Software, as long as it is legal and they ensure that such individuals: + +- Are of legal age to use the Software. +- Are not under the control of, or residing in, a jurisdiction that + explicitly bans the use of similar software to the Software. +- Understand that the use of the Software is at their discretion and + responsibility. + +The Affiliate is not authorized to: + +- Enter into or conclude any agreements on behalf of OctoBot or + Drakkar-Software. +- Present themselves as an employee or partner of OctoBot or + Drakkar-Software. +- Collude with other existing or potential clients of the Software + for illegal benefits. +- Make any statement or guarantee regarding the Software. + +Damage the reputation or image of Drakkar-Software and/or the Software. This includes, but is not limited to: + +- Misrepresentation of OctoBot or Drakkar-Software, such as + using false or misleading claims about OctoBot or + Drakkar-Software, its products, or services. This includes + implying an official partnership or endorsement by OctoBot or + Drakkar-Software when none exists. +- Spammy marketing practices, such as sending unsolicited emails + (spam) that include OctoBot or Drakkar-Software's name or + products. +- Unethical marketing practices, such as using clickbaits, fake + reviews, or deceptive tactics to promote OctoBot or + Drakkar-Software. +- Negative publicity, such as publicly criticizing or + disparaging OctoBot or Drakkar-Software, its products, or its + services or engaging in behavior that could lead to negative + press or social media backlash against OctoBot or + Drakkar-Software. +- Association with competing brands, such as promoting competing + brands in a way that undermines or devalues OctoBot or + Drakkar-Software's reputation or displaying OctoBot or + Drakkar-Software's ads or links alongside ads for direct + competitors in a manner that creates confusion or conflict. +- Violation of laws or regulations, such as engaging in illegal + activities or promoting OctoBot or Drakkar-Software in a way + that violates local, national, or international laws. This + includes using OctoBot or Drakkar-Software's name or products + in connection with fraudulent or unethical schemes. +- Poor quality or irrelevant content, such as promoting OctoBot + or Drakkar-Software on low-quality websites or platforms that + reflect poorly on OctoBot or Drakkar-Software's image. This + includes placing OctoBot or Drakkar-Software's ads or links on + irrelevant or unrelated content that could confuse or mislead + customers. +- Use any intellectual property of OctoBot or Drakkar-Software + contrary to the license provided under the Terms of Use. +- A new client brought to the Software by the Affiliate is considered a + Qualified Client only if they have followed the Affiliate Link + provided by the Affiliate to register and successfully started a paid + or free plan on the Software. + +## Payment terms + +**For an individual Affiliate** Drakkar-Software will pay a fee calculated as a percentage of the net +amount of any successful (and not refunded or reversed) payment for +the subscription made by a Qualified Client and a percentage of the +trading fees received by Drakkar-Software when the Qualified Client's +associated trading robot executes trades on a partner exchange with an +eligible exchange account: + +- Each Qualified Client brought by the Affiliate is subject to a 25% + fee on its paid subscriptions. +- The percentage of trading fees may vary depending on the + Software's partnership terms with each exchange and at the + discretion of Drakkar-Software. + +**For a legal entity Affiliate** who is accepted by Drakkar-Software to use the Software, +Drakkar-Software will pay a fee calculated as a percentage of the net +amount of any successful (and not refunded or reversed) payment for +the subscription made by a Qualified Client and a percentage of the +trading fees received by Drakkar-Software when the Qualified Client's +associated trading robot executes trades on a partner exchange with an +eligible exchange account: + +- The percentage of subscriptions and trading fees is agreed upon + when Drakkar-Software approves the legal entity’s application to + become an Affiliate. +- Drakkar-Software will pay the Affiliate's fee in US Dollars. + Drakkar-Software may require that the Affiliate sends Drakkar-Software + an invoice for the fee, in which case the Affiliate shall not be + entitled to the fee, unless it has sent Drakkar-Software an invoice + for the fee; +- The Affiliate's fee is credited to the client by Drakkar-Software each + month, proportional to the payments (and trades when applicable) made + by the Qualified Client. Amount might be converted from original + cryptocurrency to US Dollars equivalent at the rate of + Drakkar-Software and at the moment of such credit transaction + processing. + +### Drakkar-Software is not obliged to: + +- Transfer any fee for payments that don't comply with the Affiliate + Terms. +- Transfer funds accumulated in currencies other than acceptable + cryptocurrency, unless approved by Drakkar-Software. + +### With the funds in the Affiliate account, you can either: + +- Use the funds to pay for OctoBot subscriptions (these funds are + non-refundable), or +- Request a withdrawal to your external wallet, subject to + limitations, such as a minimal withdrawal amount, presented within + the Software. To request a withdrawal of the funds, contact + affiliate@octobot.cloud, accompanied by a comprehensive invoice + detailing the precise monetary amount. + +## Compliance with Sanctions + +By using Drakkar-Software's services, you confirm that you: + +Are not included in any trade embargos or economic sanctions lists, including but not limited to: + +- Restrictive measures of the European Union +- Sanctions of the United Nations +- Sanctions of the Government of France +- the list of specially designated nationals maintained by + Office of Foreign Assets Control (OFAC) of the U.S. Department + of the Treasury +- the denied persons or entity list of the U.S. Department of + Commerce +- Lists of subjects to Financial Sanctions maintained by the UK + Office of Financial Sanctions Implementation (OFSI) +- Do not violate or circumvent any international sanctions or + restrictive measures established by the European Union, United + Nations, United States of America, United Kingdom, or any other + sanctions applicable in France +- Are not from countries or geographical regions under sanctions + imposed by the European Union, United Nations, United States of + America, United Kingdom, and or any other international sanctions + applicable in France. + +And: + +- Drakkar-Software reserves the right to choose markets and + jurisdictions to conduct business, and may restrict or refuse the + provision of services in certain countries or regions. +- If you become subject to international sanctions, you must immediately + stop using our services and notify Drakkar-Software. +- We reserve the right to terminate, suspend, or restrict our services + to you, or terminate this Agreement if you become a subject of + international sanctions, if providing services to you violates or + circumvents international sanctions, or if you are related to a + territory, area of activity, transaction, or person subject to + international sanctions according to our assessment. +- We also reserve the right to terminate, suspend, or restrict our + services to you, or terminate this Agreement if we decide to limit our + business activities in certain markets and jurisdictions as mentioned + in Section 4. + +## Updates + +Updated the 02/09/2025 to add examples of behaviors damaging the +reputation or image of Drakkar-Software and/or the Software. diff --git a/docs/src/pages/terms/index.md b/docs/src/pages/terms/index.md new file mode 100644 index 0000000000..f8626fdf38 --- /dev/null +++ b/docs/src/pages/terms/index.md @@ -0,0 +1,462 @@ +--- +title: "Terms of use" +description: "Know everything about OctoBot cloud terms of use. What can you do and what can't you do when using OctoBot cloud" +--- + +# Terms of use + +Welcome to OctoBot cloud, the SaaS service offered by Drakkar-Software to host the OctoBot software, a cryptocurrency trading bot. By accessing and using OctoBot cloud, you agree to be bound by the following terms and conditions. + +OctoBot cloud is a service that allows users to access and use trading strategies for cryptocurrency trading through the OctoBot software. Using OctoBot cloud for trading involves significant risks and potential for financial losses. + +OctoBot cloud and Drakkar-Software are not responsible for any trading related losses or financial losses resulting from the use of the service. + +Users can publish their own trading strategies on the OctoBot cloud platform. OctoBot cloud owns the intellectual property of any strategy published on the platform. + +For more information, please visit our website at www.octobot.cloud or contact us at contact@drakkar.software. + +## Introduction + +- OctoBot cloud is a Service-as-a-Software (SaaS) offering + from Drakkar-Software, which provides hosting services for + OctoBot software, a cryptocurrency trading bot. +- The purpose of OctoBot cloud is to allow users to access + and utilize the capabilities of the OctoBot software for + the purpose of trading cryptocurrencies on supported + exchanges. +- By accessing and using OctoBot cloud, the user agrees to + be bound by these terms and conditions, which constitute a + legally binding agreement between the user and + Drakkar-Software. +- Drakkar-Software reserves the right to modify these terms + and conditions at any time, with notice to the user. The + user is responsible for regularly reviewing these terms + and conditions. Continued use of OctoBot cloud after any + modifications indicates the user's acceptance of the + modified terms. +- Drakkar-Software reserves the right to make changes to any + product at any time without prior notice to the user. It + is the responsibility of the user to regularly review + these details and updates. By continuing to use OctoBot + Cloud after any changes have been made, the user is + acknowledging and accepting the updated product. +- By using OctoBot cloud, the user agrees to the collection + and use of their personal information as outlined in the + Privacy Policy. The [Privacy Policy](terms/privacy), provides more information + about the types of information collected, how it is used, + and with whom it may be shared. The user is encouraged to + review the Privacy Policy carefully and to contact OctoBot + Cloud if they have any questions or concerns. +- By using OctoBot cloud and its services, the user + acknowledges and agrees to the terms and conditions set + forth in this agreement, including but not limited to the + [Risks Disclosure Statement](terms/risk). The user is + advised to regularly review the terms and conditions, + including the [Risks Disclosure Statement](terms/risk), to stay informed + of any changes. + +## Use of Service + +- The OctoBot cloud service is provided by Drakkar-Software + and allows users to access and use the OctoBot software, a + cryptocurrency trading bot, through a subscription + service. +- OctoBot cloud users are responsible for ensuring that + their use of the service complies with all applicable laws + and regulations. +- OctoBot cloud and Drakkar-Software do not provide + financial, investment, legal, tax, or any other + professional advice. The use of the OctoBot software, + whether it be through customization, copying or using + trading strategies or trading signals, involves + significant risks and potential for financial loss. +- OctoBot and OctoBot cloud use crypto exchanges as third + parties to collect price and chart data. +- Users are permitted to create, publish, and update their + own trading strategies on the OctoBot cloud platform. + Users retain control over access to their strategies and + may manage permissions and conditions for other users to + view or use them, in accordance with their own terms and + conditions, provided they do not conflict with these Terms + of use. +- OctoBot cloud is not responsible for the performance of + any trading strategy, or it's traded crypto-assets. Gains + or losses resulting from the use of OctoBot cloud are + user's sole responsibility. Displayed performance is based + on past data and cannot guarantee future performance. +- OctoBot cloud is not responsible for the actions or + security issues of any company, team member, or community + member associated with traded crypto-assets. Users are + sole responsible for assessing the risks and merits of any + investment decision. +- By using crypto baskets, users agree that the composition + of the basket may be automatically updated in response to + changing market conditions to align with the selected + basket's goal. OctoBot cloud is not liable for any + resulting gains or losses associated to these updates. +- Artificial Intelligence (AI) services, strategies, and + answers provided by OctoBot cloud are for informational + purposes only and do not constitute financial, investment, + legal, tax, or any other professional advice. Any + decision made based on AI-generated content made by the + user is at its own risk. OctoBot cloud is not responsible + for any resulting gains or losses. +- OctoBot cloud retains the intellectual property of any + strategy published on the platform. + +## Registration and Accounts + +- In order to use the OctoBot cloud services, you must + create a user account. You are responsible for maintaining + the confidentiality of your account and password, and you + agree to accept responsibility for all activities that + occur under your account. +- You must provide accurate and complete information during + the registration process and keep your information + up-to-date. OctoBot cloud reserves the right to refuse + registration or cancel an account at any time if the + information provided is false, inaccurate, or incomplete. +- You must notify us in writing immediately if you become + aware of any unauthorized use of your account. +- You must not use any other person's account to access the + website. + +## Subscription + +- To access the trading strategies or other services offered + by OctoBot cloud, users may be required to pay a + subscription fee. The subscription fee will be billed on a + recurring basis, according to the selected plan. +- The user may choose to upgrade or downgrade their + subscription plan at any time, and the changes will be + reflected in their next billing cycle. +- Drakkar-Software reserves the right to change the + subscription fees at any time, with or without notice to + the user. Continued use of OctoBot cloud after any fee + changes indicates the user's acceptance of the modified + fees. +- If the user chooses to cancel their subscription, the + subscription will be canceled instantly before the end of + their current billing cycle. The user may request a refund + in accordance with the [Refund Policy](terms/refund). + +## Payment Options + +- Users can pay directly using their credit cards or crypto + to access the services offered by OctoBot cloud. +- When paying with a credit card, the user's card will be + charged at the time of purchase. The user's subscription + fee will be billed on a recurring basis, according to the + selected plan. +- When paying with crypto, the user's account will be + charged at the time of purchase. The user's subscription + fee will be billed on a recurring basis, according to the + selected plan. +- The user is responsible for ensuring that their payment + information is up-to-date and accurate, and for promptly + updating their information if any changes occur. Failure + to maintain accurate payment information may result in the + suspension or termination of the user's account. +- Users should not use stolen or credit cards that they do + not own. Users should not use stolen crypto or crypto that they + do not own. The use of unauthorized or stolen payment + information may result in the suspension or termination of + the user's account, and may also subject the user to + criminal or civil liabilities. The user is responsible for + ensuring that their payment information is properly + obtained. OctoBot cloud reserves the right to take + appropriate legal action against any user who uses + unauthorized or stolen payment information. By using the + platform, the user represents and warrants that their + payment information is accurate and that they have the + right to use the payment method they have provided. +- OctoBot cloud may offer users a free trial period during + which they can access and use the service without + incurring any charges. The free trial period will be + specified at the time of registration and may be subject + to additional terms and conditions. Users may not be + required to provide a valid payment method to sign up for + a free trial and will not be charged unless they continue + to use the service after the trial period has ended. + OctoBot Cloud reserves the right to modify or discontinue + the free trial offer at any time without notice. +- OctoBot cloud does offer refunds in accordance with the + free trial. Users are responsible for understanding the features and + limitations of the service before making a purchase. In + accordance with applicable laws, the right of the user to + retract from their purchase is excluded in the case of the + provision of services, such as access to OctoBot cloud + services. + +## OctoBot Rewards Program + +- OctoBot cloud reserves the right, in its sole and absolute + discretion, to void, cancel, or remove any and all OctoBot + cloud reward points ("experience points") and levels a + user may have earned or accumulated, at any time and + without prior notice. This action may be taken if OctoBot + cloud reasonably believes or suspects that such points or + levels have been obtained or accumulated through + fraudulent activity, suspicious behavior, or any means in + violation of these Terms and Conditions, or in any manner + inconsistent with the purpose or intention of the OctoBot + Rewards Program. This includes, but is not limited to, the + usage of automated methods, deception, or + misrepresentation of any sort. No appeal or challenge + process shall be available to the user in relation to any + such decision by OctoBot cloud. +- By using the services of OctoBot cloud, you agree to these + Rewards Program terms and acknowledge that the decision of + OctoBot in these matters is final and binding. + +## Account management + +- The user is responsible for maintaining the + confidentiality of their OctoBot cloud account and + password, and for restricting access to their computer or + device. The user agrees to accept responsibility for all + activities that occur under their account or password. +- The user may not use OctoBot cloud for any illegal or + unauthorized purposes, and they must comply with all + local, state, federal, and international laws and + regulations. +- The user may not modify, adapt, translate, or reverse + engineer any part of OctoBot cloud, or attempt to do so. +- Drakkar-Software reserves the right to terminate or + suspend the user's account, without notice and without + liability, if the user violates any of these terms and + conditions, or engages in any illegal or unauthorized + activities. +- The user may cancel their account at any time, by + contacting Drakkar-Software. Upon cancellation, the user's + account and all associated data will be permanently + deleted after up to 3 months. The user may request a + refund for any subscription in accordance with the + [Refund Policy](terms/refund). +- The user may not transfer their account to any other + person or entity, and any attempt to do so will be null + and void. +- The user is responsible for regularly backing up any data + or information stored on OctoBot cloud, as + Drakkar-Software is not responsible for any loss of data + or information. + +## Availability of Service + +- OctoBot cloud is a Service-as-a-Software (SaaS) service + offered by Drakkar-Software, located at www.octobot.cloud. +- OctoBot cloud provides its services to its users on a + reasonable effort basis and will use commercially + reasonable efforts to make the service available 24 hours + a day, 7 days a week, except for: (i) planned downtime (of + which OctoBot cloud shall give at least 4 hours notice via + the website), or (ii) any unavailability caused by + circumstances beyond OctoBot cloud's reasonable control, + including without limitation, acts of God, acts of + government, floods, fires, earthquakes, civil unrest, acts + of terror, strikes or other labor problems, or Internet + service provider failures or delays. +- OctoBot cloud does not guarantee the availability of its + service and does not accept any responsibility for any + unavailability. + +## Responsibility of User Content + +- By using the OctoBot cloud service, you grant OctoBot + Cloud and Drakkar-Software the right to use, modify, + display, distribute, and create derivative works of your + User Content for the purpose of providing the OctoBot + Cloud service. +- You are solely responsible for the User Content that you + make available through the OctoBot cloud platform. You + represent and warrant that: (i) you either are the sole + and exclusive owner of all User Content or you have all + rights, licenses, consents, and releases necessary to + grant OctoBot cloud and Drakkar-Software the rights in + such User Content, and (ii) neither the User Content, nor + your submission, uploading, publishing, or otherwise + making available of such User Content, nor OctoBot cloud + and Drakkar-Software's use of the User Content as + permitted herein will infringe, misappropriate or violate + a third party's patent, copyright, trademark, trade + secret, moral rights or other proprietary or intellectual + property rights, or rights of publicity or privacy, or + result in the violation of any applicable law or + regulation. +- OctoBot cloud and Drakkar-Software reserve the right to + remove any User Content from the OctoBot cloud platform at + any time, for any reason or for no reason, including User + Content that OctoBot cloud and Drakkar-Software believe + violates these terms and conditions. + +## Links and third party sites + +- OctoBot cloud and its content (including information sent + to you) may contain links or references to third-party + websites. Such links and references may display content + from third-party websites. Any such links, references and + content are provided for your convenience only. OctoBot + cloud has no control over third party websites and accept + no legal responsibility for any content, material or + information contained in them. The display of any + hyperlink and reference to any third party website does + not mean that OctoBot cloud endorse that third party's + website, products or services. Your use of a third party + website may be governed by the terms and conditions of + that third party website. + +## Modification of Terms and Conditions + +- Drakkar-Software reserves the right to modify these Terms + and Conditions at any time, in its sole discretion. If + Drakkar-Software makes a material change to these Terms + and Conditions, Drakkar-Software will notify the user + through the OctoBot cloud website (www.octobot.cloud) or + through the user's registered email. The user's continued + use of the OctoBot cloud and OctoBot software following + the posting of changes to these Terms and Conditions + constitutes acceptance of those changes. If the user does + not agree to the modified terms, the user must discontinue + using the OctoBot cloud and OctoBot software. + +## Termination of Service + +- Drakkar-Software may, at its discretion, terminate or + suspend the user's access to the OctoBot cloud and OctoBot + software at any time and without notice, for any reason, + including but not limited to, the user's violation of + these Terms and Conditions. +- Upon termination, the user's right to use the OctoBot + Cloud and OctoBot software will immediately cease, and + Drakkar-Software may, but shall not be obligated to, + remove all the user's information and files from its + servers. The user agrees that Drakkar-Software shall not + be liable to the user or any third party for any + termination of the user's access to the OctoBot cloud and + OctoBot software. + +## Warranties and Disclaimers + +- OctoBot cloud and all associated services are provided "as + is" and "as available", without warranty of any kind, + either express or implied. +- Drakkar-Software makes no representations or warranties of + any kind, express or implied, as to the operation of + OctoBot cloud, or the information, content, materials, or + products included on OctoBot cloud. The user agrees that + their use of OctoBot cloud is at their sole risk. +- Drakkar-Software does not warrant that OctoBot cloud will + be uninterrupted or error-free, and the company does not + make any warranties as to the accuracy, completeness, + reliability, or availability of OctoBot cloud or its + associated services. +- The user has the option to use the provided trading + strategies as is, customize them, or create copies of + them. The user acknowledges that the use of OctoBot cloud + and the execution of trades through the platform is + subject to market risk, and that the user may lose money. + The user is solely responsible for their trades and for + any associated losses. +- Drakkar-Software is not responsible for any losses or + damages that may arise from the use of OctoBot cloud, + including but not limited to direct, indirect, incidental, + punitive, and consequential damages. +- Drakkar-Software makes no warranty that OctoBot cloud will + meet the user's requirements, or that OctoBot cloud will + be uninterrupted, timely, secure, or error-free. +- The user agrees to indemnify and hold Drakkar-Software and + its affiliates, officers, agents, and employees harmless + from any claim or demand, including reasonable attorneys' + fees, made by any third party due to or arising out of the + user's use of OctoBot cloud, or the user's violation of + these terms and conditions. +- These terms and conditions, together with the OctoBot + Cloud privacy policy, constitute the entire agreement + between the user and Drakkar-Software, and supersede all + prior agreements, representations, and understandings. + +## Limitation of Liability + +- OctoBot, OctoBot cloud, and Drakkar-Software are not + responsible for any trading-related loss or any loss + caused by the use of the OctoBot software. +- In no event shall OctoBot cloud or Drakkar-Software be + liable for any direct, indirect, incidental, special, + punitive, or consequential damages arising from the use of + the OctoBot cloud service or OctoBot software. +- OctoBot cloud and Drakkar-Software make no representations + or warranties about the accuracy, completeness, security, + or reliability of the OctoBot cloud service or the OctoBot + software. +- Users understand and agree that the use of OctoBot cloud + and OctoBot software is at their own risk. + +## Third party rights + +- A contract under these terms and conditions is for our + benefit and your benefit, and is not intended to benefit + or be enforceable by any third party. +- The exercise of the parties' rights under a contract under + these terms and conditions is not subject to the consent + of any third party. + +## Linking to OctoBot cloud + +Organizations may link to OctoBot cloud, publications or to +other OctoBot cloud information so long as the link: (a) is +not in any way deceptive; (b) does not falsely imply +sponsorship, endorsement or approval of the linking party +and its products and/or services; and (c) fits within the +context of the linking party's site. + +## Third party licenses + +OctoBot cloud makes use of various graphic elements provided +by Freepik, a platform that offers a large selection of free +graphic resources. These elements are used under the free +license offered by Freepik and are subject to its terms and +conditions, which can be found at freepik.com. Most images +are designed by Deinos Art and vectorjuice / Freepik. + +## Risks Disclosure Statement + +Please be advised that the use of OctoBot cloud and its +services may involve certain risks and uncertainties, +including but not limited to the risk of technological +malfunctions, operational errors, and security breaches. The +user acknowledges and agrees that they use OctoBot cloud and +its services at their own risk. + +## Privacy policy + +Users are referred to the OctoBot cloud Privacy Policy, +available at www.octobot.cloud/en/terms/privacy, for further information regarding the collection, use, and +disclosure of their personal information by OctoBot cloud. + +## Copyright Notice + +All content included on OctoBot cloud website, such as text, +graphics, logos, images, data compilations, software, and +all other material is the property of Drakkar-Software, +OctoBot cloud or its content suppliers and is protected by +international copyright laws. Any use, including but not +limited to the reproduction, distribution, display or +transmission of the content of this website is strictly +prohibited, unless authorized by Drakkar-Software, or +OctoBot cloud. Users do not acquire any ownership rights by +using OctoBot cloud services or accessing the content +provided on the website. Any unauthorized use may result in +severe civil and criminal penalties. + +## Updates + +- Updated the 18/06/2024 to add the "OctoBot Rewards Program" + and "Links and third party sites" sections. + +- Updated the 01/02/2024 to add crypto payments terms and to introduce the Refund Policy. + +- Updated the 07/01/2025 to update user-created strategies and + crypto-baskets updates terms. Also added AI content terms + and crypto-assets risks disclaimer. + +- Updated the 22/04/2025 to include privacy policy and risk disclose links. diff --git a/docs/src/pages/terms/privacy.md b/docs/src/pages/terms/privacy.md new file mode 100644 index 0000000000..bc80770992 --- /dev/null +++ b/docs/src/pages/terms/privacy.md @@ -0,0 +1,155 @@ +--- +title: "Privacy policy" +description: "Know everything about OctoBot cloud privacy policy. Consent how we use your information, log files, cookies and third party policies" +--- + + + +# Privacy policy + +At OctoBot cloud, accessible from www.octobot.cloud, one of our main +priorities is the privacy of our visitors. This Privacy Policy document +contains types of information that is collected and recorded by OctoBot cloud +and how we use it. + +If you have additional questions or require more information about our Privacy +Policy, do not hesitate to contact us. + +This Privacy Policy applies only to our online activities and is valid for +visitors to our website with regards to the information that they shared +and/or collected in OctoBot cloud. This policy is not applicable to any +information collected offline or via channels other than this website. + +## Consent + +- OctoBot cloud is a SAS (Software as a Service) offering from + Drakkar-Software, which provides hosting services for OctoBot + software, a cryptocurrency trading bot. +- The purpose of OctoBot cloud is to allow users to access and utilize + the capabilities of the OctoBot software for the purpose of trading + cryptocurrencies on supported exchanges. +- By accessing and using OctoBot cloud, the user agrees to be bound by + these terms and conditions, which constitute a legally binding + agreement between the user and Drakkar-Software. +- Drakkar-Software reserves the right to modify these terms and + conditions at any time, with notice to the user. The user is + responsible for regularly reviewing these terms and conditions. + Continued use of OctoBot cloud after any modifications indicates the + user's acceptance of the modified terms. +- Drakkar-Software reserves the right to make changes to any product + at any time without prior notice to the user. It is the + responsibility of the user to regularly review these details and + updates. By continuing to use OctoBot Cloud after any changes have + been made, the user is acknowledging and accepting the updated + product. +- By using OctoBot cloud, the user agrees to the collection and use of + their personal information as outlined in the Privacy Policy. The + Privacy Policy, which is incorporated into these terms and + conditions, provides more information about the types of information + collected, how it is used, and with whom it may be shared. The user + is encouraged to review the Privacy Policy carefully and to contact + OctoBot Cloud if they have any questions or concerns. +- By using OctoBot cloud and its services, the user acknowledges and + agrees to the terms and conditions set forth in this agreement, + including but not limited to the [Risks Disclosure Statement](risk). + The user is advised to regularly review the terms and + conditions, including the Risks Disclosure Statement, to stay + informed of any changes. +- Information we collect + +## How we use your information + +- Provide, operate, and maintain our website +- Improve, personalize, and expand our website +- Understand and analyze how you use our website +- Develop new products, services, features, and functionality +- Communicate with you, either directly or through one of our + partners, including for customer service, to provide you with + updates and other information relating to the website, and for + marketing and promotional purposes +- Send you emails +- Find and prevent fraud +- Log Files + OctoBot cloud follows a standard procedure of using log files. These + files log visitors when they visit websites. All hosting companies do + this as part of hosting services' analytics. The information collected + by log files include internet protocol (IP) addresses, browser type, + Internet Service Provider (ISP), date and time stamp, referring/exit + pages, and possibly the number of clicks. These are not linked to any + information that is personally identifiable. The purpose of the + information is for analyzing trends, administering the site, tracking + users' movement on the website, and gathering demographic information. +- Cookies and Web Beacons + Like any other website, OctoBot cloud uses 'cookies'. These cookies + are used to store information including visitors' preferences, and the + pages on the website that the visitor accessed or visited. The + information is used to optimize the users' experience by customizing + our web page content based on visitors' browser type and/or other + information. +- Third Party Privacy Policies + OctoBot cloud's Privacy Policy does not apply to other advertisers or + websites. Thus, we are advising you to consult the respective Privacy + Policies of these third-party ad servers for more detailed + information. It may include their practices and instructions about how + to opt-out of certain options. You can choose to disable cookies + through your individual browser options. To know more detailed + information about cookie management with specific web browsers, it can + be found at the browsers' respective websites. Additionally, OctoBot + cloud uses third-party analytics services including + <a href="https://www.cloudflare.com/analytics/" rel="nofollow">Cloudflare Analytics</a> + , + <a href="https://vercel.com/analytics" rel="nofollow">Vercel Analytics</a> + , and <a href="https://posthog.com/" rel="nofollow">Posthog</a> to track user behavior on the site. In order to process + credit card payments, OctoBot Cloud utilizes Stripe, a third-party payment + processing service. Stripe collects certain personal information, including + credit card numbers, in order to process payments. This information is + securely stored by Stripe, and OctoBot Cloud does not have access to it. For + more information about privacy practices, please refer to their Privacy + Policy. Stripe's privacy practices, please refer to their Privacy Policy. +- CCPA Privacy Rights (Do Not Sell My Personal Information) + Under the CCPA, among other rights, California consumers have the + right to: Request that a business that collects a consumer's personal + data disclose the categories and specific pieces of personal data that + a business has collected about consumers. Request that a business + delete any personal data about the consumer that a business has + collected. Request that a business that sells a consumer's personal + data, not sell the consumer's personal data. If you make a request, we + have three months to respond to you. If you would like to exercise any + of these rights, please contact us. + +## GDPR Data Protection Rights + +- The right to access: You have the right to request copies of your + personal data. We may charge you a small fee for this service. +- The right to rectification: You have the right to request that we + correct any information you believe is inaccurate. You also have the + right to request that we complete the information you believe is + incomplete. +- The right to erasure: You have the right to request that we erase + your personal data, under certain conditions. +- The right to restrict processing: You have the right to request that + we restrict the processing of your personal data, under certain + conditions. +- The right to object to processing: You have the right to object to + our processing of your personal data, under certain conditions. +- The right to data portability: You have the right to request that we + transfer the data that we have collected to another organization, or + directly to you, under certain conditions. +- Children's Information + Another part of our priority is adding protection for children while + using the internet. We encourage parents and guardians to observe, + participate in, and/or monitor and guide their online activity. + OctoBot cloud does not knowingly collect any Personal Identifiable + Information from children under the age of 13. If you think that your + child provided this kind of information on our website, we strongly + encourage you to contact us immediately and we will do our best + efforts to promptly remove such information from our records. +- Changes to This Privacy Policy + We may update our Privacy Policy from time to time. Thus, we advise + you to review this page periodically for any changes. We will notify + you of any changes by posting the new Privacy Policy on this page. + These changes are effective immediately, after they are posted on this + page. +- Contact Us + If you have any questions or suggestions about our Privacy Policy, do + not hesitate to contact us at contact@drakkar.software. diff --git a/docs/src/pages/terms/referral.md b/docs/src/pages/terms/referral.md new file mode 100644 index 0000000000..459814b837 --- /dev/null +++ b/docs/src/pages/terms/referral.md @@ -0,0 +1,125 @@ +--- +title: "Terms of Referral" +description: "Know everything about OctoBot cloud Terms of Referral. Unlock rewards by bringing your friend to OctoBot." +--- + +# Terms of Referral + +We welcome you to OctoBot cloud, a SaaS service provided by Drakkar-Software +that hosts OctoBot software, a bot for cryptocurrency trading. The Octobot +software, the www.octobot.cloud website, mobile applications, and application +program interfaces are collectively referred to as the "Software". + +**By creating or sharing a referral link, you acknowledge and consent to being +bound by these Referral Terms.** + +For matters not addressed in the Referral Terms, the Terms of Use will apply. +If there is a discrepancy between the Referral Terms and the Terms of Use, the +Referral Terms will prevail. Drakkar-Software reserves the right to modify the +Referral Terms as outlined in Section 1 of the Terms of Use. + +For additional information, please visit our website at www.octobot.cloud or +email us at contact@drakkar.software. + +## Definitions + +- A "**Referrer**" is an individual or legal entity with a software user account, + without any bans, blocks, sanctions, or limitations, who has accepted + the Referral Terms and has expressed interest in participating in + OctoBot's referral program by attracting new clients via the Referral + Link. +- A "**Qualified Client**" is an individual or legal entity who has (a) successfully registered + on the Software using the Referral Link, (b) successfully initiated a + paid or free plan on the Software, and (c) is not the Referrer. +- A "**Referral Link**" is a personalized hyperlink to the Software registration page, + generated automatically for the Referrer to share with potential + Qualified Clients. + +## Referrer's Responsibilities + +#### To become a Referrer, you must: + +- Be a registered Client of the Software with a complete profile + that includes all requested data, including contact details. +- Obtain a Referral Link in the referral section of the Software. + +#### The Referrer can involve any person to become a Qualified Client of the Software, as long as it is legal and they ensure that such individuals: + +- Are of legal age to use the Software. +- Are not under the control of, or residing in, a jurisdiction that + explicitly bans the use of similar software to the Software. +- Understand that the use of the Software is at their discretion and + responsibility. + +#### The Referrer is not authorized to: + +- Enter into or conclude any agreements on behalf of OctoBot or + Drakkar-Software. +- Present themselves as an employee or partner of OctoBot or + Drakkar-Software. +- Collude with other existing or potential clients of the Software + for illegal benefits. +- Make any statement or guarantee regarding the Software. +- Damage the reputation or image of Drakkar-Software and/or the + Software. +- Use any intellectual property of OctoBot or Drakkar-Software + contrary to the license provided under the Terms of Use. +- A new client brought to the Software by the Referrer is considered a + Qualified Client only if they have followed the Referral Link provided + by the Referrer to register and successfully started a paid or free + plan on the Software. + +## Referral rewards + +- Each Qualified Client brought by the Referrer will generate rewards + for the Referrer. +- Actions performed by Qualified Clients such as successful (and not + refunded or reversed) purchase of certain products might generate + rewards for the Referrer. +- Reward amounts associated to each referral reward are defined in the + Rewards section of the software. +- Reward amounts associated to each referral reward are defined in the + Rewards section of the software. + +## Compliance with Sanctions + +By using Drakkar-Software's services, you confirm that you: + +Are not included in any trade embargos or economic sanctions lists, including but not limited to: + +- Restrictive measures of the European Union +- Sanctions of the United Nations +- Sanctions of the Government of France +- the list of specially designated nationals maintained by + Office of Foreign Assets Control (OFAC) of the U.S. Department + of the Treasury +- the denied persons or entity list of the U.S. Department of + Commerce +- Lists of subjects to Financial Sanctions maintained by the UK + Office of Financial Sanctions Implementation (OFSI) +- Do not violate or circumvent any international sanctions or + restrictive measures established by the European Union, United + Nations, United States of America, United Kingdom, or any other + sanctions applicable in France +- Are not from countries or geographical regions under sanctions + imposed by the European Union, United Nations, United States of + America, United Kingdom, and or any other international sanctions + applicable in France. + +And that you: + +- Drakkar-Software reserves the right to choose markets and + jurisdictions to conduct business, and may restrict or refuse the + provision of services in certain countries or regions. +- If you become subject to international sanctions, you must immediately + stop using our services and notify Drakkar-Software. +- We reserve the right to terminate, suspend, or restrict our services + to you, or terminate this Agreement if you become a subject of + international sanctions, if providing services to you violates or + circumvents international sanctions, or if you are related to a + territory, area of activity, transaction, or person subject to + international sanctions according to our assessment. +- We also reserve the right to terminate, suspend, or restrict our + services to you, or terminate this Agreement if we decide to limit our + business activities in certain markets and jurisdictions as mentioned + in Section 4. diff --git a/docs/src/pages/terms/refund.md b/docs/src/pages/terms/refund.md new file mode 100644 index 0000000000..cf3c2af7cb --- /dev/null +++ b/docs/src/pages/terms/refund.md @@ -0,0 +1,57 @@ +--- +title: "Refund policy" +description: "Explore OctoBot cloud's refund policy. Subscription cancellation, refund conditions for monthly and annual plans, and payment processing details." +--- + +# Refund policy + +This Refund Policy is effective as of February 01, 2024. Terms used +herein have the meanings defined in the [Terms of Use](/terms). + +For any payment or refund issues, please contact support via email +contact@octobot.cloud. + +## Monthly Subscription Refund + +Eligible for a refund within fourteen (14) days of purchase. +Contact support +for a refund request. + +## Annual Subscription Refund + +For annual subscriptions, after the grace period of 14 days, used +months are calculated based on the monthly subscription rate. If a +user cancels within 180 days of payment, they're eligible for a +refund of the unused months, calculated at the monthly plan rate. +For instance, if the subscription was used for 3 months and 15 +days, the refund covers 8 unused months. No refunds are issued +after 180 days from payment. + +## Lifetime Purchase Refund + +Eligible for a refund within thirty (30) days of purchase. +Contact support +for a refund request. + +## Payment Processing + +Refunds processed through the original payment method, including +Stripe, Apple AppStore, Google Play, or cryptocurrencies. + +## Refund Restrictions + +A refund may be utilized by the user only once. Upon approval of a +refund request, the user will not have access to further contests, +disputes, or transaction reversals with OctoBot cloud or any +third-party payment service. + +## Currency and Fees + +Refunds in the same currency or equivalent. OctoBot cloud is not +responsible for any fees from payment service providers. + +## Limitation on Refunds + +Refund requests can be made only once. After a refund is approved, +no subsequent contests, disputes, or transaction reversals are +permitted. diff --git a/docs/src/pages/terms/risk.md b/docs/src/pages/terms/risk.md new file mode 100644 index 0000000000..2fb4958015 --- /dev/null +++ b/docs/src/pages/terms/risk.md @@ -0,0 +1,57 @@ +--- +title: "Risks Disclosure Statement" +description: "Explore OctoBot cloud's risk disclosure statement. Understand the potential risks associated with trading and investing." +--- + +# Risks Disclosure Statement + +The use of OctoBot cloud's services involves inherent risks and +uncertainties. As a user, you acknowledge and agree to assume all such +risks, including but not limited to the risks described in the statement. + + +By using OctoBot cloud's services, you acknowledge that you understand +and accept the risks disclosed in this statement. OctoBot cloud shall +not be liable for any loss or damages resulting from the use of its +services, except in the case of gross negligence or intentional +misconduct by OctoBot cloud. + + +## Data Loss + +OctoBot cloud is not responsible for any loss or +corruption of data or information stored or processed through our +services. The user is responsible for backing up their data and +taking appropriate measures to protect it. + +## Service Interruptions + +OctoBot cloud makes no representation or +warranty regarding the availability or reliability of its services. +Interruptions to the service may occur for a variety of reasons, +including but not limited to, maintenance, upgrades, network +failures, and other technical issues. + +## Security Risks + +OctoBot cloud takes measures to secure its services +and user data, but no security measures can guarantee complete +protection against unauthorized access or data breaches. The user is +responsible for ensuring that their account information, including +passwords, are kept secure. + +## Market Risks + +OctoBot cloud provides tools and services related to +trading and investment, but market conditions and financial +performance are subject to fluctuations and uncertainties. OctoBot +Cloud does not guarantee any specific results or returns, and the +user assumes all risks associated with their investments or trades. + +## Third-Party Risks + +OctoBot cloud integrates with various third-party +services and platforms, which are subject to their own risks and +uncertainties. OctoBot cloud is not responsible for the actions or +performance of these third-party services and platforms, and the +user assumes all risks associated with their use. diff --git a/docs/src/theme/BlogPostItem/index.tsx b/docs/src/theme/BlogPostItem/index.tsx new file mode 100644 index 0000000000..f5951f22b2 --- /dev/null +++ b/docs/src/theme/BlogPostItem/index.tsx @@ -0,0 +1,38 @@ +import React, {type ReactNode} from 'react'; +import BlogPostItem from '@theme-original/BlogPostItem'; +import type BlogPostItemType from '@theme/BlogPostItem'; +import type {WrapperProps} from '@docusaurus/types'; +import {useBlogPost} from '@docusaurus/plugin-content-blog/client'; +import Link from '@docusaurus/Link'; + +type Props = WrapperProps<typeof BlogPostItemType>; + +function CoverImage(): ReactNode { + const {metadata, frontMatter, isBlogPostPage} = useBlogPost(); + + // Only show in list view, not on the full post page + if (isBlogPostPage) return null; + + const image = frontMatter.image as string | undefined; + if (!image) return null; + + return ( + <Link to={metadata.permalink} className="blog-post-cover-link"> + <img + src={image} + alt={metadata.title} + className="blog-post-cover-image" + loading="lazy" + /> + </Link> + ); +} + +export default function BlogPostItemWrapper(props: Props): ReactNode { + return ( + <div className="blog-post-card"> + <CoverImage /> + <BlogPostItem {...props} /> + </div> + ); +} diff --git a/docs/static/.nojekyll b/docs/static/.nojekyll new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/static/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-dark.png b/docs/static/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-dark.png new file mode 100644 index 0000000000..8a9a2f38d5 --- /dev/null +++ b/docs/static/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-dark.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ac48ebec2def2b0944a80cc2f751659938d4157f3f744c5ab2334c62dbee902 +size 378187 diff --git a/docs/static/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png b/docs/static/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png new file mode 100644 index 0000000000..9fc8c1466e --- /dev/null +++ b/docs/static/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a448bbde89a0d73c7a35351308fec9692e02ccb75b85db8baaf77fc1f2839d3 +size 380290 diff --git a/docs/static/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png b/docs/static/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png new file mode 100644 index 0000000000..3bbbcc9da7 --- /dev/null +++ b/docs/static/images/blog/announcing-the-bitmart-and-octobot-partnership/bitmart-and-octobot-partnership.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5447d3c6bc8380ffe72a0f8849bdf644ef66ab5f97e32fd341bd6dd3a98889d3 +size 902251 diff --git a/docs/static/images/blog/authors/guillaume-dsm-profile-picture.jpg b/docs/static/images/blog/authors/guillaume-dsm-profile-picture.jpg new file mode 100644 index 0000000000..d2f6ff7074 --- /dev/null +++ b/docs/static/images/blog/authors/guillaume-dsm-profile-picture.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f4c24be854f1f15ef351f4fd7d75fd5ee0bd77af1e1b41626650d3b09f2d691 +size 25875 diff --git a/docs/static/images/blog/authors/paul-b-profile-picture.jpg b/docs/static/images/blog/authors/paul-b-profile-picture.jpg new file mode 100644 index 0000000000..995954ec55 --- /dev/null +++ b/docs/static/images/blog/authors/paul-b-profile-picture.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a7934abaa295701414e3080c7605560abb2b884f6cbea429c0aa2c7cb4b2f28 +size 34931 diff --git a/docs/static/images/blog/automated-trading-bot/cover.png b/docs/static/images/blog/automated-trading-bot/cover.png new file mode 100644 index 0000000000..bf4f5d7419 --- /dev/null +++ b/docs/static/images/blog/automated-trading-bot/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04b35019da2dfd10f1d081c0386b66824272b344c3089e7ec63f75a1e0424dd2 +size 1264632 diff --git a/docs/static/images/blog/best-crypto-trading-bots/3commas.png b/docs/static/images/blog/best-crypto-trading-bots/3commas.png new file mode 100644 index 0000000000..512e6a9959 --- /dev/null +++ b/docs/static/images/blog/best-crypto-trading-bots/3commas.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b0211f6de2fea48f52316d7abb22f1ee53aed011b5819120015ed73fb868f70 +size 20434 diff --git a/docs/static/images/blog/best-crypto-trading-bots/cover.png b/docs/static/images/blog/best-crypto-trading-bots/cover.png new file mode 100644 index 0000000000..0f521c6d81 --- /dev/null +++ b/docs/static/images/blog/best-crypto-trading-bots/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28b8464bc7adc739ec208c0be23b66f9a2dd3f8636e9970699e1208344a710b8 +size 1754620 diff --git a/docs/static/images/blog/best-crypto-trading-bots/haasonline.jpg b/docs/static/images/blog/best-crypto-trading-bots/haasonline.jpg new file mode 100644 index 0000000000..3fd9d05967 --- /dev/null +++ b/docs/static/images/blog/best-crypto-trading-bots/haasonline.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02703a906fbd092cc91a16f3c983afa6a176bce007439520e428e2c75bede5b +size 25732 diff --git a/docs/static/images/blog/best-crypto-trading-bots/pionex.jpg b/docs/static/images/blog/best-crypto-trading-bots/pionex.jpg new file mode 100644 index 0000000000..8e82a2b192 --- /dev/null +++ b/docs/static/images/blog/best-crypto-trading-bots/pionex.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51a0e1903422235f7511346998970c37071f2bf872791155ec70a5b62d1b7527 +size 24133 diff --git a/docs/static/images/blog/best-crypto-trading-bots/tradesanta.png b/docs/static/images/blog/best-crypto-trading-bots/tradesanta.png new file mode 100644 index 0000000000..967bcabc2a --- /dev/null +++ b/docs/static/images/blog/best-crypto-trading-bots/tradesanta.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0269921c8d6a1e9a90dc974000870f11c09f6388678c8d86eb58950e78e249cc +size 35852 diff --git a/docs/static/images/blog/best-open-source-crypto-trading-bots/cover.png b/docs/static/images/blog/best-open-source-crypto-trading-bots/cover.png new file mode 100644 index 0000000000..ab9a2b7618 --- /dev/null +++ b/docs/static/images/blog/best-open-source-crypto-trading-bots/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fb11e0cc11bcb616527f409937f63b6f49d010af13136e08f9763e1060c22f5 +size 952987 diff --git a/docs/static/images/blog/best-open-source-crypto-trading-bots/jesse.png b/docs/static/images/blog/best-open-source-crypto-trading-bots/jesse.png new file mode 100644 index 0000000000..cf3fd2a9dd --- /dev/null +++ b/docs/static/images/blog/best-open-source-crypto-trading-bots/jesse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:740896e89ab2228672d6a33527b46357b32cce00c4fa43ee8c2137ce63a9ee27 +size 155524 diff --git a/docs/static/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png b/docs/static/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png new file mode 100644 index 0000000000..768b249982 --- /dev/null +++ b/docs/static/images/blog/bingx-wheel-of-fortune-event/bingx-and-octobot-wheel-of-fortune-event-with-usdt-to-earn.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb29951fd1dd85ebc0cbce618f1775328ea3eaf71013254e7cf3c38475c3f53e +size 905947 diff --git a/docs/static/images/blog/chatgpt-strategy-deep-dive/chatgpt-crypto-price-predictions.png b/docs/static/images/blog/chatgpt-strategy-deep-dive/chatgpt-crypto-price-predictions.png new file mode 100644 index 0000000000..31de73714b --- /dev/null +++ b/docs/static/images/blog/chatgpt-strategy-deep-dive/chatgpt-crypto-price-predictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28cf38f7ffe137c1d76e6a6d5032b3603991496819b5ff47243bedb5bc96a516 +size 154443 diff --git a/docs/static/images/blog/chatgpt-strategy-deep-dive/cover.png b/docs/static/images/blog/chatgpt-strategy-deep-dive/cover.png new file mode 100644 index 0000000000..65e65fd6fb --- /dev/null +++ b/docs/static/images/blog/chatgpt-strategy-deep-dive/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8001f5fcc1de9b33a106089edd8940e0408370f4e9da34fcb8f634b75fea8e05 +size 73136 diff --git a/docs/static/images/blog/chatgpt-strategy-deep-dive/strategy-designer-bitcoin-with-chat-gpt-strategy.png b/docs/static/images/blog/chatgpt-strategy-deep-dive/strategy-designer-bitcoin-with-chat-gpt-strategy.png new file mode 100644 index 0000000000..03bd9343dc --- /dev/null +++ b/docs/static/images/blog/chatgpt-strategy-deep-dive/strategy-designer-bitcoin-with-chat-gpt-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6acf2dcbcf58caeeb9a9b10e4469d26b34a27756b5ca31a72ad353080eea540e +size 846042 diff --git a/docs/static/images/blog/coinbase-and-binance.us-trading-bot/binance.us-and-coinbase-support-on-octobot.png b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/binance.us-and-coinbase-support-on-octobot.png new file mode 100644 index 0000000000..f05b9ece0c --- /dev/null +++ b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/binance.us-and-coinbase-support-on-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:173f3c1645830f5e898d4b0d905400fe7f5712a35ff77eae041e72f38c0d84ea +size 489595 diff --git a/docs/static/images/blog/coinbase-and-binance.us-trading-bot/crypto-basket.png b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/crypto-basket.png new file mode 100644 index 0000000000..074f03eb7c --- /dev/null +++ b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/crypto-basket.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca4dd73d5010015d109fc6b35695f581d5478c207e31aab29df5962616b232c9 +size 73589 diff --git a/docs/static/images/blog/coinbase-and-binance.us-trading-bot/octobot-collaborating-with-chatgpt-light.png b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/octobot-collaborating-with-chatgpt-light.png new file mode 100644 index 0000000000..5625bd279b --- /dev/null +++ b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/octobot-collaborating-with-chatgpt-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:010915a708fd0ed06367a13bc459f1da6712dffb5c72371f6191e69d3a27efd8 +size 154475 diff --git a/docs/static/images/blog/coinbase-and-binance.us-trading-bot/tradingview-automation-illustrated-by-tradingview-logo.png b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/tradingview-automation-illustrated-by-tradingview-logo.png new file mode 100644 index 0000000000..9507689720 --- /dev/null +++ b/docs/static/images/blog/coinbase-and-binance.us-trading-bot/tradingview-automation-illustrated-by-tradingview-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1748d756f4dd4626fb8853857b0e8e116596854757a2b8ac4130461d222df4dc +size 7344 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/community.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/community.png new file mode 100644 index 0000000000..068b2f9822 --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/community.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20dbd6c42ebaf3eacc2690c8fe2d8c1bc48244f4f1c6fc569141001092e33740 +size 96110 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/config.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/config.png new file mode 100644 index 0000000000..e91ae6d051 --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ea9c21a37e7d57f8d12d610b1479779406091ec1ac37dbe8ee821dd9df848e6 +size 64949 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/cover.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/cover.png new file mode 100644 index 0000000000..172d1d91ea --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f043668e868e63abc27dc62905fa4329164c9130524b65df696090899e95e36e +size 129053 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/id-button.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/id-button.png new file mode 100644 index 0000000000..534fe657e3 --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/id-button.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a631ec0643198e12d22ca7191fa076a6bdb3b8d05f92e999da55fb12a528271 +size 11162 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/imported.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/imported.png new file mode 100644 index 0000000000..8c5a3c53c4 --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/imported.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dae4583599cbcec848b9f258ef625e4f16b73120956fce358e8e630307d7e7f9 +size 68074 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/mode-config.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/mode-config.png new file mode 100644 index 0000000000..32d34dbda9 --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/mode-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7516754afccbcf7c265f9d1f1d6f1e5a6569d7797f87e9b4c13cf8ab7df65a6 +size 64829 diff --git a/docs/static/images/blog/following-strategies-in-octobot-cloud/pre-sub.png b/docs/static/images/blog/following-strategies-in-octobot-cloud/pre-sub.png new file mode 100644 index 0000000000..e42eb4efc2 --- /dev/null +++ b/docs/static/images/blog/following-strategies-in-octobot-cloud/pre-sub.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8b21e8251cb71456f67fd7059672d217d3fb888f0b14d26a063211fa13ea7f0 +size 177417 diff --git a/docs/static/images/blog/fomo-meaning/cover.png b/docs/static/images/blog/fomo-meaning/cover.png new file mode 100644 index 0000000000..f13e2e4550 --- /dev/null +++ b/docs/static/images/blog/fomo-meaning/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b5c9b25025a230c08827f3197e398b8559c1896cd01f20fa3cb62db90ad8191 +size 444414 diff --git a/docs/static/images/blog/fud-meaning/cover.png b/docs/static/images/blog/fud-meaning/cover.png new file mode 100644 index 0000000000..a31d3cf6e5 --- /dev/null +++ b/docs/static/images/blog/fud-meaning/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:143eb16b41310d4bed4caf0fb13417cb1154a867ff3f5b4eefbebe09ed4bc3c8 +size 428538 diff --git a/docs/static/images/blog/grid-trading/grid-strategies.png b/docs/static/images/blog/grid-trading/grid-strategies.png new file mode 100644 index 0000000000..79093c9988 --- /dev/null +++ b/docs/static/images/blog/grid-trading/grid-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7cdf9a51ed560c315e3cd4ac7436ff73e5ba0ede865dfdc1a2ef5ea7a3171d6 +size 207278 diff --git a/docs/static/images/blog/hodl-meaning/cover.png b/docs/static/images/blog/hodl-meaning/cover.png new file mode 100644 index 0000000000..7d09089fb4 --- /dev/null +++ b/docs/static/images/blog/hodl-meaning/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e24f9f317ca0973462af99e6f23ae6c4bdb44ef9dbce9ddca409bc7355603a0 +size 654287 diff --git a/docs/static/images/blog/hollaex-partnership/cover.jpg b/docs/static/images/blog/hollaex-partnership/cover.jpg new file mode 100644 index 0000000000..f667766fa2 --- /dev/null +++ b/docs/static/images/blog/hollaex-partnership/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7f7e61812d2af73d5c59a12346df40615a6c07f81cc2c881f0b89da5eeb9702 +size 246783 diff --git a/docs/static/images/blog/how-does-trading-bot-work/cover.png b/docs/static/images/blog/how-does-trading-bot-work/cover.png new file mode 100644 index 0000000000..c569daf059 --- /dev/null +++ b/docs/static/images/blog/how-does-trading-bot-work/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:776a81524b368b51c0729252816fe81069741f49dbd86be91cba1d82c3483817 +size 524309 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png new file mode 100644 index 0000000000..95cc54c85c --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/automate-your-tradingview-trades-to-trade-on-any-indicator-or-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:628480bc7d871d2a61ec3b76bdb4a523b3908d1eb9457168677594a0e82e1d41 +size 914594 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/creating-an-alert-from-tradingview.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/creating-an-alert-from-tradingview.png new file mode 100644 index 0000000000..a435ec15e3 --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/creating-an-alert-from-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59cacd163c40603274ff46312631d0f7354a32f0bbe846f1ffdccf4550d0e733 +size 171620 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-form.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-form.png new file mode 100644 index 0000000000..348e49c987 --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f705d29a83253e0479ecd1bba07e1048b25e8338c837dcd7097d267ddd7bfcd5 +size 20608 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-notification-form.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-notification-form.png new file mode 100644 index 0000000000..a6d10364aa --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alert-notification-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26c99b478bca4fcb3a9a8a3a0a2f8fc5a78033e628d8aeb8a034923ec4c90d70 +size 40023 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alerte-email-form.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alerte-email-form.png new file mode 100644 index 0000000000..fc0f608686 --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-alerte-email-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:406c80ab51d26825d338f5eef85da67fa39498b0e0a26b7eea6c9e4eddbfeb81 +size 45241 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-simple-rsi-strategy.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-simple-rsi-strategy.png new file mode 100644 index 0000000000..ddc23b4a18 --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-simple-rsi-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a592b35ea294454c69a994aec6e463d6e62d87db73d02971a60e2cf222008b35 +size 29187 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-example.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-example.png new file mode 100644 index 0000000000..d5e5b809c6 --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-example.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e60ca51fded2c12d63f51ac0cec1e2d3e70f9451efc6caa9b343f81196ba2e7 +size 179403 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-explorer.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-explorer.png new file mode 100644 index 0000000000..bda57284b3 --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-explorer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255cd031f7fadc91ba6ea6103525cfdc2a3d7b0970f1f9cefb934c87259477b6 +size 46742 diff --git a/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-tester.png b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-tester.png new file mode 100644 index 0000000000..60ee4a0b3c --- /dev/null +++ b/docs/static/images/blog/how-to-automate-trading-in-tradingview/tradingview-strategy-tester.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf0ebfe1519dac1842a4bae7ba57ec803b1d5da16b94345f735f70a65dc67cc7 +size 83148 diff --git a/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-creating-a-macd-strategy.png b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-creating-a-macd-strategy.png new file mode 100644 index 0000000000..5fe3d88055 --- /dev/null +++ b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-creating-a-macd-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6798cb1cc20740a7192b274aecb7e9f24bbc51f44bc229c69c1b7452a3f261fe +size 48470 diff --git a/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-sma-strategy-pinescript-code.png b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-sma-strategy-pinescript-code.png new file mode 100644 index 0000000000..bef762b776 --- /dev/null +++ b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-sma-strategy-pinescript-code.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40e1c8b8aa07c9158a0c57c51955d1ba9faaf3f9e893e6cd2fc2900b1ad5051d +size 60507 diff --git a/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-add-moving-average-to-the-strategy.png b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-add-moving-average-to-the-strategy.png new file mode 100644 index 0000000000..671a257400 --- /dev/null +++ b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-add-moving-average-to-the-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9b5b3be5def13ed7d66353b142deb262f80e131e68150c5159674fc3186a08a +size 58069 diff --git a/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-pinescript-code.png b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-pinescript-code.png new file mode 100644 index 0000000000..94336b9b38 --- /dev/null +++ b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-strategy-generator-macd-strategy-pinescript-code.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1278fdba8a3110db1d7d218fcc580eaf2dfdcfdf9891e6d7a7a87c154987897a +size 47459 diff --git a/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-testing-the-macd-sma-strategy-on-tradingview.png b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-testing-the-macd-sma-strategy-on-tradingview.png new file mode 100644 index 0000000000..6ef1a86f4b --- /dev/null +++ b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/octobot-ai-testing-the-macd-sma-strategy-on-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76be1b1278ba7218a4d210482118b8a4a5ccaae246fb302a0b856ee0203ed454 +size 131547 diff --git a/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/trading-strategy-with-ai-in-5-minutes-using-octobot-cloud-and-free-tradingview-account.png b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/trading-strategy-with-ai-in-5-minutes-using-octobot-cloud-and-free-tradingview-account.png new file mode 100644 index 0000000000..32e546a5f5 --- /dev/null +++ b/docs/static/images/blog/how-to-create-your-tradingview-strategy-with-ai/trading-strategy-with-ai-in-5-minutes-using-octobot-cloud-and-free-tradingview-account.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90a6ca8af55768c8151cd27732e30e1be5046cad329206a8c687d0c950c77ee6 +size 1011981 diff --git a/docs/static/images/blog/how-to-use-a-self-custody-crypto-trading-bot/metamask-logo.png b/docs/static/images/blog/how-to-use-a-self-custody-crypto-trading-bot/metamask-logo.png new file mode 100644 index 0000000000..aa5be6f2d1 --- /dev/null +++ b/docs/static/images/blog/how-to-use-a-self-custody-crypto-trading-bot/metamask-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d00265da858ceb6af833bd1f2172af09c2203358433ba3bcafa45bf83715ff5a +size 13043 diff --git a/docs/static/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png b/docs/static/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png new file mode 100644 index 0000000000..3a84c936cf --- /dev/null +++ b/docs/static/images/blog/how-to-use-a-self-custody-crypto-trading-bot/not-your-keys-not-your-coins-writen-on-paper-with-keys-and-a-bitcoin-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c3b4e638e22b133f9d87175f08cfe74085ac2fd12bef9d84703a18d5f6d8f4e +size 566662 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png new file mode 100644 index 0000000000..7b0a185181 --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/chatgpt-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a71516b1f9259532af05a9209149b83dc6d1b31689f2d67f86c8cd7e8ed983 +size 3805 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/prompt.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/prompt.png new file mode 100644 index 0000000000..e0e324c111 --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/prompt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a189e5f7fda6e1effca3d0fd01ba5109554024e2271cc03094bfe654bd52b7d9 +size 75799 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/shib-prediction.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/shib-prediction.png new file mode 100644 index 0000000000..85249c220c --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/shib-prediction.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8833d8fa2ef22b8c3c456cd9990c271b7e44bfc525407c5b1826f92d2c18eb7a +size 473805 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/shib-tweet.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/shib-tweet.png new file mode 100644 index 0000000000..9a69f86012 --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/shib-tweet.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c246ef5a9a0d44eff98329f6e2d2e952e1c3a8e4072fcef2dc409d72b4eab12 +size 38987 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/sol-telegram.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/sol-telegram.png new file mode 100644 index 0000000000..312e446099 --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/sol-telegram.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:580f37c36b94d05c5c97ea0dbec9f04dfb5156170607184faf246de19571e8cb +size 50311 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/sol-tweet.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/sol-tweet.png new file mode 100644 index 0000000000..17ea3e2b78 --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/sol-tweet.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ffb0ce47320a7c309dc8b430d9d65bf7a21614ead2081c242ed16a230d2de66 +size 42411 diff --git a/docs/static/images/blog/introducing-chatgpt-trading-tool/tool-screenshot.png b/docs/static/images/blog/introducing-chatgpt-trading-tool/tool-screenshot.png new file mode 100644 index 0000000000..31de73714b --- /dev/null +++ b/docs/static/images/blog/introducing-chatgpt-trading-tool/tool-screenshot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28cf38f7ffe137c1d76e6a6d5032b3603991496819b5ff47243bedb5bc96a516 +size 154443 diff --git a/docs/static/images/blog/introducing-cloud-octobot-plans/cover.png b/docs/static/images/blog/introducing-cloud-octobot-plans/cover.png new file mode 100644 index 0000000000..48040a5891 --- /dev/null +++ b/docs/static/images/blog/introducing-cloud-octobot-plans/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8ac8385a3a65c9390e51684c353fde07b26a543addc9ee1d374e2defaba2c9b +size 124248 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png b/docs/static/images/blog/introducing-the-investor-plus-plan/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png new file mode 100644 index 0000000000..cfd23c935d --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39ee20b3fa8a4f47ddb79058ba17e353cfe99ad80cb5bc0ba8b008436f3f84b5 +size 110564 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png new file mode 100644 index 0000000000..7ecf736f12 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-investor-plus-plan-announcement-with-tradingview-automations.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9a413b789bae4b4a842ebec02da0cae26b0b96c57548547e3d8a8cf6d2e6665 +size 978598 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png new file mode 100644 index 0000000000..8b71bcd4b0 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82ff2cc6b852bd302ca33b955571507e289dc5bdc0b8fcd619c7a0e3054ffd15 +size 260248 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-market-buy-btc-automation.png b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-market-buy-btc-automation.png new file mode 100644 index 0000000000..f9f7a3f885 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-market-buy-btc-automation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60e535a447e8bd6d1cf85062ab4d5a2ead63aba357a3a5ba885a74085883b996 +size 57642 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png new file mode 100644 index 0000000000..1159455f75 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78b1b2ef7dc5e2355108a40b6f2bced66d99c2051e91678738baa8a6d60904ab +size 187048 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-automation-illustrated-by-tradingview-logo.png b/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-automation-illustrated-by-tradingview-logo.png new file mode 100644 index 0000000000..9507689720 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-automation-illustrated-by-tradingview-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1748d756f4dd4626fb8853857b0e8e116596854757a2b8ac4130461d222df4dc +size 7344 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-configuration.png b/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-configuration.png new file mode 100644 index 0000000000..918fb2f9a8 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ccd8e1162293203b3efae5705601e30a60da7aa629856288f6732cfe22a73d9 +size 47548 diff --git a/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png b/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png new file mode 100644 index 0000000000..becf3c36e5 --- /dev/null +++ b/docs/static/images/blog/introducing-the-investor-plus-plan/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb3d7bcbc6bd131c2f94ecfd977095264bd47efedbedd445bcca3ee6ffe58126 +size 176199 diff --git a/docs/static/images/blog/introducing-the-new-octobot-cloud/bot.png b/docs/static/images/blog/introducing-the-new-octobot-cloud/bot.png new file mode 100644 index 0000000000..178a3e7ad3 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-cloud/bot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b72a216ee81976b9a2e60202885e9755c6f2d8716b831c9149cb90f9aa67c4a6 +size 237355 diff --git a/docs/static/images/blog/introducing-the-new-octobot-cloud/cover.png b/docs/static/images/blog/introducing-the-new-octobot-cloud/cover.png new file mode 100644 index 0000000000..895dff4043 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-cloud/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9339839c1e0eec2b830a2d918c1f55be9c8d70912e64ab93a287c9057bfe86c6 +size 57649 diff --git a/docs/static/images/blog/introducing-the-new-octobot-cloud/dca.png b/docs/static/images/blog/introducing-the-new-octobot-cloud/dca.png new file mode 100644 index 0000000000..3d903d3174 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-cloud/dca.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cefe13f2cfe23c88923b91823678b1248901ff35832739b41c238d9eef947f2e +size 221192 diff --git a/docs/static/images/blog/introducing-the-new-octobot-cloud/plans.png b/docs/static/images/blog/introducing-the-new-octobot-cloud/plans.png new file mode 100644 index 0000000000..b293bf34a2 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-cloud/plans.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ed51417d43ea930b87e5084b38ef6a83b0624a95cec6e5a5307534101f104ae +size 51645 diff --git a/docs/static/images/blog/introducing-the-new-octobot-cloud/strategies.png b/docs/static/images/blog/introducing-the-new-octobot-cloud/strategies.png new file mode 100644 index 0000000000..77c0921f48 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-cloud/strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4f51ebe0871345d7bc6eff8a5ace90f44054e5b5f63d6dc23f56e78ac6b8bb0 +size 334738 diff --git a/docs/static/images/blog/introducing-the-new-octobot-mobile-app/app-bots.webp b/docs/static/images/blog/introducing-the-new-octobot-mobile-app/app-bots.webp new file mode 100644 index 0000000000..ef258c4619 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-mobile-app/app-bots.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99ca7dafe99e31ea865474df2be5e9519181646eee8dec4fb066e51c2941db63 +size 312278 diff --git a/docs/static/images/blog/introducing-the-new-octobot-mobile-app/app-signin.webp b/docs/static/images/blog/introducing-the-new-octobot-mobile-app/app-signin.webp new file mode 100644 index 0000000000..ff7ad61449 --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-mobile-app/app-signin.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:955940c6a24c27b68964f29c048e4c2adc1209c188d2567b65027495fb5389f0 +size 126094 diff --git a/docs/static/images/blog/introducing-the-new-octobot-mobile-app/cover.png b/docs/static/images/blog/introducing-the-new-octobot-mobile-app/cover.png new file mode 100644 index 0000000000..985220a62c --- /dev/null +++ b/docs/static/images/blog/introducing-the-new-octobot-mobile-app/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1eed72bd5dbcce3aa479484d8318c2d9909526d433d16dcd15c7e104c8bfe86 +size 57695 diff --git a/docs/static/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png b/docs/static/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png new file mode 100644 index 0000000000..f1fd5c1a50 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b84b07d99c4ca489de88299f225f01816c21caa3259f98613ab210a1d8383b5f +size 233077 diff --git a/docs/static/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-historique-activite-octobot.png b/docs/static/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-historique-activite-octobot.png new file mode 100644 index 0000000000..ae5aaf21f8 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/acheter-et-vendre-des-crypto-historique-activite-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db49253289f6784b247ecd4856db33214f23efe49f652e393af5cb5b8883f725 +size 58118 diff --git a/docs/static/images/blog/introducing-the-pro-plan/annuler-des-ordres-directement-depuis-octobot.png b/docs/static/images/blog/introducing-the-pro-plan/annuler-des-ordres-directement-depuis-octobot.png new file mode 100644 index 0000000000..6658a36d65 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/annuler-des-ordres-directement-depuis-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fceceb63c4cf2f92c410488e4a20a1c09fa437f57bd9d8ac75263cc6e5dcb889 +size 68463 diff --git a/docs/static/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-directly-from-your-octobot.png b/docs/static/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-directly-from-your-octobot.png new file mode 100644 index 0000000000..4633da3c04 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-directly-from-your-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d232e80577e9204396cebc08de0c4a1fac663f0617f7ed249bed02e632f9ae8 +size 214901 diff --git a/docs/static/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-octobot-activity-history.png b/docs/static/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-octobot-activity-history.png new file mode 100644 index 0000000000..1a770c250e --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/buy-and-sell-crypto-octobot-activity-history.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f307bfcbe12cf5f433709ced1d48e0a8c4e4307ef27d6c7a6eed673a9d156da +size 46802 diff --git a/docs/static/images/blog/introducing-the-pro-plan/buy-sol-with-50-usdt-at-10-percent-discount-octobot-tradingview-automation.png b/docs/static/images/blog/introducing-the-pro-plan/buy-sol-with-50-usdt-at-10-percent-discount-octobot-tradingview-automation.png new file mode 100644 index 0000000000..a016ec0f21 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/buy-sol-with-50-usdt-at-10-percent-discount-octobot-tradingview-automation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b451e859ef74e77478967d2424246acc3400ad6dccba30572731d617358a22c7 +size 51716 diff --git a/docs/static/images/blog/introducing-the-pro-plan/cancel-orders-directly-from-your-octobot.png b/docs/static/images/blog/introducing-the-pro-plan/cancel-orders-directly-from-your-octobot.png new file mode 100644 index 0000000000..ca306106eb --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/cancel-orders-directly-from-your-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63231e99c31612b71545d7500880581a02a6b43fe44df80bc5f57f00fd344622 +size 57262 diff --git a/docs/static/images/blog/introducing-the-pro-plan/crypto-basket-landing.png b/docs/static/images/blog/introducing-the-pro-plan/crypto-basket-landing.png new file mode 100644 index 0000000000..854b801b77 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/crypto-basket-landing.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8105df2ee8709d44fe54b64945147473b323eef6459a741aa39905e4c6abfe0e +size 62303 diff --git a/docs/static/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png b/docs/static/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png new file mode 100644 index 0000000000..13eda2bd66 --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/octobot-trading-plan-announcement-with-TradingView-automations-and-advanced-coins-trading.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccc3ddbe5da5765f3e4647550d6359bd7302b4652a8345f651492b8e419fc753 +size 972977 diff --git a/docs/static/images/blog/introducing-the-pro-plan/sell-1-avax-at-120-usdt-octobot-tradingview-automation.png b/docs/static/images/blog/introducing-the-pro-plan/sell-1-avax-at-120-usdt-octobot-tradingview-automation.png new file mode 100644 index 0000000000..5f70c5d76a --- /dev/null +++ b/docs/static/images/blog/introducing-the-pro-plan/sell-1-avax-at-120-usdt-octobot-tradingview-automation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9910633ec3b02a450d07b54fd9688539df96cf0d6a724b854a660db5a23f6b68 +size 51936 diff --git a/docs/static/images/blog/introducing-the-strategy-designer/comparison.png b/docs/static/images/blog/introducing-the-strategy-designer/comparison.png new file mode 100644 index 0000000000..f690004971 --- /dev/null +++ b/docs/static/images/blog/introducing-the-strategy-designer/comparison.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba0b010a5e0940f423e07bb9fe61e9d027b67cad6934b9fff708515b29601ce9 +size 171010 diff --git a/docs/static/images/blog/introducing-the-strategy-designer/cover.png b/docs/static/images/blog/introducing-the-strategy-designer/cover.png new file mode 100644 index 0000000000..ca83c71665 --- /dev/null +++ b/docs/static/images/blog/introducing-the-strategy-designer/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4f39903e3ec0c42a8c62694d37e799e7abf657398fabc33b3e3fbde69eaaa2c +size 180582 diff --git a/docs/static/images/blog/introducing-the-strategy-designer/full-page.png b/docs/static/images/blog/introducing-the-strategy-designer/full-page.png new file mode 100644 index 0000000000..411d2a7516 --- /dev/null +++ b/docs/static/images/blog/introducing-the-strategy-designer/full-page.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c166a88b3a6216ae0468c05e55921cdfa6ece1557e869ee6ebcdbd9c9013e725 +size 169517 diff --git a/docs/static/images/blog/introducing-the-strategy-designer/history.png b/docs/static/images/blog/introducing-the-strategy-designer/history.png new file mode 100644 index 0000000000..b04b208a76 --- /dev/null +++ b/docs/static/images/blog/introducing-the-strategy-designer/history.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3819af7b49477335f4b34cb90ba58f73a5c04bf8712364b67f25ab6b155b3853 +size 67489 diff --git a/docs/static/images/blog/introducing-the-strategy-designer/preview.png b/docs/static/images/blog/introducing-the-strategy-designer/preview.png new file mode 100644 index 0000000000..ae39fd080c --- /dev/null +++ b/docs/static/images/blog/introducing-the-strategy-designer/preview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ab2bcdc9fc2bab37f4024ee53331fee3ebfdce6003bfd4f5e3b05c2394161b3 +size 82703 diff --git a/docs/static/images/blog/introducing-the-strategy-designer/trades-comp.png b/docs/static/images/blog/introducing-the-strategy-designer/trades-comp.png new file mode 100644 index 0000000000..6016aaa52c --- /dev/null +++ b/docs/static/images/blog/introducing-the-strategy-designer/trades-comp.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fe4afb051bde621b9e6ebbcb5299e51be0b83ce96980cd036cb0435fcf08f72 +size 116375 diff --git a/docs/static/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg b/docs/static/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg new file mode 100644 index 0000000000..90dfa7a491 --- /dev/null +++ b/docs/static/images/blog/introducing-trading-modes-guides/person-looking-at-his-screens-using-many-trading-strategies.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0894d3aaedeabd3f2713b9b143de614d616d6cd3e6f5f3c76c9b7d23f2ddf851 +size 126774 diff --git a/docs/static/images/blog/invest-with-crypto-baskets/crypto-basket-landing.png b/docs/static/images/blog/invest-with-crypto-baskets/crypto-basket-landing.png new file mode 100644 index 0000000000..8dfeca1185 --- /dev/null +++ b/docs/static/images/blog/invest-with-crypto-baskets/crypto-basket-landing.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70fad108a8824f6976b2a5f067d3a3096caf93ef0b8a55a66642ef6850f3c4c0 +size 247491 diff --git a/docs/static/images/blog/invest-with-crypto-baskets/crypto-basket.png b/docs/static/images/blog/invest-with-crypto-baskets/crypto-basket.png new file mode 100644 index 0000000000..91765f213a --- /dev/null +++ b/docs/static/images/blog/invest-with-crypto-baskets/crypto-basket.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d016a8ab7399c5641d19f3fd7ffcd2a444e892bd68ad89f307d5d3f309e86d5 +size 58895 diff --git a/docs/static/images/blog/invest-with-crypto-baskets/use-top-market-cap-basket.png b/docs/static/images/blog/invest-with-crypto-baskets/use-top-market-cap-basket.png new file mode 100644 index 0000000000..69da3d3402 --- /dev/null +++ b/docs/static/images/blog/invest-with-crypto-baskets/use-top-market-cap-basket.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8435d18f55782a7a2f9ed01feaf53800ef1ef3daeb43039c4d58ea3780b1ca33 +size 23597 diff --git a/docs/static/images/blog/invest-with-crypto-baskets/utiliser-le-panier-top-marketcap.png b/docs/static/images/blog/invest-with-crypto-baskets/utiliser-le-panier-top-marketcap.png new file mode 100644 index 0000000000..46614a063b --- /dev/null +++ b/docs/static/images/blog/invest-with-crypto-baskets/utiliser-le-panier-top-marketcap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abc9b41a4f9e141e71f8eaf679d209746381ebc8bffb7c88d72af6d13047061d +size 24384 diff --git a/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-meme-coins-crypto-basket-using-ai.png b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-meme-coins-crypto-basket-using-ai.png new file mode 100644 index 0000000000..c35dd53ced --- /dev/null +++ b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-meme-coins-crypto-basket-using-ai.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43c1578fe575d18cf8c0f1993d7b669cbbd9c4350cb68f3e7cf453e4b56f20a1 +size 106255 diff --git a/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-eth-usdc-dca-strategy-on-conbase-using-ai.png b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-eth-usdc-dca-strategy-on-conbase-using-ai.png new file mode 100644 index 0000000000..0803470b98 --- /dev/null +++ b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-eth-usdc-dca-strategy-on-conbase-using-ai.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9def106b2a9ce7af9de397720946f035c20decdc94ac4dfcb76c48e85e6e038 +size 103617 diff --git a/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-usdt-grid-strategy-on-kucoin-using-ai.png b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-usdt-grid-strategy-on-kucoin-using-ai.png new file mode 100644 index 0000000000..0f9dbe30c1 --- /dev/null +++ b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/creating-a-sol-usdt-grid-strategy-on-kucoin-using-ai.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f8b932f8b5d24863abf11b9954a6119e7acd89c0c7ddd392da8f2f2650fb168 +size 93625 diff --git a/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement-banner.png b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement-banner.png new file mode 100644 index 0000000000..36536af0fa --- /dev/null +++ b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement-banner.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:986f9d4502fd3ef6dae451488fd28e1a43d4f7aa59f32f036c5d179b7bdb40c1 +size 265800 diff --git a/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement.jpeg b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement.jpeg new file mode 100644 index 0000000000..708d5574d6 --- /dev/null +++ b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai-announcement.jpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c68b8d4a98828ec86a566cd2d9cbf171999d6519123aa2c4f27258bd81ce984 +size 61180 diff --git a/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/top-5-basket-with-73-percent-profit-on-kucoin.png b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/top-5-basket-with-73-percent-profit-on-kucoin.png new file mode 100644 index 0000000000..dd94689c65 --- /dev/null +++ b/docs/static/images/blog/kucoin-x-octobot-fireside-chat-simplifying-crypto-investment-with-ai/top-5-basket-with-73-percent-profit-on-kucoin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d504d720838a54f020c59085334e04b6ace61ce24b2f8ae1e6453ac809bb34 +size 173583 diff --git a/docs/static/images/blog/making-octobot-more-accessible/octobot-low-risk-crypto-baskets-strategies.png b/docs/static/images/blog/making-octobot-more-accessible/octobot-low-risk-crypto-baskets-strategies.png new file mode 100644 index 0000000000..5f8dd749a6 --- /dev/null +++ b/docs/static/images/blog/making-octobot-more-accessible/octobot-low-risk-crypto-baskets-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42ec50c20a7e0260843a4d73be86a7edbec29cca359205f67b7fbaacf1ef7ac8 +size 66397 diff --git a/docs/static/images/blog/making-octobot-more-accessible/octobot-multi-exchange-dashboard.png b/docs/static/images/blog/making-octobot-more-accessible/octobot-multi-exchange-dashboard.png new file mode 100644 index 0000000000..2874158262 --- /dev/null +++ b/docs/static/images/blog/making-octobot-more-accessible/octobot-multi-exchange-dashboard.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0145f88eb6b96a2e88aab0b9bc38098af7a20b090cac21efc174a61c317f19cf +size 44123 diff --git a/docs/static/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png b/docs/static/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png new file mode 100644 index 0000000000..9a30e86bf1 --- /dev/null +++ b/docs/static/images/blog/making-octobot-more-accessible/octobot-plans-improvements.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:307f18a49185cef33ba59ce34d1776d8014dd73761639fcc0dac4eee05897881 +size 905421 diff --git a/docs/static/images/blog/making-octobot-more-accessible/octobot-strategy-explorer-with-crypto-baskets-and-dca-strategies.png b/docs/static/images/blog/making-octobot-more-accessible/octobot-strategy-explorer-with-crypto-baskets-and-dca-strategies.png new file mode 100644 index 0000000000..24b067a95a --- /dev/null +++ b/docs/static/images/blog/making-octobot-more-accessible/octobot-strategy-explorer-with-crypto-baskets-and-dca-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71af41014be3cc1dac0f258074578db4757cfa52f4cbd129d6ea847be9fbf619 +size 185522 diff --git a/docs/static/images/blog/mobile-app-revamp/bot-view-pf-en.png b/docs/static/images/blog/mobile-app-revamp/bot-view-pf-en.png new file mode 100644 index 0000000000..cbbfe50557 --- /dev/null +++ b/docs/static/images/blog/mobile-app-revamp/bot-view-pf-en.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ed14cc907d8facf9a59dd9bef8d69625adfd7e7da70d706b29d863b70d83f23 +size 48175 diff --git a/docs/static/images/blog/mobile-app-revamp/bot-view-pf-fr.png b/docs/static/images/blog/mobile-app-revamp/bot-view-pf-fr.png new file mode 100644 index 0000000000..37a7871442 --- /dev/null +++ b/docs/static/images/blog/mobile-app-revamp/bot-view-pf-fr.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b216ed8abf2ba15cc4b9f66a97f8783dcbb0b12780c6ceb89775d9717490149e +size 49222 diff --git a/docs/static/images/blog/mobile-app-revamp/mobile-dashboard-en.png b/docs/static/images/blog/mobile-app-revamp/mobile-dashboard-en.png new file mode 100644 index 0000000000..808d195408 --- /dev/null +++ b/docs/static/images/blog/mobile-app-revamp/mobile-dashboard-en.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1716c44c1c382949d4da1d8359f632c948dffa6da65e50e6c1516ca703f358c +size 48304 diff --git a/docs/static/images/blog/mobile-app-revamp/mobile-dashboard-fr.png b/docs/static/images/blog/mobile-app-revamp/mobile-dashboard-fr.png new file mode 100644 index 0000000000..6b0687f67d --- /dev/null +++ b/docs/static/images/blog/mobile-app-revamp/mobile-dashboard-fr.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7c2ef962a9f77c57cc588701022995ff6b86a63776422cdbfdd682354588367 +size 49111 diff --git a/docs/static/images/blog/mobile-app-revamp/thumb.png b/docs/static/images/blog/mobile-app-revamp/thumb.png new file mode 100644 index 0000000000..b41374c17c --- /dev/null +++ b/docs/static/images/blog/mobile-app-revamp/thumb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4006e4e681ab7eb0ce1b8bc0aadde94ff88a110df6734035440108b7c918421a +size 65503 diff --git a/docs/static/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png b/docs/static/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png new file mode 100644 index 0000000000..9fc8c1466e --- /dev/null +++ b/docs/static/images/blog/new-octobot-cloud-plans-and-trading-bots/a-man-relaxing-in-his-couch-while-octobot-is-making-money-by-automating-cryptocurrency-strategies-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a448bbde89a0d73c7a35351308fec9692e02ccb75b85db8baaf77fc1f2839d3 +size 380290 diff --git a/docs/static/images/blog/octobot-1-0-2-whats-new/cover.png b/docs/static/images/blog/octobot-1-0-2-whats-new/cover.png new file mode 100644 index 0000000000..2b99e4632d --- /dev/null +++ b/docs/static/images/blog/octobot-1-0-2-whats-new/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aca5b3a0c5ad68b356aa8a33b0b40ccb433e8928f7657704641c4b58c26d7719 +size 188081 diff --git a/docs/static/images/blog/octobot-1-0-2-whats-new/gpt-evaluator-settings.png b/docs/static/images/blog/octobot-1-0-2-whats-new/gpt-evaluator-settings.png new file mode 100644 index 0000000000..f9427e4ed7 --- /dev/null +++ b/docs/static/images/blog/octobot-1-0-2-whats-new/gpt-evaluator-settings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cc6bbe0293e15a933281c0c097bb0224c72ac7fff1ea699c45a00132970cb08 +size 145559 diff --git a/docs/static/images/blog/octobot-1-0-2-whats-new/tv-guides.png b/docs/static/images/blog/octobot-1-0-2-whats-new/tv-guides.png new file mode 100644 index 0000000000..a4ace047ef --- /dev/null +++ b/docs/static/images/blog/octobot-1-0-2-whats-new/tv-guides.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac040d847a3eaf136308e0201c2c6fcf7e898df1c65a62d09e3b371e8fd4ad80 +size 389988 diff --git a/docs/static/images/blog/octobot-1-0-4-whats-new/download-octobot-cloud-strategies-in-open-source-bot.png b/docs/static/images/blog/octobot-1-0-4-whats-new/download-octobot-cloud-strategies-in-open-source-bot.png new file mode 100644 index 0000000000..d9cc4ffe7e --- /dev/null +++ b/docs/static/images/blog/octobot-1-0-4-whats-new/download-octobot-cloud-strategies-in-open-source-bot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fee98a713be988b3ba93d3f82588a99a6deb446d539acdca5923d2459b3a11a +size 151470 diff --git a/docs/static/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png b/docs/static/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png new file mode 100644 index 0000000000..c64b1158ca --- /dev/null +++ b/docs/static/images/blog/octobot-1-0-4-whats-new/with-octobot-1.0.4-use-octobot-cloud-strategies-and-trade-on-bingx.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6ba205b04064329d489b99fc03ed1c1595707376a23ed8accbb354db5596b24 +size 83301 diff --git a/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png new file mode 100644 index 0000000000..7fd8cb49c2 --- /dev/null +++ b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-annoucement-with-new-design-preview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfdce889d9a4ebd78b68fa9f9eb2c9fd7ce018bc27e91b19533d492d43cdc2f6 +size 341217 diff --git a/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-pnl-light.png b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-pnl-light.png new file mode 100644 index 0000000000..2c3ae3ff6f --- /dev/null +++ b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-pnl-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a598053e9c99257462faac617065e21839993f8a7a97f8eca48807754135439 +size 91830 diff --git a/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-preview-dark.png b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-preview-dark.png new file mode 100644 index 0000000000..e80fb21d79 --- /dev/null +++ b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-2.0.0-preview-dark.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b07c17ddd13a9fddac130045188f298ff90052649a0134735ca8e199741c7a9a +size 84921 diff --git a/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-premium-extension-preview.png b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-premium-extension-preview.png new file mode 100644 index 0000000000..535dd66d1e --- /dev/null +++ b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-premium-extension-preview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f892f289dfb4a346912d476a2e14362986d637da072bcd9e391cce3a66012f7a +size 200745 diff --git a/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-premium-new-tentacles-available.png b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-premium-new-tentacles-available.png new file mode 100644 index 0000000000..c77113e320 --- /dev/null +++ b/docs/static/images/blog/octobot-2-0-0-whats-new/octobot-premium-new-tentacles-available.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ee75c88ffea41811874a1ab77968c38df5a07308a924c15358b965c9d757558 +size 7289 diff --git a/docs/static/images/blog/octobot-pro-plan-early-access/cloud-strategies.png b/docs/static/images/blog/octobot-pro-plan-early-access/cloud-strategies.png new file mode 100644 index 0000000000..2f58c444ff --- /dev/null +++ b/docs/static/images/blog/octobot-pro-plan-early-access/cloud-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52562a55eb494836e2e6495c7bbae29510168343aa801e20b446958f611fa3ba +size 288779 diff --git a/docs/static/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png b/docs/static/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png new file mode 100644 index 0000000000..0b333b6b17 --- /dev/null +++ b/docs/static/images/blog/octobot-pro-plan-early-access/octobot-pro-plan-early-access-announcement.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0e1440185cd293d6a718f8afae94465603c7f900cb6bc150f8ce22dd6a50047 +size 74294 diff --git a/docs/static/images/blog/octobot-pro-plan-early-access/strategy-designer-preview.png b/docs/static/images/blog/octobot-pro-plan-early-access/strategy-designer-preview.png new file mode 100644 index 0000000000..aa2db1ad8e --- /dev/null +++ b/docs/static/images/blog/octobot-pro-plan-early-access/strategy-designer-preview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0744588f9e9a41622857b9ce67bdc69b380914316980d017376195c7a53fd5d7 +size 224078 diff --git a/docs/static/images/blog/octobots-in-octobot-cloud/cover.png b/docs/static/images/blog/octobots-in-octobot-cloud/cover.png new file mode 100644 index 0000000000..b8fc850f0f --- /dev/null +++ b/docs/static/images/blog/octobots-in-octobot-cloud/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dde2b11ab5f86cfa4943eea27161ba5a359fffc9f72fe06a181fd9479f8840e0 +size 143907 diff --git a/docs/static/images/blog/octobots-in-octobot-cloud/deploy-now.jpg b/docs/static/images/blog/octobots-in-octobot-cloud/deploy-now.jpg new file mode 100644 index 0000000000..5210115f52 --- /dev/null +++ b/docs/static/images/blog/octobots-in-octobot-cloud/deploy-now.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52a9ee994634051ec8652d6edc61fd0e286a7bce4193f327c70e7101db2382cc +size 65228 diff --git a/docs/static/images/blog/octobots-in-octobot-cloud/deploying.png b/docs/static/images/blog/octobots-in-octobot-cloud/deploying.png new file mode 100644 index 0000000000..76a56791ad --- /dev/null +++ b/docs/static/images/blog/octobots-in-octobot-cloud/deploying.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a6263442165a1a284e68d5601f2ec2a29c5ef10511cbaf60bc71fcf3442559f +size 63748 diff --git a/docs/static/images/blog/octobots-in-octobot-cloud/login.png b/docs/static/images/blog/octobots-in-octobot-cloud/login.png new file mode 100644 index 0000000000..3ead344837 --- /dev/null +++ b/docs/static/images/blog/octobots-in-octobot-cloud/login.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cf77771d323ac18a76d64d0ec65cc41fdab540c68923fd07e32b4767a3aff1d +size 63746 diff --git a/docs/static/images/blog/octobots-in-octobot-cloud/my-bots-button.jpg b/docs/static/images/blog/octobots-in-octobot-cloud/my-bots-button.jpg new file mode 100644 index 0000000000..c7590ffe59 --- /dev/null +++ b/docs/static/images/blog/octobots-in-octobot-cloud/my-bots-button.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1851e0914359060a7175b1078e4abca7ad11d5a102fbfa1be39393832fa142ba +size 10635 diff --git a/docs/static/images/blog/octobots-in-octobot-cloud/open-interface.png b/docs/static/images/blog/octobots-in-octobot-cloud/open-interface.png new file mode 100644 index 0000000000..5278c71b77 --- /dev/null +++ b/docs/static/images/blog/octobots-in-octobot-cloud/open-interface.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ff99b3a0afb960b908193356029b6cbc77fcf2a091ef3659870ef62adcc2072 +size 95773 diff --git a/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/crypto-basket.png b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/crypto-basket.png new file mode 100644 index 0000000000..7105f4e5ef --- /dev/null +++ b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/crypto-basket.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfac310096fc01a542daca6fac20247ea7043d4523ba5b582b40a3f220449a85 +size 227490 diff --git a/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png new file mode 100644 index 0000000000..6a26bc175d --- /dev/null +++ b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-1.0.9-ditigtalocean-1-click-deployment-custom-crypto-baskets.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:041d4dce7df2f03118d6dfb011f5897dec8249535f58af47e903d2ca82d2c3f1 +size 556382 diff --git a/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-on-the-digitalocean-marketplace.png b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-on-the-digitalocean-marketplace.png new file mode 100644 index 0000000000..c843027307 --- /dev/null +++ b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/octobot-on-the-digitalocean-marketplace.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b4719ee71e08247b6f93f78ac4d9fcd652e0f76e4d3b0c81c0801a2466b3e6c +size 58545 diff --git a/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/tradingview-logo-showing-octobot-tradingview-trading-mode.png b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/tradingview-logo-showing-octobot-tradingview-trading-mode.png new file mode 100644 index 0000000000..4e8ff7e0f0 --- /dev/null +++ b/docs/static/images/blog/one-click-cloud-deployment-with-octobot-1-0-9/tradingview-logo-showing-octobot-tradingview-trading-mode.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41dafb6758fc2725014a9caf1548a04633f1cb97ae3619652cf9933ecf7e6dd1 +size 14264 diff --git a/docs/static/images/blog/open-source-trading-software/cover.png b/docs/static/images/blog/open-source-trading-software/cover.png new file mode 100644 index 0000000000..9f77cd1ee9 --- /dev/null +++ b/docs/static/images/blog/open-source-trading-software/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e41a69c46db59f1b4775276fdb1ef0567d08745604de5db8bc494d29b67c246 +size 769439 diff --git a/docs/static/images/blog/open-source-trading-software/crypto.png b/docs/static/images/blog/open-source-trading-software/crypto.png new file mode 100644 index 0000000000..119a27c70e --- /dev/null +++ b/docs/static/images/blog/open-source-trading-software/crypto.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7d9b35319d5cc4f3a7e2d4f3be5937aaa2a1f64de1c528bd2b81663da313bb3 +size 870878 diff --git a/docs/static/images/blog/paper-trading-with-octobot/cover.jpg b/docs/static/images/blog/paper-trading-with-octobot/cover.jpg new file mode 100644 index 0000000000..90c4e6b1c4 --- /dev/null +++ b/docs/static/images/blog/paper-trading-with-octobot/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50a6ccf8d1b0770504fe655df0f00c5ee78bf73ebe7a3b152bb217f948cd29e7 +size 106760 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-import-link.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-import-link.jpg new file mode 100644 index 0000000000..a0dab88413 --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-import-link.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf143c94a6327e1ee7e27f5b01eea3dba0abf1fb2f3a18a35e35935d8f88b348 +size 44875 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg new file mode 100644 index 0000000000..05bd553b2f --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-import.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a7baca41ef7606a05f66677cd3fcf928ede678c9ed04674db318014869eee88 +size 203153 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-imported.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-imported.jpg new file mode 100644 index 0000000000..15a4d35164 --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-imported.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2da27a73fab11f51538a2296eb101dc64895d2f92e6fa9b2641e1502d9306f10 +size 198920 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-share.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-share.jpg new file mode 100644 index 0000000000..323d8e234c --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/bot-share.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bcf45e601e4cfcb200843094588adaaa7baef865a3ebd372cb96b4a6533c7ad +size 167257 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/copy.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/copy.jpg new file mode 100644 index 0000000000..9f3572735a --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/copy.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05cbce1ffec138678a45ece467af2891a3c4b585857c1618f78b429da7da5293 +size 107273 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/cover.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/cover.jpg new file mode 100644 index 0000000000..87eb07ff7b --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/cover.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca43a477c5c45a519dcb521bdc5be13341bbf1955c3516256957c83dc01aca36 +size 94593 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/editor.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/editor.jpg new file mode 100644 index 0000000000..c06594f820 --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/editor.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96a6ce1bef33efa89d68d07b4efcbf0bde1aafe8a538ec5e6cb4deccc913d2dc +size 39979 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/publish-profile.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/publish-profile.jpg new file mode 100644 index 0000000000..fb4e51e210 --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/publish-profile.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6d365ee850d3af94d9b25fd6f892cc2decee3c3fa57488f8735b4603983054a +size 54326 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/publish.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/publish.jpg new file mode 100644 index 0000000000..8b901d00c8 --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/publish.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d70366d796488d21bafa2e134b4abec63231c780efbdd5484c93f608788d5a92 +size 62265 diff --git a/docs/static/images/blog/profile-sharing-in-octobot-cloud/sub.jpg b/docs/static/images/blog/profile-sharing-in-octobot-cloud/sub.jpg new file mode 100644 index 0000000000..e350dc3da7 --- /dev/null +++ b/docs/static/images/blog/profile-sharing-in-octobot-cloud/sub.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edcee4288c2d6bfb2d59902fe09d69e1ea4c48a18d22d439eb158eb43d99708d +size 90005 diff --git a/docs/static/images/blog/safu-meaning/cover.png b/docs/static/images/blog/safu-meaning/cover.png new file mode 100644 index 0000000000..942a14ef0d --- /dev/null +++ b/docs/static/images/blog/safu-meaning/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70ce857ca6cbd56e069a01096d44fdc8b05e6dcbf1ddb76c39a4f5a7c5dc4ff3 +size 40139 diff --git a/docs/static/images/blog/safu-meaning/safu-tweet.png b/docs/static/images/blog/safu-meaning/safu-tweet.png new file mode 100644 index 0000000000..df31ad5d5d --- /dev/null +++ b/docs/static/images/blog/safu-meaning/safu-tweet.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f59117728cac8cfea70d854a5ea65730e832e9777339918ab5f57d8a99a506df +size 21095 diff --git a/docs/static/images/blog/shape-the-future-with-our-roadmap/banner-dark.png b/docs/static/images/blog/shape-the-future-with-our-roadmap/banner-dark.png new file mode 100644 index 0000000000..baf966b917 --- /dev/null +++ b/docs/static/images/blog/shape-the-future-with-our-roadmap/banner-dark.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61a34338e3b279152fd81d04a5549b46ed59152383c501169473ad19cd640928 +size 49699 diff --git a/docs/static/images/blog/shape-the-future-with-our-roadmap/octobot_cloud.png b/docs/static/images/blog/shape-the-future-with-our-roadmap/octobot_cloud.png new file mode 100644 index 0000000000..db7854a8ba --- /dev/null +++ b/docs/static/images/blog/shape-the-future-with-our-roadmap/octobot_cloud.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6091f6929e699beecdf4e164a4e0ddf697251708ce7974e7b4dbc221b89ebe40 +size 90034 diff --git a/docs/static/images/blog/shape-the-future-with-our-roadmap/open_source_octobot.png b/docs/static/images/blog/shape-the-future-with-our-roadmap/open_source_octobot.png new file mode 100644 index 0000000000..7975c27f76 --- /dev/null +++ b/docs/static/images/blog/shape-the-future-with-our-roadmap/open_source_octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f934e4fdb6ae64151fd2eb221db356b0a40a6626c24498006ab955d39f6532fa +size 100256 diff --git a/docs/static/images/blog/shape-the-future-with-our-roadmap/roadmap.png b/docs/static/images/blog/shape-the-future-with-our-roadmap/roadmap.png new file mode 100644 index 0000000000..e060a06b0d --- /dev/null +++ b/docs/static/images/blog/shape-the-future-with-our-roadmap/roadmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90284d6d15ec17d5e1a54c03a95be1b36da0c3106d5d0f5ff8fe0b13f7ff5200 +size 58527 diff --git a/docs/static/images/blog/smart-dca-making-of/binance-trading-rules-min-funds.png b/docs/static/images/blog/smart-dca-making-of/binance-trading-rules-min-funds.png new file mode 100644 index 0000000000..35420cfe53 --- /dev/null +++ b/docs/static/images/blog/smart-dca-making-of/binance-trading-rules-min-funds.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e75839a63b3b46ce593530cc82f5515e09f6d2bb873a4b90c8969a5ef83c6235 +size 53967 diff --git a/docs/static/images/blog/smart-dca-making-of/coingecko-coin-categories.png b/docs/static/images/blog/smart-dca-making-of/coingecko-coin-categories.png new file mode 100644 index 0000000000..4bffcaa7cf --- /dev/null +++ b/docs/static/images/blog/smart-dca-making-of/coingecko-coin-categories.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:092465b06e9185dde3b36a6fda0566a1eb2218303aeff4aae1c19b7657263a4b +size 105465 diff --git a/docs/static/images/blog/smart-dca-making-of/cover.png b/docs/static/images/blog/smart-dca-making-of/cover.png new file mode 100644 index 0000000000..dd5185a043 --- /dev/null +++ b/docs/static/images/blog/smart-dca-making-of/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc81163833e0df63af6921bcb36f960e713207e92f0a2ac82e40d1133df652b2 +size 61978 diff --git a/docs/static/images/blog/smart-dca-making-of/profitable-results-with-0.8-percent-take-profit.png b/docs/static/images/blog/smart-dca-making-of/profitable-results-with-0.8-percent-take-profit.png new file mode 100644 index 0000000000..63d1a44072 --- /dev/null +++ b/docs/static/images/blog/smart-dca-making-of/profitable-results-with-0.8-percent-take-profit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18e0d150db6417e337af8b62ac6d6537019f5393dddb6bf2bc431dbcc4fbaef3 +size 154214 diff --git a/docs/static/images/blog/smart-dca-making-of/risky-results-with-2-percent-take-profit.png b/docs/static/images/blog/smart-dca-making-of/risky-results-with-2-percent-take-profit.png new file mode 100644 index 0000000000..6fd9277817 --- /dev/null +++ b/docs/static/images/blog/smart-dca-making-of/risky-results-with-2-percent-take-profit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93c1ff57c2de95ac5ff921c6a3c7262621abeed4b66a71a4ad0b3809e29625c1 +size 145102 diff --git a/docs/static/images/blog/strategy-designer-revamp/cover.png b/docs/static/images/blog/strategy-designer-revamp/cover.png new file mode 100644 index 0000000000..aa2db1ad8e --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0744588f9e9a41622857b9ce67bdc69b380914316980d017376195c7a53fd5d7 +size 224078 diff --git a/docs/static/images/blog/strategy-designer-revamp/stepper1.1.png b/docs/static/images/blog/strategy-designer-revamp/stepper1.1.png new file mode 100644 index 0000000000..49a586574d --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/stepper1.1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54b6c743ae3ef041940be9d6bca2e1c70ec7c2b850aca1e483da7f327273a0d1 +size 107119 diff --git a/docs/static/images/blog/strategy-designer-revamp/stepper1.2.png b/docs/static/images/blog/strategy-designer-revamp/stepper1.2.png new file mode 100644 index 0000000000..5383c6cd45 --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/stepper1.2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e915ad83746d1e10fe29bcaa208b28ae1dc62baff182b8ca5d61238dce908c7 +size 23258 diff --git a/docs/static/images/blog/strategy-designer-revamp/stepper2.png b/docs/static/images/blog/strategy-designer-revamp/stepper2.png new file mode 100644 index 0000000000..bb847a88e9 --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/stepper2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b79d4192d83b62db02df007017eb66a47e92d2bbdb174a68d20ea6c4a9eee045 +size 88512 diff --git a/docs/static/images/blog/strategy-designer-revamp/stepper3.png b/docs/static/images/blog/strategy-designer-revamp/stepper3.png new file mode 100644 index 0000000000..e0774076f1 --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/stepper3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62b99c76d05bc3707ac9e5aed0842f779bef2ed57d879a229daacaad3a646b39 +size 64231 diff --git a/docs/static/images/blog/strategy-designer-revamp/stepper4.png b/docs/static/images/blog/strategy-designer-revamp/stepper4.png new file mode 100644 index 0000000000..086f87c208 --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/stepper4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bde3433e80218ae0e4c20763ade9fdd9615de323e8b5018f512e37ff2b4c2d87 +size 47996 diff --git a/docs/static/images/blog/strategy-designer-revamp/summary.png b/docs/static/images/blog/strategy-designer-revamp/summary.png new file mode 100644 index 0000000000..a45c4a1bc9 --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/summary.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:756a46c74279408d1f1a7e593e34b654f3e9ea9588d05daf0d698c5830c8b596 +size 48117 diff --git a/docs/static/images/blog/strategy-designer-revamp/trades.png b/docs/static/images/blog/strategy-designer-revamp/trades.png new file mode 100644 index 0000000000..ae61a3621a --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/trades.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98f54838c2313ab34ff473eb0304cb6411d9b10c0109e7a70f7df262d8cf3f3c +size 50674 diff --git a/docs/static/images/blog/strategy-designer-revamp/use-as-live.png b/docs/static/images/blog/strategy-designer-revamp/use-as-live.png new file mode 100644 index 0000000000..2e4781bb9a --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/use-as-live.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2623c12b2b634b271ecfa57672a5b1a70f09567d60f4716d11bf5ea07e0dd35f +size 3028 diff --git a/docs/static/images/blog/strategy-designer-revamp/viewer.png b/docs/static/images/blog/strategy-designer-revamp/viewer.png new file mode 100644 index 0000000000..d55ede2910 --- /dev/null +++ b/docs/static/images/blog/strategy-designer-revamp/viewer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85dc636cd86126e320aac176c9caa75876434c91232f966233779e1b7f63d4d8 +size 188175 diff --git a/docs/static/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png b/docs/static/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png new file mode 100644 index 0000000000..15e64230b7 --- /dev/null +++ b/docs/static/images/blog/trading-on-coinex-with-octobot/trading-on-coinex-with-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b979b6dae34bc563d62da953fbc2e8b1491e63ed5f624c3f5a3fe85d67e5527 +size 909510 diff --git a/docs/static/images/blog/trading-orders/cover.png b/docs/static/images/blog/trading-orders/cover.png new file mode 100644 index 0000000000..2a14cf9279 --- /dev/null +++ b/docs/static/images/blog/trading-orders/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9633e7cc507f6f30f3f5aa55ebf58cedecd638556cc8bc8b1f4105b46b350a0d +size 180265 diff --git a/docs/static/images/blog/trading-strategy-automation/cover.png b/docs/static/images/blog/trading-strategy-automation/cover.png new file mode 100644 index 0000000000..6b54eae844 --- /dev/null +++ b/docs/static/images/blog/trading-strategy-automation/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bea0f988781fe8e934d81061fed1477524ccc68f0587e99f3002ba199823993d +size 687645 diff --git a/docs/static/images/blog/trading-using-chat-gpt/ChatGPT-Logo.png b/docs/static/images/blog/trading-using-chat-gpt/ChatGPT-Logo.png new file mode 100644 index 0000000000..4006c86432 --- /dev/null +++ b/docs/static/images/blog/trading-using-chat-gpt/ChatGPT-Logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f50d10386773c03b0762b8320ef91de46ab0f4d1648955c09bf82020ad934ebd +size 43937 diff --git a/docs/static/images/blog/trading-using-chat-gpt/cover.png b/docs/static/images/blog/trading-using-chat-gpt/cover.png new file mode 100644 index 0000000000..90432c75c0 --- /dev/null +++ b/docs/static/images/blog/trading-using-chat-gpt/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bb99d875e6a2667d273249c6f894fab67431bc850ce895b25f378e2c32546e2 +size 723644 diff --git a/docs/static/images/blog/trading-using-chat-gpt/gpt-free-tool.png b/docs/static/images/blog/trading-using-chat-gpt/gpt-free-tool.png new file mode 100644 index 0000000000..69a944aad0 --- /dev/null +++ b/docs/static/images/blog/trading-using-chat-gpt/gpt-free-tool.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70c3fabf8855d3369e3b543d7e1513200748f549c743d582da221698ef3b1093 +size 236006 diff --git a/docs/static/images/blog/trading-using-tradingview/cover.png b/docs/static/images/blog/trading-using-tradingview/cover.png new file mode 100644 index 0000000000..0a323ed998 --- /dev/null +++ b/docs/static/images/blog/trading-using-tradingview/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e2e7bbf103dd12284670b2d07c597a22e90518f64f0ce85ea1e2d749f4d0083 +size 786776 diff --git a/docs/static/images/blog/trading-using-tradingview/telegram.png b/docs/static/images/blog/trading-using-tradingview/telegram.png new file mode 100644 index 0000000000..a8fd2cf0e9 --- /dev/null +++ b/docs/static/images/blog/trading-using-tradingview/telegram.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2e2964ed405e39119d6c0f06c243eeb028e1c79d03776279f871e698fc06503 +size 14861 diff --git a/docs/static/images/blog/trading-with-ai-introduction/brain.jpeg b/docs/static/images/blog/trading-with-ai-introduction/brain.jpeg new file mode 100644 index 0000000000..b4445b9a63 --- /dev/null +++ b/docs/static/images/blog/trading-with-ai-introduction/brain.jpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b56da463f7ad4c2e38f3241655fdb87aafde1441fecdf35f140e0a81fb7b67b5 +size 301857 diff --git a/docs/static/images/blog/trading-with-ai-introduction/cover.png b/docs/static/images/blog/trading-with-ai-introduction/cover.png new file mode 100644 index 0000000000..c9aba3dd0a --- /dev/null +++ b/docs/static/images/blog/trading-with-ai-introduction/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acccf69e9879a2b723951e131cad993269f25e87646c757a724c14ea3e6b288b +size 1610091 diff --git a/docs/static/images/blog/trading-with-ai-introduction/strategy-output.png b/docs/static/images/blog/trading-with-ai-introduction/strategy-output.png new file mode 100644 index 0000000000..edf4f21e17 --- /dev/null +++ b/docs/static/images/blog/trading-with-ai-introduction/strategy-output.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e9c051c036a3e6a4fcc10da94ff26add5a267c191e7c9cd1976c4d6cc94e157 +size 222779 diff --git a/docs/static/images/blog/trading-with-ai-introduction/trading.jpg b/docs/static/images/blog/trading-with-ai-introduction/trading.jpg new file mode 100644 index 0000000000..cbfd9d93da --- /dev/null +++ b/docs/static/images/blog/trading-with-ai-introduction/trading.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2deb9f0b6129a63d028d98727d91e3ca83bd5c60fdd038b2537153c60167e25f +size 39359 diff --git a/docs/static/images/blog/welcome-to-octobot-blog/cover.png b/docs/static/images/blog/welcome-to-octobot-blog/cover.png new file mode 100644 index 0000000000..02ecd63293 --- /dev/null +++ b/docs/static/images/blog/welcome-to-octobot-blog/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d28353a1a716e75ce72dca1a5a24e0fc805e058f0941eb0f22cc8734ecb5915d +size 12421 diff --git a/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-crypto-apprentice-rewards.png b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-crypto-apprentice-rewards.png new file mode 100644 index 0000000000..d8bad2fd42 --- /dev/null +++ b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-crypto-apprentice-rewards.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc35d4de59a1504cbc8dfcb989127d6d94f3f9228ccaba9f8a99570efde4dfc +size 81541 diff --git a/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-grandmaster-of-crypto-rewards.png b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-grandmaster-of-crypto-rewards.png new file mode 100644 index 0000000000..319c74fe8c --- /dev/null +++ b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-dashboard-showing-grandmaster-of-crypto-rewards.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3791c1bbb6ecf3c4a5fd8468eac64f1f07adb7afcff178ffa6c9be42192a614b +size 69260 diff --git a/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png new file mode 100644 index 0000000000..92e539dc3f --- /dev/null +++ b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-get-rewarded-for-using-octobot-and-use-advanced-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c0eafcfc73ca50f0c33782b07e5f653bc12c90b4885a6d0c614ac6b7fd2ab78 +size 354816 diff --git a/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-list-with-completed-and-uncompleted-missions.png b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-list-with-completed-and-uncompleted-missions.png new file mode 100644 index 0000000000..7ea370eee7 --- /dev/null +++ b/docs/static/images/blog/what-are-octobot-rewards-and-how-to-get-them/octobot-rewards-list-with-completed-and-uncompleted-missions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5192f4e50235ba25b58a88d14082427c5416412c1126cff214e9b0688b6431f +size 163575 diff --git a/docs/static/images/blog/what-are-stablecoins/cover.png b/docs/static/images/blog/what-are-stablecoins/cover.png new file mode 100644 index 0000000000..e2b898c8a5 --- /dev/null +++ b/docs/static/images/blog/what-are-stablecoins/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a152e23e8dab808ba9ad6bcaddb7dc0188fb22523036ab5f829464d12d4153cb +size 86164 diff --git a/docs/static/images/blog/what-are-stablecoins/future.png b/docs/static/images/blog/what-are-stablecoins/future.png new file mode 100644 index 0000000000..21f06d8454 --- /dev/null +++ b/docs/static/images/blog/what-are-stablecoins/future.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3778e933873e4c788c1f1b1d8cfcc87ef5ee51d705c4c18020f2847766e9e0b0 +size 134737 diff --git a/docs/static/images/blog/what-are-stablecoins/usdt-logo.png b/docs/static/images/blog/what-are-stablecoins/usdt-logo.png new file mode 100644 index 0000000000..5d68e6c260 --- /dev/null +++ b/docs/static/images/blog/what-are-stablecoins/usdt-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b2693f38fabcbd0c12bb190becca1dc0250da7ed0ff905892cd511f76b9f09 +size 16745 diff --git a/docs/static/images/blog/what-are-stablecoins/usdt.png b/docs/static/images/blog/what-are-stablecoins/usdt.png new file mode 100644 index 0000000000..392fffd815 --- /dev/null +++ b/docs/static/images/blog/what-are-stablecoins/usdt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d54d3f7955732b57dd3311ee550a1fd2c38ab6339e442d2c3db6cd583651a6d +size 35804 diff --git a/docs/static/images/blog/what-is-dca/cover.png b/docs/static/images/blog/what-is-dca/cover.png new file mode 100644 index 0000000000..c499bb1567 --- /dev/null +++ b/docs/static/images/blog/what-is-dca/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7ac8b1a326c128753721d4f19f0eeda18b4f26325b96cd0dd2fdae394bfb3a4 +size 294134 diff --git a/docs/static/images/blog/what-is-future-trading/cover.png b/docs/static/images/blog/what-is-future-trading/cover.png new file mode 100644 index 0000000000..5de43efa9a --- /dev/null +++ b/docs/static/images/blog/what-is-future-trading/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa75e4c735f3dac1c700a695c5fddbced3d6c3e57fdd50d383034ad269864a0d +size 474083 diff --git a/docs/static/images/blog/what-is-spot-trading/cover.png b/docs/static/images/blog/what-is-spot-trading/cover.png new file mode 100644 index 0000000000..2efaaf6f25 --- /dev/null +++ b/docs/static/images/blog/what-is-spot-trading/cover.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62270cfe9c0b6b6e582d6b54f658808a829f57fc822edd4acd18ff822df0c7d9 +size 307677 diff --git a/docs/static/images/blog/what-is-spot-trading/cryptocurrency-desk.png b/docs/static/images/blog/what-is-spot-trading/cryptocurrency-desk.png new file mode 100644 index 0000000000..6ef005f2ea --- /dev/null +++ b/docs/static/images/blog/what-is-spot-trading/cryptocurrency-desk.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fecaecc133b75db90aee3ecb5ea9fd71e1668fd79049d40fd15fe3ac563fb1fe +size 286174 diff --git a/docs/static/images/guides/acheter-et-vendre-depuis-votre-portfolio-octobot.png b/docs/static/images/guides/acheter-et-vendre-depuis-votre-portfolio-octobot.png new file mode 100644 index 0000000000..6ce74be887 --- /dev/null +++ b/docs/static/images/guides/acheter-et-vendre-depuis-votre-portfolio-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56563e7e5e9aaa5ee8a2035a9491084a5993ad52f7a8f5fe56cc0c267033c08f +size 16616 diff --git a/docs/static/images/guides/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png b/docs/static/images/guides/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png new file mode 100644 index 0000000000..f1fd5c1a50 --- /dev/null +++ b/docs/static/images/guides/acheter-et-vendre-des-crypto-directement-depuis-votre-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b84b07d99c4ca489de88299f225f01816c21caa3259f98613ab210a1d8383b5f +size 233077 diff --git a/docs/static/images/guides/acheter-et-vendre-des-crypto-historique-activite-octobot.png b/docs/static/images/guides/acheter-et-vendre-des-crypto-historique-activite-octobot.png new file mode 100644 index 0000000000..ae5aaf21f8 --- /dev/null +++ b/docs/static/images/guides/acheter-et-vendre-des-crypto-historique-activite-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db49253289f6784b247ecd4856db33214f23efe49f652e393af5cb5b8883f725 +size 58118 diff --git a/docs/static/images/guides/adding-a-tradingview-strategy-alert.png b/docs/static/images/guides/adding-a-tradingview-strategy-alert.png new file mode 100644 index 0000000000..e4d931d6ea --- /dev/null +++ b/docs/static/images/guides/adding-a-tradingview-strategy-alert.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8d45775ba42898a14ad4509896386426dbfe3860e500b15658643f43f65bfbe +size 17226 diff --git a/docs/static/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png b/docs/static/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png new file mode 100644 index 0000000000..5071089b2d --- /dev/null +++ b/docs/static/images/guides/ai-trading-illustrated-by-octobot-head-with-chatgpt-logo-trading-bitcoin-ethereum-litecoin-usd-logos.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8df6e8808ab5a352bb8c4ae4722632b215761175db1552d81ba971c725379b6d +size 97075 diff --git a/docs/static/images/guides/annuler-des-ordres-directement-depuis-octobot.png b/docs/static/images/guides/annuler-des-ordres-directement-depuis-octobot.png new file mode 100644 index 0000000000..6658a36d65 --- /dev/null +++ b/docs/static/images/guides/annuler-des-ordres-directement-depuis-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fceceb63c4cf2f92c410488e4a20a1c09fa437f57bd9d8ac75263cc6e5dcb889 +size 68463 diff --git a/docs/static/images/guides/backtesting/octobot-backtesting-data-collector.png b/docs/static/images/guides/backtesting/octobot-backtesting-data-collector.png new file mode 100644 index 0000000000..e9e28953ba --- /dev/null +++ b/docs/static/images/guides/backtesting/octobot-backtesting-data-collector.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21fa2db8fb5cdd7acb533ae99c72ac7f5f58de828a56cfe280a461298e3c4dde +size 233783 diff --git a/docs/static/images/guides/backtesting/octobot-backtesting-data-selector-starting-a-backtesting.png b/docs/static/images/guides/backtesting/octobot-backtesting-data-selector-starting-a-backtesting.png new file mode 100644 index 0000000000..e417385ce5 --- /dev/null +++ b/docs/static/images/guides/backtesting/octobot-backtesting-data-selector-starting-a-backtesting.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85b87610ad8ecaac15b498dff4027e0eced9f200d8344956ea4bfb99c599d43d +size 212550 diff --git a/docs/static/images/guides/backtesting/octobot-backtesting-profile-selector.png b/docs/static/images/guides/backtesting/octobot-backtesting-profile-selector.png new file mode 100644 index 0000000000..cb0d9fd07c --- /dev/null +++ b/docs/static/images/guides/backtesting/octobot-backtesting-profile-selector.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5e8f823e252881c3c9f45a57dce301bd5b00dfd62b59f69f31e1fc19a274fa2 +size 250385 diff --git a/docs/static/images/guides/backtesting/octobot-backtesting-result-graph.png b/docs/static/images/guides/backtesting/octobot-backtesting-result-graph.png new file mode 100644 index 0000000000..f7e8ab236b --- /dev/null +++ b/docs/static/images/guides/backtesting/octobot-backtesting-result-graph.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e90c4dc3579aa42f94793f4f3fcf20b55430f03fe93d78a20dfcd50d3140817 +size 211280 diff --git a/docs/static/images/guides/backtesting/octobot-backtesting-result-summary.png b/docs/static/images/guides/backtesting/octobot-backtesting-result-summary.png new file mode 100644 index 0000000000..ab273a37ca --- /dev/null +++ b/docs/static/images/guides/backtesting/octobot-backtesting-result-summary.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39aecfad613594bb322493a2b8a88fb4c596d4e92bcc1505071623c37df8e675 +size 581506 diff --git a/docs/static/images/guides/backtesting/octobot-backtesting-result-trades.png b/docs/static/images/guides/backtesting/octobot-backtesting-result-trades.png new file mode 100644 index 0000000000..919e1e59f5 --- /dev/null +++ b/docs/static/images/guides/backtesting/octobot-backtesting-result-trades.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:220947abb7c1352a5d5ef319552d0e4470a9c4055abd890d23caf392e08ea4ab +size 235718 diff --git a/docs/static/images/guides/binance/account-api-management-from-navbar.png b/docs/static/images/guides/binance/account-api-management-from-navbar.png new file mode 100644 index 0000000000..75096bed43 --- /dev/null +++ b/docs/static/images/guides/binance/account-api-management-from-navbar.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:558fd6ac98ccf326674af3e977b969b8b19ca33f5b6b8edd5e90cdbb5257e37c +size 312431 diff --git a/docs/static/images/guides/binance/account-setting-api-management.png b/docs/static/images/guides/binance/account-setting-api-management.png new file mode 100644 index 0000000000..5de77fe7e8 --- /dev/null +++ b/docs/static/images/guides/binance/account-setting-api-management.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43dc18b4c2aa1618bd047af10348cc7aa656c86d406a55b00dc2946e09e287ed +size 123141 diff --git a/docs/static/images/guides/binance/add-api-key-to-octobot-cloud-from-profile.png b/docs/static/images/guides/binance/add-api-key-to-octobot-cloud-from-profile.png new file mode 100644 index 0000000000..1fdb207476 --- /dev/null +++ b/docs/static/images/guides/binance/add-api-key-to-octobot-cloud-from-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46cebe20cbd8d411cca7c2b929bc298f9cbc40ee290b5dc769531c20626e1915 +size 209863 diff --git a/docs/static/images/guides/binance/add-api-key-to-octobot-cloud-from-strategy-start.png b/docs/static/images/guides/binance/add-api-key-to-octobot-cloud-from-strategy-start.png new file mode 100644 index 0000000000..32d0961522 --- /dev/null +++ b/docs/static/images/guides/binance/add-api-key-to-octobot-cloud-from-strategy-start.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbda7014a187efcec7d86fb9c4b9912f4ad5934b8f17a256802b864783c86ae4 +size 165269 diff --git a/docs/static/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-profil.png b/docs/static/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-profil.png new file mode 100644 index 0000000000..8ecfee2dd5 --- /dev/null +++ b/docs/static/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-profil.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce0a88eb122c6ae27940a42387e4bed906823b4c22620985ae8ff75a9fc8c71f +size 202702 diff --git a/docs/static/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-start-de-strategie.png b/docs/static/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-start-de-strategie.png new file mode 100644 index 0000000000..202f4a3929 --- /dev/null +++ b/docs/static/images/guides/binance/ajouter-api-key-a-octobot-cloud-depuis-start-de-strategie.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:014974272b59c6132b5e96ce01bb428febe61f305c198874a2025a0a3a3f032b +size 157818 diff --git a/docs/static/images/guides/binance/api-created-1.png b/docs/static/images/guides/binance/api-created-1.png new file mode 100644 index 0000000000..23824f6de6 --- /dev/null +++ b/docs/static/images/guides/binance/api-created-1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da84cf6fd0041244344c364acca537a8e622b43b16f5f95d18f4b732e521e51 +size 165051 diff --git a/docs/static/images/guides/binance/api-created-add-trading-permission-save.png b/docs/static/images/guides/binance/api-created-add-trading-permission-save.png new file mode 100644 index 0000000000..17cf54ff46 --- /dev/null +++ b/docs/static/images/guides/binance/api-created-add-trading-permission-save.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6bfc7f45ad6a341c798137be5df37e62ba8d0120d0115d77531d645b3d1251c +size 208042 diff --git a/docs/static/images/guides/binance/api-created-add-trading-permission.png b/docs/static/images/guides/binance/api-created-add-trading-permission.png new file mode 100644 index 0000000000..cd93ed1cb2 --- /dev/null +++ b/docs/static/images/guides/binance/api-created-add-trading-permission.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:673449d25b3d9e8409261a4a99b211201cab61b2d100d850a786d92f4867634d +size 207977 diff --git a/docs/static/images/guides/binance/api-created-click-edit-restrictions.png b/docs/static/images/guides/binance/api-created-click-edit-restrictions.png new file mode 100644 index 0000000000..a739117534 --- /dev/null +++ b/docs/static/images/guides/binance/api-created-click-edit-restrictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c753c83c50b43a52b1efd266363e32715483770ab8b1ab08b2ce7ecb83b03186 +size 207706 diff --git a/docs/static/images/guides/binance/api-creation-completed-selected-values.png b/docs/static/images/guides/binance/api-creation-completed-selected-values.png new file mode 100644 index 0000000000..d305ab09af --- /dev/null +++ b/docs/static/images/guides/binance/api-creation-completed-selected-values.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6a2c029d7b2d3847cf9e16ada13db56271159eaf31ec95185a8d91f7fc8805a +size 207747 diff --git a/docs/static/images/guides/binance/api-creation-completed.png b/docs/static/images/guides/binance/api-creation-completed.png new file mode 100644 index 0000000000..2649eb2b79 --- /dev/null +++ b/docs/static/images/guides/binance/api-creation-completed.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa0a8bd9d8c9de7e4696a9fb97bac0b44c9b635c92755b1230445fb4cf231828 +size 165060 diff --git a/docs/static/images/guides/binance/api-cree-1.png b/docs/static/images/guides/binance/api-cree-1.png new file mode 100644 index 0000000000..92b1c4d33d --- /dev/null +++ b/docs/static/images/guides/binance/api-cree-1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffbfef5db58450ac828417fa14d0cf9b27f572261450ce5885107d0e572e2dc9 +size 178411 diff --git a/docs/static/images/guides/binance/api-cree-ajouter-trading-permission-sauvegarder.png b/docs/static/images/guides/binance/api-cree-ajouter-trading-permission-sauvegarder.png new file mode 100644 index 0000000000..fd5d36763b --- /dev/null +++ b/docs/static/images/guides/binance/api-cree-ajouter-trading-permission-sauvegarder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab481d416b6bcaf8fdc60dfa763c993de669e940e73a3e95fa722640a29544d4 +size 227849 diff --git a/docs/static/images/guides/binance/api-cree-ajouter-trading-permission.png b/docs/static/images/guides/binance/api-cree-ajouter-trading-permission.png new file mode 100644 index 0000000000..052170a2ae --- /dev/null +++ b/docs/static/images/guides/binance/api-cree-ajouter-trading-permission.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf113c16ec524a257120b2df5de5b047a08274c5cbf55610b1f6807597270775 +size 227891 diff --git a/docs/static/images/guides/binance/api-cree-key-selectionnees.png b/docs/static/images/guides/binance/api-cree-key-selectionnees.png new file mode 100644 index 0000000000..750eef2f09 --- /dev/null +++ b/docs/static/images/guides/binance/api-cree-key-selectionnees.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:848b99596dfb34e3ab28975d79003ba92e501245c403fd954cb79e56893bf1f1 +size 223435 diff --git a/docs/static/images/guides/binance/api-cree-modifier-restrictions.png b/docs/static/images/guides/binance/api-cree-modifier-restrictions.png new file mode 100644 index 0000000000..5ed54742f3 --- /dev/null +++ b/docs/static/images/guides/binance/api-cree-modifier-restrictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d9389e219926875641ff6383313fff465bc9b282a81170c09e064f4baec4436 +size 223137 diff --git a/docs/static/images/guides/binance/api-restreindre-ips-de-confiance.png b/docs/static/images/guides/binance/api-restreindre-ips-de-confiance.png new file mode 100644 index 0000000000..2f490cf7d2 --- /dev/null +++ b/docs/static/images/guides/binance/api-restreindre-ips-de-confiance.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:792ae274334a454cb988db50b36373072dd6e9c5d30782fa198e22ad3d9979b5 +size 159226 diff --git a/docs/static/images/guides/binance/api-restrict-to-trusted-ips.png b/docs/static/images/guides/binance/api-restrict-to-trusted-ips.png new file mode 100644 index 0000000000..01d508cd71 --- /dev/null +++ b/docs/static/images/guides/binance/api-restrict-to-trusted-ips.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f142e8f9c8b8097c99aea51e0071cb67d542effc9baa2f68d1febfcef868546 +size 144603 diff --git a/docs/static/images/guides/binance/apis-list-create-new-api.png b/docs/static/images/guides/binance/apis-list-create-new-api.png new file mode 100644 index 0000000000..315c4fa456 --- /dev/null +++ b/docs/static/images/guides/binance/apis-list-create-new-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda8a0d42661a9f267df012c1f4fc6ccc15964234e59e034f968b11b84328271 +size 117988 diff --git a/docs/static/images/guides/binance/apis-list-default-security-controls.png b/docs/static/images/guides/binance/apis-list-default-security-controls.png new file mode 100644 index 0000000000..03800566f2 --- /dev/null +++ b/docs/static/images/guides/binance/apis-list-default-security-controls.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8086e0bcdd39e4b975b5d0d7e3487534f953f24f2b60cd448261ada77b59454 +size 97995 diff --git a/docs/static/images/guides/binance/apis-liste-controle-securite.png b/docs/static/images/guides/binance/apis-liste-controle-securite.png new file mode 100644 index 0000000000..e9137fd860 --- /dev/null +++ b/docs/static/images/guides/binance/apis-liste-controle-securite.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22164974480f1e7ce4ef2f591d4eeccb375bd1aba4e29f6aaa9eaf5d20be230d +size 130323 diff --git a/docs/static/images/guides/binance/apis-liste-creer-nouvelle-api.png b/docs/static/images/guides/binance/apis-liste-creer-nouvelle-api.png new file mode 100644 index 0000000000..0501f5bccf --- /dev/null +++ b/docs/static/images/guides/binance/apis-liste-creer-nouvelle-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d83b1589c3dace12afaa8b00b7c69334f78e09da7a999fde0943c491809a645f +size 130473 diff --git a/docs/static/images/guides/binance/binance-account-authentification.png b/docs/static/images/guides/binance/binance-account-authentification.png new file mode 100644 index 0000000000..5cb48c5c10 --- /dev/null +++ b/docs/static/images/guides/binance/binance-account-authentification.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f109644ad223146ca41ce4dfb7dd52abc158719a051208fcbbf7a740a3b70c93 +size 54218 diff --git a/docs/static/images/guides/binance/binance-account-login.png b/docs/static/images/guides/binance/binance-account-login.png new file mode 100644 index 0000000000..e1bd5acd2d --- /dev/null +++ b/docs/static/images/guides/binance/binance-account-login.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11dc6b7185a77afa1eda813ad45d855700f8ed161aa5827029bc36ad43818b84 +size 49382 diff --git a/docs/static/images/guides/binance/binance-subaccounts.png b/docs/static/images/guides/binance/binance-subaccounts.png new file mode 100644 index 0000000000..1e0b69dcc8 --- /dev/null +++ b/docs/static/images/guides/binance/binance-subaccounts.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7c4ff7804d19a382cf2e07a5a02afc3548ce8f67b5c87f0617a2cf36d398718 +size 95037 diff --git a/docs/static/images/guides/binance/compte-lien-gestion-des-api-depuis-navbar.png b/docs/static/images/guides/binance/compte-lien-gestion-des-api-depuis-navbar.png new file mode 100644 index 0000000000..698f12c859 --- /dev/null +++ b/docs/static/images/guides/binance/compte-lien-gestion-des-api-depuis-navbar.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e6d746a7abf240e2f8eba3051eabf94fd77618e54819e2316b099a169d6cdf5 +size 379596 diff --git a/docs/static/images/guides/binance/compte-lien-gestion-des-api.png b/docs/static/images/guides/binance/compte-lien-gestion-des-api.png new file mode 100644 index 0000000000..50f70888fd --- /dev/null +++ b/docs/static/images/guides/binance/compte-lien-gestion-des-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a92fa2ce2d0887779ea92fde81a3fc9119d6f0a61440b26291ad4ce5219ea9ce +size 128183 diff --git a/docs/static/images/guides/binance/create-api-security-verification.png b/docs/static/images/guides/binance/create-api-security-verification.png new file mode 100644 index 0000000000..0c3806f0c2 --- /dev/null +++ b/docs/static/images/guides/binance/create-api-security-verification.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9119266deca9fd7b5904b356fcf42494b3fc99ff6288ac13278fe2e41f8ab95 +size 119868 diff --git a/docs/static/images/guides/binance/creer-api-verification-securite.png b/docs/static/images/guides/binance/creer-api-verification-securite.png new file mode 100644 index 0000000000..ae908ab82d --- /dev/null +++ b/docs/static/images/guides/binance/creer-api-verification-securite.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b14f672d1ae19f01bb5db93624ae91be14a0c441bbbb4d72cee1dd107ca6d520 +size 119987 diff --git a/docs/static/images/guides/binance/select-api-name.png b/docs/static/images/guides/binance/select-api-name.png new file mode 100644 index 0000000000..5869a970cc --- /dev/null +++ b/docs/static/images/guides/binance/select-api-name.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b058bfd71089057116ed0b9fcdfa899e04df7d21b2e5e2e8db8ecdb27aa355a9 +size 110376 diff --git a/docs/static/images/guides/binance/select-api-type.png b/docs/static/images/guides/binance/select-api-type.png new file mode 100644 index 0000000000..5a6ebdd16c --- /dev/null +++ b/docs/static/images/guides/binance/select-api-type.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:309ee63d5638c9ea9729ff3af4529c43a8fba18064b1951fbb99471ce56eefa0 +size 143860 diff --git a/docs/static/images/guides/binance/selection-api-type.png b/docs/static/images/guides/binance/selection-api-type.png new file mode 100644 index 0000000000..e7600ac9ae --- /dev/null +++ b/docs/static/images/guides/binance/selection-api-type.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1dae721b5a6503742152c2319de562864d66582074bf58934ad8804dfc80d2d +size 158228 diff --git a/docs/static/images/guides/binance/selection-nom-api.png b/docs/static/images/guides/binance/selection-nom-api.png new file mode 100644 index 0000000000..f95e4c53bd --- /dev/null +++ b/docs/static/images/guides/binance/selection-nom-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1087361dbbd5dc4e025339852b78296bf75f25292f41c6075455319ceb97a3b3 +size 120219 diff --git a/docs/static/images/guides/buy-and-sell-crypto-directly-from-your-octobot.png b/docs/static/images/guides/buy-and-sell-crypto-directly-from-your-octobot.png new file mode 100644 index 0000000000..4633da3c04 --- /dev/null +++ b/docs/static/images/guides/buy-and-sell-crypto-directly-from-your-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d232e80577e9204396cebc08de0c4a1fac663f0617f7ed249bed02e632f9ae8 +size 214901 diff --git a/docs/static/images/guides/buy-and-sell-crypto-octobot-activity-history.png b/docs/static/images/guides/buy-and-sell-crypto-octobot-activity-history.png new file mode 100644 index 0000000000..1a770c250e --- /dev/null +++ b/docs/static/images/guides/buy-and-sell-crypto-octobot-activity-history.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f307bfcbe12cf5f433709ced1d48e0a8c4e4307ef27d6c7a6eed673a9d156da +size 46802 diff --git a/docs/static/images/guides/buy-and-sell-from-your-octobot-portfolio.png b/docs/static/images/guides/buy-and-sell-from-your-octobot-portfolio.png new file mode 100644 index 0000000000..aa47837950 --- /dev/null +++ b/docs/static/images/guides/buy-and-sell-from-your-octobot-portfolio.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac639defb49affccd42f3c476937a3ff89fa84942c0c49f82f27d384cd76b9fa +size 15536 diff --git a/docs/static/images/guides/cancel-orders-directly-from-your-octobot.png b/docs/static/images/guides/cancel-orders-directly-from-your-octobot.png new file mode 100644 index 0000000000..ca306106eb --- /dev/null +++ b/docs/static/images/guides/cancel-orders-directly-from-your-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63231e99c31612b71545d7500880581a02a6b43fe44df80bc5f57f00fd344622 +size 57262 diff --git a/docs/static/images/guides/chatgpt/octobot-chatgpt-configuration-openai-key-and-custom-base-url.png b/docs/static/images/guides/chatgpt/octobot-chatgpt-configuration-openai-key-and-custom-base-url.png new file mode 100644 index 0000000000..09a4d1d3de --- /dev/null +++ b/docs/static/images/guides/chatgpt/octobot-chatgpt-configuration-openai-key-and-custom-base-url.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b54d47293c460ed4018a9bc54f3fa1e6ceba1a4e55a0c16b58684cbb21d8a0a +size 10517 diff --git a/docs/static/images/guides/cloud-bot.png b/docs/static/images/guides/cloud-bot.png new file mode 100644 index 0000000000..e7b2417da0 --- /dev/null +++ b/docs/static/images/guides/cloud-bot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a502f2a35421c8395ae56dc42d7b98843c080c32efdc90751c3f708ff422d09 +size 198383 diff --git a/docs/static/images/guides/cloud-bots.png b/docs/static/images/guides/cloud-bots.png new file mode 100644 index 0000000000..f958a6c7c0 --- /dev/null +++ b/docs/static/images/guides/cloud-bots.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0347b99b1e462d5e28f45e9a2bd268c33a392b622a36e7e1e734303e64bc08ef +size 122114 diff --git a/docs/static/images/guides/cloud-strategy-select-exchange.png b/docs/static/images/guides/cloud-strategy-select-exchange.png new file mode 100644 index 0000000000..d3bc6b38ca --- /dev/null +++ b/docs/static/images/guides/cloud-strategy-select-exchange.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:419ceb666ef23af114882fc5a5013e1d2ca741616e66d742f98db9c680fee8df +size 89507 diff --git a/docs/static/images/guides/cloud-strategy-start.png b/docs/static/images/guides/cloud-strategy-start.png new file mode 100644 index 0000000000..b02dfb9cfe --- /dev/null +++ b/docs/static/images/guides/cloud-strategy-start.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a13838cda7d83d7d81c41c10fcab1847ec56e599f52b3f6c86401c379bd83511 +size 48525 diff --git a/docs/static/images/guides/cloud-strategy.png b/docs/static/images/guides/cloud-strategy.png new file mode 100644 index 0000000000..96fe4d62d9 --- /dev/null +++ b/docs/static/images/guides/cloud-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbb384cf52348fa71d563b0f8145fc479b65c243ee3bf50d5c67252aa0f03ba1 +size 175285 diff --git a/docs/static/images/guides/cloud-strategy2.png b/docs/static/images/guides/cloud-strategy2.png new file mode 100644 index 0000000000..e513e80935 --- /dev/null +++ b/docs/static/images/guides/cloud-strategy2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c354b2d69b4a4b2fadd0a4294d3a3d8ea699af1e25225ecd222b00a777ee6f +size 190795 diff --git a/docs/static/images/guides/coinbase/account-setting-api-management-click-api.png b/docs/static/images/guides/coinbase/account-setting-api-management-click-api.png new file mode 100644 index 0000000000..b7bd534040 --- /dev/null +++ b/docs/static/images/guides/coinbase/account-setting-api-management-click-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dde1d82c9143d187a0ef3b980b4166bf433a2b7a3f57bc785f4f87f305e8fbd +size 192040 diff --git a/docs/static/images/guides/coinbase/account-setting-api-management.png b/docs/static/images/guides/coinbase/account-setting-api-management.png new file mode 100644 index 0000000000..fd8e5da123 --- /dev/null +++ b/docs/static/images/guides/coinbase/account-setting-api-management.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f7feb6e86a0ec1498c08cacd66faa59dbaa6ad57f45b0372889fc5d5fb37074 +size 291824 diff --git a/docs/static/images/guides/coinbase/add-api-key-to-octobot-cloud-from-profile.png b/docs/static/images/guides/coinbase/add-api-key-to-octobot-cloud-from-profile.png new file mode 100644 index 0000000000..a90ac6425d --- /dev/null +++ b/docs/static/images/guides/coinbase/add-api-key-to-octobot-cloud-from-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b435445fadf121ef14b6f9f7e7acc239102f5b80753053670582940ec7d249ff +size 236602 diff --git a/docs/static/images/guides/coinbase/add-api-key-to-octobot-cloud-from-strategy-start.png b/docs/static/images/guides/coinbase/add-api-key-to-octobot-cloud-from-strategy-start.png new file mode 100644 index 0000000000..73ad55e443 --- /dev/null +++ b/docs/static/images/guides/coinbase/add-api-key-to-octobot-cloud-from-strategy-start.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32555392d1a4d989a8291df9938b2c1e74ef88c07d5081fda3eece5cf955cf37 +size 207358 diff --git a/docs/static/images/guides/coinbase/api-creation-completed-selected-values.png b/docs/static/images/guides/coinbase/api-creation-completed-selected-values.png new file mode 100644 index 0000000000..0c26de6ef7 --- /dev/null +++ b/docs/static/images/guides/coinbase/api-creation-completed-selected-values.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcb41dcb2e3348e492816783b6d1ed7c1e2ee3adb909ae615b37c5f2c8629c5e +size 99735 diff --git a/docs/static/images/guides/coinbase/apis-list-create-new-api.png b/docs/static/images/guides/coinbase/apis-list-create-new-api.png new file mode 100644 index 0000000000..6a9dac3b2a --- /dev/null +++ b/docs/static/images/guides/coinbase/apis-list-create-new-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96d0dfdee1b7b85f07891ad34ab35e0103918bf6c4b5f85e885ea8269021913a +size 95534 diff --git a/docs/static/images/guides/coinbase/coinbase-account-login.png b/docs/static/images/guides/coinbase/coinbase-account-login.png new file mode 100644 index 0000000000..2d70b0511e --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-account-login.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbdb4c9a4be28d73348916bd4cda7f4201e745e9e151f2605fcd05ef1f746d34 +size 62777 diff --git a/docs/static/images/guides/coinbase/coinbase-api-key-created-starting-with-organizations.png b/docs/static/images/guides/coinbase/coinbase-api-key-created-starting-with-organizations.png new file mode 100644 index 0000000000..e99dd6079b --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-api-key-created-starting-with-organizations.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a0e932b834a342494385bd3606fa412b4942ccb649c9e7c053db4518ecfd800 +size 99575 diff --git a/docs/static/images/guides/coinbase/coinbase-api-key-created.png b/docs/static/images/guides/coinbase/coinbase-api-key-created.png new file mode 100644 index 0000000000..a4e28c8cb3 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-api-key-created.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9615f0d21b77ea5028afd984a1c3525365c98e34c72f5c50224ddda273e5e46e +size 99298 diff --git a/docs/static/images/guides/coinbase/coinbase-api-key-open-development-platform.png b/docs/static/images/guides/coinbase/coinbase-api-key-open-development-platform.png new file mode 100644 index 0000000000..8b15df2b8a --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-api-key-open-development-platform.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d51009f13af0b052632d14be99bb5489be07b218eeeeed11af95f95594418d3c +size 71697 diff --git a/docs/static/images/guides/coinbase/coinbase-api-key-select-multi-portfolio.png b/docs/static/images/guides/coinbase/coinbase-api-key-select-multi-portfolio.png new file mode 100644 index 0000000000..eac4843584 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-api-key-select-multi-portfolio.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28c716c2cb1ed10f9ab39d25e8088fa8c677d16818698f1e142cfa0dfb5e33b9 +size 21264 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-click-create.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-click-create.png new file mode 100644 index 0000000000..c122c5d14f --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-click-create.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99d41f759920416be21be47af92249b7a683d071762392b5af17ce45c5c34c3c +size 119292 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-create-api-key.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-create-api-key.png new file mode 100644 index 0000000000..357aae36ae --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-create-api-key.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0118041b538adbc9e3929d95d444563eccf6657201e937234c7e869b689ee6a +size 78330 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-created-ecdsa.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-created-ecdsa.png new file mode 100644 index 0000000000..8502ca24b8 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-created-ecdsa.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d531b6ca14dbbc257609186c3d3cd187454e781ecaf1b2ed78dfa6ba2ed1fb14 +size 87853 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-dashboard.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-dashboard.png new file mode 100644 index 0000000000..e23d134311 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-dashboard.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3916321739d0f21afdb01a30143b2367d609a634a9cdce605fc0308789fd9344 +size 122773 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-api-name.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-api-name.png new file mode 100644 index 0000000000..9f52a84364 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-api-name.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71fc8065d81867404b252f9dd5c033e6632e8aff619b8219d53e548b6444569a +size 63061 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-portflio-and-restrictions.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-portflio-and-restrictions.png new file mode 100644 index 0000000000..103dd11c53 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-portflio-and-restrictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb3c32d1413da53ce8d07e25cbaa5bb96e97ea25ef41a685a099228176b89e90 +size 109717 diff --git a/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-select-ecdsa.png b/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-select-ecdsa.png new file mode 100644 index 0000000000..9ac1532502 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-developer-platform-enter-select-ecdsa.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbaf4a37341b7a9c20da1d56103cd712789eaf0150c6fafcd0bb6214e7d7f3e9 +size 166310 diff --git a/docs/static/images/guides/coinbase/coinbase-multi-portfolio.png b/docs/static/images/guides/coinbase/coinbase-multi-portfolio.png new file mode 100644 index 0000000000..ad366f1cc6 --- /dev/null +++ b/docs/static/images/guides/coinbase/coinbase-multi-portfolio.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c270ad018dea740d5393d03e94e77b9a6cea1c82eb31895d18a2c7870860970 +size 139051 diff --git a/docs/static/images/guides/coinbase/select-api-name-and-restrictions.png b/docs/static/images/guides/coinbase/select-api-name-and-restrictions.png new file mode 100644 index 0000000000..3ed25ca1f4 --- /dev/null +++ b/docs/static/images/guides/coinbase/select-api-name-and-restrictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d64cd9720d7ebd7326b4390e5d113b932912ad287da9c59bdcbe88a73ecc67b +size 102311 diff --git a/docs/static/images/guides/configuration/access-octobot-trading-mode-config-from-profiles.png b/docs/static/images/guides/configuration/access-octobot-trading-mode-config-from-profiles.png new file mode 100644 index 0000000000..b03d610e61 --- /dev/null +++ b/docs/static/images/guides/configuration/access-octobot-trading-mode-config-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e60c27b19f41262ef95a16248b81b2c037d4892696e6c09b30eb562063cff5c +size 30848 diff --git a/docs/static/images/guides/configuration/custom-profile-evaluator-selector.png b/docs/static/images/guides/configuration/custom-profile-evaluator-selector.png new file mode 100644 index 0000000000..d5f6d4d784 --- /dev/null +++ b/docs/static/images/guides/configuration/custom-profile-evaluator-selector.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31f6bffc3f54cd418fa5359f154c406ead914cff4b05ff377f84222f7dd9a092 +size 115957 diff --git a/docs/static/images/guides/configuration/custom-profile-trading-modes-selector.png b/docs/static/images/guides/configuration/custom-profile-trading-modes-selector.png new file mode 100644 index 0000000000..9645ffed2a --- /dev/null +++ b/docs/static/images/guides/configuration/custom-profile-trading-modes-selector.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e76183aad59291755e2b16b706d1c19d3f30725875ae73abe82d331bc3e0aaf +size 122007 diff --git a/docs/static/images/guides/configuration/duplicate-octobot-profile.png b/docs/static/images/guides/configuration/duplicate-octobot-profile.png new file mode 100644 index 0000000000..8ea38f6991 --- /dev/null +++ b/docs/static/images/guides/configuration/duplicate-octobot-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:042088d20c1d15e68e2197affb3de2ed52512d1a14df6c0ae3d4beaf84129f0b +size 115545 diff --git a/docs/static/images/guides/configuration/exchange-accounts-configuration-in-octobot.png b/docs/static/images/guides/configuration/exchange-accounts-configuration-in-octobot.png new file mode 100644 index 0000000000..7b860a5d86 --- /dev/null +++ b/docs/static/images/guides/configuration/exchange-accounts-configuration-in-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1b6819ac2a656aaad43faff8b6bfd53fdea9dee9652ba5fcde348969bb529ef +size 76391 diff --git a/docs/static/images/guides/configuration/import-octobot-profile.png b/docs/static/images/guides/configuration/import-octobot-profile.png new file mode 100644 index 0000000000..1137438c20 --- /dev/null +++ b/docs/static/images/guides/configuration/import-octobot-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c402ccce85eb0e8a839933eb9b84bf29206df953022c0c3efeffef479279e896 +size 109184 diff --git a/docs/static/images/guides/configuration/interfaces-configuration-in-octobot.png b/docs/static/images/guides/configuration/interfaces-configuration-in-octobot.png new file mode 100644 index 0000000000..420e08046d --- /dev/null +++ b/docs/static/images/guides/configuration/interfaces-configuration-in-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:945dedfd96cc2ab10a2db00fc7f867ca89332ec679242b1852cc963fd0cec3b3 +size 66694 diff --git a/docs/static/images/guides/configuration/notifications-configuration-in-octobot.png b/docs/static/images/guides/configuration/notifications-configuration-in-octobot.png new file mode 100644 index 0000000000..c6dd50d628 --- /dev/null +++ b/docs/static/images/guides/configuration/notifications-configuration-in-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aefe22a072c5064bc18754f8ba4727fa5e27b24b627e6bb4380805ed550a22f +size 33158 diff --git a/docs/static/images/guides/configuration/octobot-evaluator-details-from-profiles.png b/docs/static/images/guides/configuration/octobot-evaluator-details-from-profiles.png new file mode 100644 index 0000000000..6fdd8126e4 --- /dev/null +++ b/docs/static/images/guides/configuration/octobot-evaluator-details-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85b515882bb34f0f334cc449c53da6492f309d5ae1361c67cab9ce0021fb2ed2 +size 75905 diff --git a/docs/static/images/guides/configuration/octobot-exchanges-settings-from-profiles.png b/docs/static/images/guides/configuration/octobot-exchanges-settings-from-profiles.png new file mode 100644 index 0000000000..ef7112cd76 --- /dev/null +++ b/docs/static/images/guides/configuration/octobot-exchanges-settings-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08b5270b9e1ecbdfbfe9c24a6f6fd0677273776e7be11fd8bcf53cf9efe7bdd6 +size 46303 diff --git a/docs/static/images/guides/configuration/octobot-profile-overview.png b/docs/static/images/guides/configuration/octobot-profile-overview.png new file mode 100644 index 0000000000..0ea30b8d1c --- /dev/null +++ b/docs/static/images/guides/configuration/octobot-profile-overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:013fba28979e293fb7fc98edb51f470737e97a76ba82e20287fa182688e8e047 +size 111823 diff --git a/docs/static/images/guides/configuration/octobot-trading-mode-details-from-profiles.png b/docs/static/images/guides/configuration/octobot-trading-mode-details-from-profiles.png new file mode 100644 index 0000000000..d08c63d7af --- /dev/null +++ b/docs/static/images/guides/configuration/octobot-trading-mode-details-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e055c86f6403fb4fa04c80ef4c67b9f9a4bc85f62e3a0e066a62612f5102a8 +size 368146 diff --git a/docs/static/images/guides/configuration/octobot-trading-pairs-settings-from-profiles.png b/docs/static/images/guides/configuration/octobot-trading-pairs-settings-from-profiles.png new file mode 100644 index 0000000000..8239c94771 --- /dev/null +++ b/docs/static/images/guides/configuration/octobot-trading-pairs-settings-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff06745d6ead8f7aa1497abb93803607489fcf5c399f17f16b99c201324abbfd +size 127482 diff --git a/docs/static/images/guides/configuration/octobot-trading-settings-from-profiles.png b/docs/static/images/guides/configuration/octobot-trading-settings-from-profiles.png new file mode 100644 index 0000000000..3dc38663ba --- /dev/null +++ b/docs/static/images/guides/configuration/octobot-trading-settings-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f2cc27dcb77c9aeb3218ad5a0793b34ef78a61f2fa6979782de77193ca82383 +size 48629 diff --git a/docs/static/images/guides/configuration/share-octobot-profile.png b/docs/static/images/guides/configuration/share-octobot-profile.png new file mode 100644 index 0000000000..c14d302f77 --- /dev/null +++ b/docs/static/images/guides/configuration/share-octobot-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af8c4405b2390c1c6eb42b3b3ac73fb047c1309321f7f93b2caf60053f12606f +size 143752 diff --git a/docs/static/images/guides/configuration/trading-mode-configuration-from-profiles.png b/docs/static/images/guides/configuration/trading-mode-configuration-from-profiles.png new file mode 100644 index 0000000000..63576aa60f --- /dev/null +++ b/docs/static/images/guides/configuration/trading-mode-configuration-from-profiles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40dcd1dc405b7c45cf8ec26df6b221310458c0efef8032a1afc18f1d95613be0 +size 117068 diff --git a/docs/static/images/guides/crypto-basket.png b/docs/static/images/guides/crypto-basket.png new file mode 100644 index 0000000000..7105f4e5ef --- /dev/null +++ b/docs/static/images/guides/crypto-basket.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfac310096fc01a542daca6fac20247ea7043d4523ba5b582b40a3f220449a85 +size 227490 diff --git a/docs/static/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png b/docs/static/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png new file mode 100644 index 0000000000..cdc4786e6c --- /dev/null +++ b/docs/static/images/guides/dca-trading-illustrated-by-a-man-watering-a-plant-growing-money.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d429c24cecf2ee704997b9ebe099092d6539a677b585936b6149796f13d1168f +size 94467 diff --git a/docs/static/images/guides/dev_env/create-pycharm-export-tentacles-config.png b/docs/static/images/guides/dev_env/create-pycharm-export-tentacles-config.png new file mode 100644 index 0000000000..b617e94926 --- /dev/null +++ b/docs/static/images/guides/dev_env/create-pycharm-export-tentacles-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3283260b8f93a83b9873fa6ee184bd7d1bbac2f0a0a6960415116d1cfe389def +size 100671 diff --git a/docs/static/images/guides/dev_env/create-pycharm-export-tentacles-to-repo-config.png b/docs/static/images/guides/dev_env/create-pycharm-export-tentacles-to-repo-config.png new file mode 100644 index 0000000000..960190f85f --- /dev/null +++ b/docs/static/images/guides/dev_env/create-pycharm-export-tentacles-to-repo-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f4910a4b17922ff8580f38d167d8c623bc9e7a6232f25ca68c670a78c070597 +size 105102 diff --git a/docs/static/images/guides/dev_env/create-pycharm-install-tentacles-config.png b/docs/static/images/guides/dev_env/create-pycharm-install-tentacles-config.png new file mode 100644 index 0000000000..3bb97a37ff --- /dev/null +++ b/docs/static/images/guides/dev_env/create-pycharm-install-tentacles-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:557b88f5a3131e58060053cdb9328eb9ade4f4d222b645464fef95d54a3b307d +size 84854 diff --git a/docs/static/images/guides/dev_env/create-pycharm-interpreter.png b/docs/static/images/guides/dev_env/create-pycharm-interpreter.png new file mode 100644 index 0000000000..761e080d06 --- /dev/null +++ b/docs/static/images/guides/dev_env/create-pycharm-interpreter.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a00602ab24d72b8a13033df1b467f73cd34b33f989c73d0579f228c3fa94958 +size 131588 diff --git a/docs/static/images/guides/dev_env/create-pycharm-start-octobot-run-config.png b/docs/static/images/guides/dev_env/create-pycharm-start-octobot-run-config.png new file mode 100644 index 0000000000..5b8729015c --- /dev/null +++ b/docs/static/images/guides/dev_env/create-pycharm-start-octobot-run-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ea86267d8b73981d5fc6708aa2266bfa1869518f3c037672b669e5b561a7968 +size 107225 diff --git a/docs/static/images/guides/dev_env/create-pycharm-tests-config.png b/docs/static/images/guides/dev_env/create-pycharm-tests-config.png new file mode 100644 index 0000000000..8c9f838ef3 --- /dev/null +++ b/docs/static/images/guides/dev_env/create-pycharm-tests-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4800f0226e64b1026d7c8323d83bae31d63749fafc3d114140c2e88f60080522 +size 101072 diff --git a/docs/static/images/guides/dev_env/edit-pycharm-configurations.png b/docs/static/images/guides/dev_env/edit-pycharm-configurations.png new file mode 100644 index 0000000000..a4bb4b6e61 --- /dev/null +++ b/docs/static/images/guides/dev_env/edit-pycharm-configurations.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f44aaee4d98be5c97c1c3030b38e50233cc1da9e93a1dbd72844d89efad269e0 +size 62954 diff --git a/docs/static/images/guides/dev_env/execute-pycharm-export-tentacles-to-repo.png b/docs/static/images/guides/dev_env/execute-pycharm-export-tentacles-to-repo.png new file mode 100644 index 0000000000..1d3ef02ddf --- /dev/null +++ b/docs/static/images/guides/dev_env/execute-pycharm-export-tentacles-to-repo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb40bb34e2486af9b5c6c8771d92adc6c7716268834d7f98e04904f99925ecfe +size 75995 diff --git a/docs/static/images/guides/dev_env/execute-pycharm-export-tentacles.png b/docs/static/images/guides/dev_env/execute-pycharm-export-tentacles.png new file mode 100644 index 0000000000..aa19b1a4e4 --- /dev/null +++ b/docs/static/images/guides/dev_env/execute-pycharm-export-tentacles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3c2eed24043e72a13c10938656026d4f19f47001da01126e4afc3acfb9427da +size 71591 diff --git a/docs/static/images/guides/dev_env/execute-pycharm-install-tentacles.png b/docs/static/images/guides/dev_env/execute-pycharm-install-tentacles.png new file mode 100644 index 0000000000..2e5fe89968 --- /dev/null +++ b/docs/static/images/guides/dev_env/execute-pycharm-install-tentacles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87a5fac7c5d80ab8c7991232797e0d5520a0c084d01e4510ec69b60d8af80d88 +size 108578 diff --git a/docs/static/images/guides/dev_env/execute-pycharm-start-octobot.png b/docs/static/images/guides/dev_env/execute-pycharm-start-octobot.png new file mode 100644 index 0000000000..1fbfbad574 --- /dev/null +++ b/docs/static/images/guides/dev_env/execute-pycharm-start-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30788f70cabe94c3869700489152069495f233ccad7b07ac3b7f75e4ef504b4 +size 169550 diff --git a/docs/static/images/guides/dev_env/execute-pycharm-tests.png b/docs/static/images/guides/dev_env/execute-pycharm-tests.png new file mode 100644 index 0000000000..f3d3f7db8d --- /dev/null +++ b/docs/static/images/guides/dev_env/execute-pycharm-tests.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9ff842772504c7db0f03300f453ab4b48bb22719a98090d724e5485859f05c0 +size 72331 diff --git a/docs/static/images/guides/dev_env/install-octobot-requirements-from-pycharm.png b/docs/static/images/guides/dev_env/install-octobot-requirements-from-pycharm.png new file mode 100644 index 0000000000..828ceaf893 --- /dev/null +++ b/docs/static/images/guides/dev_env/install-octobot-requirements-from-pycharm.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:819a1bbb6ca21e94d1e9b2c5e06e54aa4fd5188d11298920a9278b161fcea444 +size 22263 diff --git a/docs/static/images/guides/dev_env/pycharm-debug-octobot.png b/docs/static/images/guides/dev_env/pycharm-debug-octobot.png new file mode 100644 index 0000000000..fa9026037e --- /dev/null +++ b/docs/static/images/guides/dev_env/pycharm-debug-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:303d4c6ac8d0a70bc72b45ca3c8ceabf7ffc80834c0733d8cbdcbc1fdc57ab51 +size 236107 diff --git a/docs/static/images/guides/dev_env/vscode-create-octobot-venv.png b/docs/static/images/guides/dev_env/vscode-create-octobot-venv.png new file mode 100644 index 0000000000..28710e519a --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-create-octobot-venv.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b427c416f3ae09addd2413603a0787f1f6e680d8ee41f80810e3c490f856fc0 +size 87837 diff --git a/docs/static/images/guides/dev_env/vscode-executed-export-tentacles-to-repo.png b/docs/static/images/guides/dev_env/vscode-executed-export-tentacles-to-repo.png new file mode 100644 index 0000000000..7a6dc4d6ea --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-executed-export-tentacles-to-repo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee10d168ab9f99da9632f47185a80063a9c4ced6543a256366b0fadfe2075234 +size 38761 diff --git a/docs/static/images/guides/dev_env/vscode-executed-export-tentacles-to-zip.png b/docs/static/images/guides/dev_env/vscode-executed-export-tentacles-to-zip.png new file mode 100644 index 0000000000..1de7af5f54 --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-executed-export-tentacles-to-zip.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f51b769f9763e4a5d1323a18eee1e9d20ddd94069c776bc75ee6031bd65b2357 +size 38756 diff --git a/docs/static/images/guides/dev_env/vscode-executed-install-tentacles-from-zip.png b/docs/static/images/guides/dev_env/vscode-executed-install-tentacles-from-zip.png new file mode 100644 index 0000000000..a2f33238f6 --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-executed-install-tentacles-from-zip.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf167dd68e0676bcc01be0c73b9420ff34351bfb62bc4498716a0becf698d1f6 +size 57102 diff --git a/docs/static/images/guides/dev_env/vscode-executed-start-octobot.png b/docs/static/images/guides/dev_env/vscode-executed-start-octobot.png new file mode 100644 index 0000000000..ae86a3c200 --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-executed-start-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60f0fb5ef27953ba366e97ffe2e66bcf03a6d0bd1c28263674f7341487b923fc +size 81144 diff --git a/docs/static/images/guides/dev_env/vscode-executed-tests.png b/docs/static/images/guides/dev_env/vscode-executed-tests.png new file mode 100644 index 0000000000..710ae0d011 --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-executed-tests.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f0f3b3caa8d6240a8d8cdc8f1dd222f8ac1e57507d2bdcc48e54635f7e7a1f +size 39837 diff --git a/docs/static/images/guides/dev_env/vscode-install-python-requirements.png b/docs/static/images/guides/dev_env/vscode-install-python-requirements.png new file mode 100644 index 0000000000..79e805b13b --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-install-python-requirements.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a77d7e05c3ba74ad1f76ff7c841957ecd3facbe739acc20e010a14b6da8cb606 +size 29749 diff --git a/docs/static/images/guides/dev_env/vscode-run-configurations-selector.png b/docs/static/images/guides/dev_env/vscode-run-configurations-selector.png new file mode 100644 index 0000000000..df9e04f876 --- /dev/null +++ b/docs/static/images/guides/dev_env/vscode-run-configurations-selector.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb8d769eb58de581a59a88e9e7ffabb03fc08680f4bb9a9d491056a47418ef63 +size 20678 diff --git a/docs/static/images/guides/download-octobot-cloud-strategies-in-open-source-bot.png b/docs/static/images/guides/download-octobot-cloud-strategies-in-open-source-bot.png new file mode 100644 index 0000000000..56ee82428b --- /dev/null +++ b/docs/static/images/guides/download-octobot-cloud-strategies-in-open-source-bot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e618752bde489bfa3cb644d258770a92b2239191c0eede40a05dcb5a91cc78b +size 220893 diff --git a/docs/static/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png b/docs/static/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png new file mode 100644 index 0000000000..23877a51a2 --- /dev/null +++ b/docs/static/images/guides/grid-trading-illustrated-by-a-man-stepping-up-on-green-stairs-grabbing-coins.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2bc616356a17e3b76a18c7af52da102a91a79c3de73acdf1f163b05275ffe26 +size 87852 diff --git a/docs/static/images/guides/having-multiple-octobots-on-different-strategies.png b/docs/static/images/guides/having-multiple-octobots-on-different-strategies.png new file mode 100644 index 0000000000..ce2d8d6fca --- /dev/null +++ b/docs/static/images/guides/having-multiple-octobots-on-different-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cba66560a4beaf3cda8035750df8ccea0601b8675582ba48fa17894d996b0c3 +size 145218 diff --git a/docs/static/images/guides/hyperliquid/hyperliquid-add-api-days-and-copy-private-key.png b/docs/static/images/guides/hyperliquid/hyperliquid-add-api-days-and-copy-private-key.png new file mode 100644 index 0000000000..2366942d0d --- /dev/null +++ b/docs/static/images/guides/hyperliquid/hyperliquid-add-api-days-and-copy-private-key.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:125afa210e35726009f5f4ed585b815623b424eb5c46419976a9de0b572b9564 +size 111267 diff --git a/docs/static/images/guides/hyperliquid/hyperliquid-api-click-authorize-from-popup.png b/docs/static/images/guides/hyperliquid/hyperliquid-api-click-authorize-from-popup.png new file mode 100644 index 0000000000..eedaf19692 --- /dev/null +++ b/docs/static/images/guides/hyperliquid/hyperliquid-api-click-authorize-from-popup.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2eef4adf4a4f70704b86aba81a3cc18ac29caf2b6bd1aa3bd4b4868d9830378 +size 111633 diff --git a/docs/static/images/guides/hyperliquid/hyperliquid-api-click-authorize.png b/docs/static/images/guides/hyperliquid/hyperliquid-api-click-authorize.png new file mode 100644 index 0000000000..fa39f29c4c --- /dev/null +++ b/docs/static/images/guides/hyperliquid/hyperliquid-api-click-authorize.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73439b88551c40772f5edac8ef11a011e95289b2f1d0324c8eb8ce221d7ad1e7 +size 41275 diff --git a/docs/static/images/guides/hyperliquid/hyperliquid-api-enter-name-and-generate.png b/docs/static/images/guides/hyperliquid/hyperliquid-api-enter-name-and-generate.png new file mode 100644 index 0000000000..de58ed4fed --- /dev/null +++ b/docs/static/images/guides/hyperliquid/hyperliquid-api-enter-name-and-generate.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4520495084ee826a63421f9bc8f28486bc86d197381824ea571e1e640e1d2464 +size 42400 diff --git a/docs/static/images/guides/hyperliquid/hyperliquid-copy-public-key.png b/docs/static/images/guides/hyperliquid/hyperliquid-copy-public-key.png new file mode 100644 index 0000000000..03b1229763 --- /dev/null +++ b/docs/static/images/guides/hyperliquid/hyperliquid-copy-public-key.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91eb8dc00f8fc03106ed90824931a6b79888616cd1fb5295c7628f493ae42478 +size 76457 diff --git a/docs/static/images/guides/hyperliquid/hyperliquid-go-to-api-settings.png b/docs/static/images/guides/hyperliquid/hyperliquid-go-to-api-settings.png new file mode 100644 index 0000000000..e96d269fc0 --- /dev/null +++ b/docs/static/images/guides/hyperliquid/hyperliquid-go-to-api-settings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48f3e358c0e770d10f14e252d9c4da5c158e216075939f58b9a48c978ac62ea7 +size 128270 diff --git a/docs/static/images/guides/installation/digitalocean/choose-droplet-location.png b/docs/static/images/guides/installation/digitalocean/choose-droplet-location.png new file mode 100644 index 0000000000..4c792f81f2 --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/choose-droplet-location.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c976b616f82f0ba803c4e9900b050ee00e5c6ee5c47dc008c3625c5dc610316a +size 235792 diff --git a/docs/static/images/guides/installation/digitalocean/create-droplet.png b/docs/static/images/guides/installation/digitalocean/create-droplet.png new file mode 100644 index 0000000000..128c6860f1 --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/create-droplet.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c28d586c482b586db2ef102ef58f1f26bc82a80091de21f31aea29807e70b8a +size 291497 diff --git a/docs/static/images/guides/installation/digitalocean/digital-ocean-droplet-access.png b/docs/static/images/guides/installation/digitalocean/digital-ocean-droplet-access.png new file mode 100644 index 0000000000..18049567ff --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/digital-ocean-droplet-access.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8163862f10df04ec69a1504f14d668c5c9f196d9afd900555964b93d7678137 +size 77517 diff --git a/docs/static/images/guides/installation/digitalocean/digital-ocean-droplet-pricing.png b/docs/static/images/guides/installation/digitalocean/digital-ocean-droplet-pricing.png new file mode 100644 index 0000000000..aef547bfe4 --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/digital-ocean-droplet-pricing.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9679bfbbf82b5909b8bed930e3f8dc52516bb9d58929021731be5631b4ff5bf3 +size 88785 diff --git a/docs/static/images/guides/installation/digitalocean/digital-ocean-octobot-app-page.png b/docs/static/images/guides/installation/digitalocean/digital-ocean-octobot-app-page.png new file mode 100644 index 0000000000..cd7409db8c --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/digital-ocean-octobot-app-page.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:456f69c5a8d43107e121fa158ae3448ff754627578bc0c01ea2a951bee843085 +size 227715 diff --git a/docs/static/images/guides/installation/digitalocean/digital-ocean-octobot-image.png b/docs/static/images/guides/installation/digitalocean/digital-ocean-octobot-image.png new file mode 100644 index 0000000000..dda6a9b8c3 --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/digital-ocean-octobot-image.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a590e2624ec4b639f227ee71dcaeb672f4a09f6e100bfefb1c2043b332c60eba +size 86316 diff --git a/docs/static/images/guides/installation/digitalocean/get-droplet-ip.png b/docs/static/images/guides/installation/digitalocean/get-droplet-ip.png new file mode 100644 index 0000000000..ef4f65acab --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/get-droplet-ip.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b386c95f049aa49a8bbe82826937323a2040be1d2bc3b549a254dbeabbbe4bd1 +size 148123 diff --git a/docs/static/images/guides/installation/digitalocean/open-octobot-with-droplet-ip.png b/docs/static/images/guides/installation/digitalocean/open-octobot-with-droplet-ip.png new file mode 100644 index 0000000000..c5edbd3302 --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/open-octobot-with-droplet-ip.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ebd547a24cc777f5ef1eb9d0e03c320d3e506812acb0e84547e616346d0e69e +size 21336 diff --git a/docs/static/images/guides/installation/digitalocean/wait-for-droplet-start.png b/docs/static/images/guides/installation/digitalocean/wait-for-droplet-start.png new file mode 100644 index 0000000000..40a2f4eee8 --- /dev/null +++ b/docs/static/images/guides/installation/digitalocean/wait-for-droplet-start.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15dba3361f649bffd963cc8564275799a6a3cafda93716489bb827c0e8fd8d41 +size 86382 diff --git a/docs/static/images/guides/interfaces/RedditForumEvaluator-configuration-to-select-subreddits-to-follow.png b/docs/static/images/guides/interfaces/RedditForumEvaluator-configuration-to-select-subreddits-to-follow.png new file mode 100644 index 0000000000..2dfb7e247c --- /dev/null +++ b/docs/static/images/guides/interfaces/RedditForumEvaluator-configuration-to-select-subreddits-to-follow.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3599fe8095db64616ec6cdc2360e6fd675106bb62fb3270d20bd0ad3372882c +size 69054 diff --git a/docs/static/images/guides/interfaces/octobot-collaborating-with-chatgpt-light.png b/docs/static/images/guides/interfaces/octobot-collaborating-with-chatgpt-light.png new file mode 100644 index 0000000000..5625bd279b --- /dev/null +++ b/docs/static/images/guides/interfaces/octobot-collaborating-with-chatgpt-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:010915a708fd0ed06367a13bc459f1da6712dffb5c72371f6191e69d3a27efd8 +size 154475 diff --git a/docs/static/images/guides/interfaces/reddit-connection-to-octobot-illustrated-by-reddit-logo.png b/docs/static/images/guides/interfaces/reddit-connection-to-octobot-illustrated-by-reddit-logo.png new file mode 100644 index 0000000000..14c0669fc0 --- /dev/null +++ b/docs/static/images/guides/interfaces/reddit-connection-to-octobot-illustrated-by-reddit-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62b5775aa1fa521614d60fbeb81d96105cd231b1ac09ac7acd35ac6040e0adc1 +size 10427 diff --git a/docs/static/images/guides/interfaces/reddit-create-app.png b/docs/static/images/guides/interfaces/reddit-create-app.png new file mode 100644 index 0000000000..b6b58d133f --- /dev/null +++ b/docs/static/images/guides/interfaces/reddit-create-app.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83a2a0b2741c0797be47b2e34f66f4dc050e3626249e00c1761219fff91f1209 +size 25855 diff --git a/docs/static/images/guides/interfaces/reddit-created-app.png b/docs/static/images/guides/interfaces/reddit-created-app.png new file mode 100644 index 0000000000..8e3bf093b3 --- /dev/null +++ b/docs/static/images/guides/interfaces/reddit-created-app.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7287a481d9b8684a04ecb8d26c5c6aae88c534fa34febeb949450efede59a000 +size 13358 diff --git a/docs/static/images/guides/interfaces/reddit-octobot-config.png b/docs/static/images/guides/interfaces/reddit-octobot-config.png new file mode 100644 index 0000000000..952ec518e0 --- /dev/null +++ b/docs/static/images/guides/interfaces/reddit-octobot-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b49be283395a6ba4fbf7e4f6ced2f6f9b45b3ad7710ce4baff51810053934bd4 +size 15887 diff --git a/docs/static/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png b/docs/static/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png new file mode 100644 index 0000000000..9fb298b40b --- /dev/null +++ b/docs/static/images/guides/interfaces/telegram-connection-to-octobot-illustrated-by-telegram-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c39f3638c20b8fef43f012529fca64e4b46cefc87f1d704cb91984ff5158ae1 +size 5300 diff --git a/docs/static/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png b/docs/static/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png new file mode 100644 index 0000000000..9507689720 --- /dev/null +++ b/docs/static/images/guides/interfaces/tradingview-automation-illustrated-by-tradingview-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1748d756f4dd4626fb8853857b0e8e116596854757a2b8ac4130461d222df4dc +size 7344 diff --git a/docs/static/images/guides/kucoin/account-setting-api-management.png b/docs/static/images/guides/kucoin/account-setting-api-management.png new file mode 100644 index 0000000000..81fbc306c1 --- /dev/null +++ b/docs/static/images/guides/kucoin/account-setting-api-management.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0e625210973f59204c5a7c6339957725019591dd38b2685ee257a39318e4c55 +size 190353 diff --git a/docs/static/images/guides/kucoin/add-api-key-to-octobot-cloud-from-profile.png b/docs/static/images/guides/kucoin/add-api-key-to-octobot-cloud-from-profile.png new file mode 100644 index 0000000000..eac13e7b3a --- /dev/null +++ b/docs/static/images/guides/kucoin/add-api-key-to-octobot-cloud-from-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f62eba662def52531cf8275dbfa3f75dc91080253923d877a060bcf0b5bff863 +size 234751 diff --git a/docs/static/images/guides/kucoin/add-api-key-to-octobot-cloud-from-strategy-start.png b/docs/static/images/guides/kucoin/add-api-key-to-octobot-cloud-from-strategy-start.png new file mode 100644 index 0000000000..810f93c28f --- /dev/null +++ b/docs/static/images/guides/kucoin/add-api-key-to-octobot-cloud-from-strategy-start.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f9f412d01e9a9417dc73752af63a358728eded45606abfc13b7d820d6d9e514 +size 201210 diff --git a/docs/static/images/guides/kucoin/api-creation-completed-selected-values.png b/docs/static/images/guides/kucoin/api-creation-completed-selected-values.png new file mode 100644 index 0000000000..0d0c7c723b --- /dev/null +++ b/docs/static/images/guides/kucoin/api-creation-completed-selected-values.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8815127bfa7bed42c749fa23b7f0ea77372050cb3f5fc627a28b5f7ea4c700ae +size 45100 diff --git a/docs/static/images/guides/kucoin/apis-list-create-new-api.png b/docs/static/images/guides/kucoin/apis-list-create-new-api.png new file mode 100644 index 0000000000..4521e3e949 --- /dev/null +++ b/docs/static/images/guides/kucoin/apis-list-create-new-api.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:979fa0d5564c6724da0e5421b7fd3596393e2a5a011778592e67147161c1e29f +size 140860 diff --git a/docs/static/images/guides/kucoin/create-api-security-verification.png b/docs/static/images/guides/kucoin/create-api-security-verification.png new file mode 100644 index 0000000000..4d90594dae --- /dev/null +++ b/docs/static/images/guides/kucoin/create-api-security-verification.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e23f728215c576a357073df79a4014d7e2b23d74b231ac8a804357a311f9531f +size 15868 diff --git a/docs/static/images/guides/kucoin/kucoin-account-login.png b/docs/static/images/guides/kucoin/kucoin-account-login.png new file mode 100644 index 0000000000..0fbd04a2e5 --- /dev/null +++ b/docs/static/images/guides/kucoin/kucoin-account-login.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d876b3fe82736ed229577904450a91a3184e6aea3064ee1e73b4da90db87836c +size 653026 diff --git a/docs/static/images/guides/kucoin/kucoin-api-key-created.png b/docs/static/images/guides/kucoin/kucoin-api-key-created.png new file mode 100644 index 0000000000..a62990c58a --- /dev/null +++ b/docs/static/images/guides/kucoin/kucoin-api-key-created.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8902063ab0bace9c26c5e26d5218091d03fba4ea80c881dba0c0083b6d8520b +size 36666 diff --git a/docs/static/images/guides/kucoin/select-api-name-passphrase-and-restrictions.png b/docs/static/images/guides/kucoin/select-api-name-passphrase-and-restrictions.png new file mode 100644 index 0000000000..8d2fefbf14 --- /dev/null +++ b/docs/static/images/guides/kucoin/select-api-name-passphrase-and-restrictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8936db8df02602de2ab504ea5826b6ff9549b6a232d91f1f3fafa2072258fe6 +size 144170 diff --git a/docs/static/images/guides/octobot-cloud-restart-octobot.png b/docs/static/images/guides/octobot-cloud-restart-octobot.png new file mode 100644 index 0000000000..2261cbe3cd --- /dev/null +++ b/docs/static/images/guides/octobot-cloud-restart-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adf2c2559c70ccc97927b00e803a07fce0df4a127c5f8a4488e85aea639ee0ab +size 160302 diff --git a/docs/static/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png b/docs/static/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png new file mode 100644 index 0000000000..bbd674df47 --- /dev/null +++ b/docs/static/images/guides/octobot-cloud-strategies-explorer-with-crypto-baskets-and-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a91b190c28b0de3dfcb1d7ff02c43927ceb02174d018706149000d288bbcdc80 +size 249071 diff --git a/docs/static/images/guides/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png b/docs/static/images/guides/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png new file mode 100644 index 0000000000..8b71bcd4b0 --- /dev/null +++ b/docs/static/images/guides/octobot-multi-exchange-dashboard-with-historical-portfolio-value-holdings-pie-chart-and-running-bots.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82ff2cc6b852bd302ca33b955571507e289dc5bdc0b8fcd619c7a0e3054ffd15 +size 260248 diff --git a/docs/static/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg b/docs/static/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg new file mode 100644 index 0000000000..4d218550bd --- /dev/null +++ b/docs/static/images/guides/octobot-pro/octobot-pro-report-btc-usdt-with-chart-trades-portfolio-value-and-rsi.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17abde8fb350afb4d98945e552b726b7b8a2befbdf262c9d8bf06b8331bc9404 +size 421717 diff --git a/docs/static/images/guides/paper-trading-cloud-octobot-expiring-in-2-days.png b/docs/static/images/guides/paper-trading-cloud-octobot-expiring-in-2-days.png new file mode 100644 index 0000000000..17e8617e0f --- /dev/null +++ b/docs/static/images/guides/paper-trading-cloud-octobot-expiring-in-2-days.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34c1aa85e6e1660017191c5193e6cec9708bef95c5fbc1aead689b0745f788e6 +size 83296 diff --git a/docs/static/images/guides/paper-trading-cloud-octobot.png b/docs/static/images/guides/paper-trading-cloud-octobot.png new file mode 100644 index 0000000000..6ec1591b53 --- /dev/null +++ b/docs/static/images/guides/paper-trading-cloud-octobot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0d0590a35d5d7f673ceceff877c98a29117d20a843969280ab5f43c5c34b8a6 +size 143820 diff --git a/docs/static/images/guides/paper-trading-virtual-portfolio-configuration.png b/docs/static/images/guides/paper-trading-virtual-portfolio-configuration.png new file mode 100644 index 0000000000..d674520b4f --- /dev/null +++ b/docs/static/images/guides/paper-trading-virtual-portfolio-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44acd5abd603c3501740142db98ae8ea0ab6a254b4a406580e0dffcde911a9ba +size 33784 diff --git a/docs/static/images/guides/pnl.png b/docs/static/images/guides/pnl.png new file mode 100644 index 0000000000..9d44041f85 --- /dev/null +++ b/docs/static/images/guides/pnl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42b062dcba8d9c17a6a74c772ddf01445463122d85a903343d58ca1182bfcbfd +size 143062 diff --git a/docs/static/images/guides/premium-octobot-extension/premium-octobot-extension-buy-section.png b/docs/static/images/guides/premium-octobot-extension/premium-octobot-extension-buy-section.png new file mode 100644 index 0000000000..60408da921 --- /dev/null +++ b/docs/static/images/guides/premium-octobot-extension/premium-octobot-extension-buy-section.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11264016a972c235f79a5bbf4ebb540c0c2a60acea3f7792f83846a69ba6857d +size 8941 diff --git a/docs/static/images/guides/stopping-cloud-octobot-cancelling-orders.png b/docs/static/images/guides/stopping-cloud-octobot-cancelling-orders.png new file mode 100644 index 0000000000..761ac92cad --- /dev/null +++ b/docs/static/images/guides/stopping-cloud-octobot-cancelling-orders.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b34399462e363f9c8d8d2da448df23b31a41fc315269a0aa418c14a5d2b34f0f +size 225817 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-campaigns-selector.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-campaigns-selector.png new file mode 100644 index 0000000000..4a011b9dd0 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-campaigns-selector.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adf49ded2c78a4e9ce7c289468f334c5713b8bfa0ccedbd498abca5526ffeceb +size 58572 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-compare-run-results.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-compare-run-results.png new file mode 100644 index 0000000000..bf3c7cb5ff --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-compare-run-results.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e9303fedbf873e019c5b5caf7976fa3d0f4c71e4be96a5d496ded12adbfaeed +size 53986 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-configure-evaluators-settings.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-configure-evaluators-settings.png new file mode 100644 index 0000000000..d054c8222a --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-configure-evaluators-settings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:241febb73790e22a5443e8dc6e679f7505c07a346179e9a556375b8e86e49838 +size 100239 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-and-configure-evaluators.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-and-configure-evaluators.png new file mode 100644 index 0000000000..5529de0a11 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-and-configure-evaluators.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f99046e6891cf5a663267bfad3b999a1c5d5c3e700aed6f15e30e95eabf1f87d +size 171509 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-coins.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-coins.png new file mode 100644 index 0000000000..5af8b0ebbe --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-coins.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d9daba495d0b0940c6c60d38d6b4ddb6d3db71964db0c131264cb5b5ee066b8 +size 199512 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-portfolio.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-portfolio.png new file mode 100644 index 0000000000..a60992fc13 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-portfolio.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa1dca09844fc61eb82ca5cc09f9a065ca40347ec59d9c6d24eebd267bc81e81 +size 19845 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-trading-mode.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-trading-mode.png new file mode 100644 index 0000000000..50eec596c3 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-select-trading-mode.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d15200d33c13c37bab804fe6a123805a23f15719d7a25c57a6e12b10921f3fd +size 149993 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-summary.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-summary.png new file mode 100644 index 0000000000..05840e2524 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-create-a-new-strategy-summary.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:804a40dc0e69ba72913ca04aebf595913ea05f5bb72fc3d3bca55884fd259592 +size 81318 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-display-settings.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-display-settings.png new file mode 100644 index 0000000000..c3ce06ad96 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-display-settings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1649ce1d4212845df2562d39408aedd608db1197f6d29f66fdabf731e0253533 +size 102807 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-edit-current-backtesting.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-edit-current-backtesting.png new file mode 100644 index 0000000000..2c64379d59 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-edit-current-backtesting.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bfe092c556554fa7e02b853ae37e42ccc32cf6533216b1b173cd466a2787f18 +size 49280 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-edit-current-profile.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-edit-current-profile.png new file mode 100644 index 0000000000..d4dba84d76 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-edit-current-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e1da3736b27b68db13050e1cb63037dd997135ce49282a03f2bf3a015b548bd +size 340515 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings-customize-columns.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings-customize-columns.png new file mode 100644 index 0000000000..029a86412a --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings-customize-columns.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deb9923556ddbfca2286298f3deaf38d248b6faac816ba8396cfcc7a587a7519 +size 217196 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings.png new file mode 100644 index 0000000000..9d2271bcf1 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-explore-your-past-backtestings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:701f3db822516b2a5f809ae66357c90a76707ddcaa394d5cbf63916cf5740a6a +size 64896 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-historical-charts.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-historical-charts.png new file mode 100644 index 0000000000..70d2c755b0 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-historical-charts.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbccc686839a61139b8995aa0aaad13261c5fd498389659b4f60acd2a660a977 +size 200719 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-new-backtesting.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-new-backtesting.png new file mode 100644 index 0000000000..eb9949f96a --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-new-backtesting.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fadd5de8a4fa117ffb9864a0546ddb05920b2563f3463b50eb8ccc5d2c5e7115 +size 4803 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png new file mode 100644 index 0000000000..cb9d7ab11d --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-on-doge-btc-shib.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2cb0093721c50cdec3fc4e47526306e27a2777a3f8b43cbd59286e4d2bb8abd +size 333764 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-summary.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-summary.png new file mode 100644 index 0000000000..266ca46e68 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-summary.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba12580b848d40cef3e93398bc126c2c900ce17e544d0de931fcc0f83db9dc75 +size 142776 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-trade-and-orders-explorer.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-trade-and-orders-explorer.png new file mode 100644 index 0000000000..24cd7fe28f --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-results-trade-and-orders-explorer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16b8a782cc7aca41967a6cdd34b24b07a8ae263c8214c4ca0bf8a6c718ff7ba3 +size 223185 diff --git a/docs/static/images/guides/strategy-designer/octobot-strategy-designer-use-as-live-profile.png b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-use-as-live-profile.png new file mode 100644 index 0000000000..4487cf4265 --- /dev/null +++ b/docs/static/images/guides/strategy-designer/octobot-strategy-designer-use-as-live-profile.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7ba42bdad6f4e41e48640ae321726104c540f9a811fb753d8376f5c6623bf34 +size 2332 diff --git a/docs/static/images/guides/trading-account-type-choice-real-or-paper-trading.png b/docs/static/images/guides/trading-account-type-choice-real-or-paper-trading.png new file mode 100644 index 0000000000..ffd0327f09 --- /dev/null +++ b/docs/static/images/guides/trading-account-type-choice-real-or-paper-trading.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07b74ae558b1e68ed8334de0f583803ea208dc960b7f9c64cf7dbdfa77b17a8d +size 32143 diff --git a/docs/static/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png b/docs/static/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png new file mode 100644 index 0000000000..416d472ccd --- /dev/null +++ b/docs/static/images/guides/trading-modes/octobot-open-source-using-crypto-baskets-from-premium-extension.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41b7292b9fc9960781afdc194c4c0b76133fd3c48a8f83e520da0ad049813ca7 +size 82350 diff --git a/docs/static/images/guides/trading-view/creating-a-indicator-alert-from-tradingview-trigger-options.png b/docs/static/images/guides/trading-view/creating-a-indicator-alert-from-tradingview-trigger-options.png new file mode 100644 index 0000000000..dbe412a3e1 --- /dev/null +++ b/docs/static/images/guides/trading-view/creating-a-indicator-alert-from-tradingview-trigger-options.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d142a65a7b4ac2dafdcbd07db44eaef867d07480cbe6d11f28905233cf5fef8 +size 25111 diff --git a/docs/static/images/guides/trading-view/creating-a-indicator-alert-from-tradingview.png b/docs/static/images/guides/trading-view/creating-a-indicator-alert-from-tradingview.png new file mode 100644 index 0000000000..f54e0b5f20 --- /dev/null +++ b/docs/static/images/guides/trading-view/creating-a-indicator-alert-from-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:078c3c54880e0908f215cb5f9b2f8a93ecce1d0e82eef3409a9d2142a181207c +size 39573 diff --git a/docs/static/images/guides/trading-view/creating-a-price-alert-from-tradingview.png b/docs/static/images/guides/trading-view/creating-a-price-alert-from-tradingview.png new file mode 100644 index 0000000000..1897a58468 --- /dev/null +++ b/docs/static/images/guides/trading-view/creating-a-price-alert-from-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4997e5f45ed3e5237c739d689aceed0d9a15b77f9b4cf73dc01d143ff4a170e7 +size 36903 diff --git a/docs/static/images/guides/trading-view/creating-a-strategy-alert-from-tradingview.png b/docs/static/images/guides/trading-view/creating-a-strategy-alert-from-tradingview.png new file mode 100644 index 0000000000..5f23999457 --- /dev/null +++ b/docs/static/images/guides/trading-view/creating-a-strategy-alert-from-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bdf44286926a3577fe54cc2d5425860b4cb98ee3e25cef8e5390855b2a74402 +size 193900 diff --git a/docs/static/images/guides/trading-view/creating-an-alert-from-tradingview-webhook-url.png b/docs/static/images/guides/trading-view/creating-an-alert-from-tradingview-webhook-url.png new file mode 100644 index 0000000000..5d088fb60e --- /dev/null +++ b/docs/static/images/guides/trading-view/creating-an-alert-from-tradingview-webhook-url.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb21cedd5d4d5d5b9851851fa4fb944b51328f0e3864c82ece9fe9a78d0a6d47 +size 42214 diff --git a/docs/static/images/guides/trading-view/creating-an-alert-from-tradingview.png b/docs/static/images/guides/trading-view/creating-an-alert-from-tradingview.png new file mode 100644 index 0000000000..a435ec15e3 --- /dev/null +++ b/docs/static/images/guides/trading-view/creating-an-alert-from-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59cacd163c40603274ff46312631d0f7354a32f0bbe846f1ffdccf4550d0e733 +size 171620 diff --git a/docs/static/images/guides/trading-view/creer-alerte-de-prix-depuis-tradingview.png b/docs/static/images/guides/trading-view/creer-alerte-de-prix-depuis-tradingview.png new file mode 100644 index 0000000000..73a941c7df --- /dev/null +++ b/docs/static/images/guides/trading-view/creer-alerte-de-prix-depuis-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0aa1874c7d7eaebd507bc9a45811ad59c2428dda87cb8a8b90f62159014b5f4a +size 31489 diff --git a/docs/static/images/guides/trading-view/creer-une-alerte-de-strategie-tradingview.png b/docs/static/images/guides/trading-view/creer-une-alerte-de-strategie-tradingview.png new file mode 100644 index 0000000000..a83e9b68ba --- /dev/null +++ b/docs/static/images/guides/trading-view/creer-une-alerte-de-strategie-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:199f046bd39585b461842879a5409cc0f21f81e3375e400aa3c2b46b4f3918e2 +size 155223 diff --git a/docs/static/images/guides/trading-view/creer-une-alerte-depuis-tradingview.png b/docs/static/images/guides/trading-view/creer-une-alerte-depuis-tradingview.png new file mode 100644 index 0000000000..6e6126a751 --- /dev/null +++ b/docs/static/images/guides/trading-view/creer-une-alerte-depuis-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f344d9c4139c578e37c208cb74b1c6503b30ff7e8bd97271d163c575d91cf5d +size 99966 diff --git a/docs/static/images/guides/trading-view/creer-une-alerte-depuis-un-indicateur-tradingview.png b/docs/static/images/guides/trading-view/creer-une-alerte-depuis-un-indicateur-tradingview.png new file mode 100644 index 0000000000..be67b40ed9 --- /dev/null +++ b/docs/static/images/guides/trading-view/creer-une-alerte-depuis-un-indicateur-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51a2ae033e0dcdaad52ec945e54c81eac9edf60388b0b0f6256726c6d1dab6d1 +size 25080 diff --git a/docs/static/images/guides/trading-view/creer-une-alerte-sur-tradingview-avec-webhook-url.png b/docs/static/images/guides/trading-view/creer-une-alerte-sur-tradingview-avec-webhook-url.png new file mode 100644 index 0000000000..f5b5028953 --- /dev/null +++ b/docs/static/images/guides/trading-view/creer-une-alerte-sur-tradingview-avec-webhook-url.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb1653612d8ba273e12f9b5c88828ebf8b9d089d9bca7cc9cc7e16fcb9a44493 +size 35430 diff --git a/docs/static/images/guides/trading-view/demarrer-un-nouvel-octobot-tradingview-depuis-l-explorer.png b/docs/static/images/guides/trading-view/demarrer-un-nouvel-octobot-tradingview-depuis-l-explorer.png new file mode 100644 index 0000000000..83abe9880f --- /dev/null +++ b/docs/static/images/guides/trading-view/demarrer-un-nouvel-octobot-tradingview-depuis-l-explorer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c75383975c0acacad28bcd162b0d52d5ac3e80663cd814fafe2de9a90ae899c +size 128731 diff --git a/docs/static/images/guides/trading-view/octobot-automation-avancee-creer-acheter-btc.png b/docs/static/images/guides/trading-view/octobot-automation-avancee-creer-acheter-btc.png new file mode 100644 index 0000000000..4900e04eb4 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-avancee-creer-acheter-btc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe05545357c3a496828827f19ec76c158f41f414fc312e43c446f87d5cad367a +size 56192 diff --git a/docs/static/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png b/docs/static/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png new file mode 100644 index 0000000000..251390cac6 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt-automation-id.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ee9da4d7e1f6a363e5a1c8925c34be79b04c40d800d6cdb2d1cab3e901820c7 +size 44331 diff --git a/docs/static/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt.png b/docs/static/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt.png new file mode 100644 index 0000000000..353608263a --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-buy-eth-25-percent-usdt.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ec240274c7cd33108e61f41c941da6e72dc13530eca6c4215900317458e9881 +size 49614 diff --git a/docs/static/images/guides/trading-view/octobot-automation-connection-empty-panel.png b/docs/static/images/guides/trading-view/octobot-automation-connection-empty-panel.png new file mode 100644 index 0000000000..5516ecbaf3 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-connection-empty-panel.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87f20fda90a9c51c9a71bc4d2e78a54e3e8db3c03b1eabb6d94ba5822286ffba +size 53387 diff --git a/docs/static/images/guides/trading-view/octobot-automation-connection-panel-highlights.png b/docs/static/images/guides/trading-view/octobot-automation-connection-panel-highlights.png new file mode 100644 index 0000000000..c369545381 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-connection-panel-highlights.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b183404e53ddc5c04c77e4ed89c249da04de79f64cd9225cbfded30cd70508ed +size 57158 diff --git a/docs/static/images/guides/trading-view/octobot-automation-connection-panel.png b/docs/static/images/guides/trading-view/octobot-automation-connection-panel.png new file mode 100644 index 0000000000..59caadb342 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-connection-panel.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2d26b9d82582979e4582915f3fdf39b8dea6505544cdd0586982790ba18b1d3 +size 78890 diff --git a/docs/static/images/guides/trading-view/octobot-automation-create-buy-btc.png b/docs/static/images/guides/trading-view/octobot-automation-create-buy-btc.png new file mode 100644 index 0000000000..f9f7a3f885 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-create-buy-btc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60e535a447e8bd6d1cf85062ab4d5a2ead63aba357a3a5ba885a74085883b996 +size 57642 diff --git a/docs/static/images/guides/trading-view/octobot-automation-create-sell-btc.png b/docs/static/images/guides/trading-view/octobot-automation-create-sell-btc.png new file mode 100644 index 0000000000..6968d5a4ce --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-create-sell-btc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5083079017fb78494a5a5ff64d4953243a3e8e5c4baa49bcb31ea2fcdb631e7 +size 57102 diff --git a/docs/static/images/guides/trading-view/octobot-automation-creer-acheter-btc.png b/docs/static/images/guides/trading-view/octobot-automation-creer-acheter-btc.png new file mode 100644 index 0000000000..80e20b9d57 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-creer-acheter-btc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47e8b6e278be7e3d5bcef686f9f5231754bc6c301fef5311f79553ee4a5dbb2a +size 65515 diff --git a/docs/static/images/guides/trading-view/octobot-automation-creer-vendre-btc.png b/docs/static/images/guides/trading-view/octobot-automation-creer-vendre-btc.png new file mode 100644 index 0000000000..682471baab --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-creer-vendre-btc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a69d1c942f5fc99335d456804689b2f525de8aabd38de537a3e8df03231728d +size 63996 diff --git a/docs/static/images/guides/trading-view/octobot-automation-email-address.png b/docs/static/images/guides/trading-view/octobot-automation-email-address.png new file mode 100644 index 0000000000..ecbfb4c223 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-email-address.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9af9b9a90ed317842bf38a2faa8bfe71d2ea6280344dd7ad1b921a65d1ff999b +size 60174 diff --git a/docs/static/images/guides/trading-view/octobot-automation-identifier.png b/docs/static/images/guides/trading-view/octobot-automation-identifier.png new file mode 100644 index 0000000000..3b4bad3673 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-identifier.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30298c7c3a71f5b5b5352fee14bd034868a64f361e622665a16a81f95e9443e4 +size 79072 diff --git a/docs/static/images/guides/trading-view/octobot-automation-interface-de-connexion-tradingview.png b/docs/static/images/guides/trading-view/octobot-automation-interface-de-connexion-tradingview.png new file mode 100644 index 0000000000..70a1fd2cc0 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-interface-de-connexion-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dae1c541182accbce845e762ba12d035dddbb24209fd16a0a23ced60f15de668 +size 87369 diff --git a/docs/static/images/guides/trading-view/octobot-automation-sell-100-percent-eth.png b/docs/static/images/guides/trading-view/octobot-automation-sell-100-percent-eth.png new file mode 100644 index 0000000000..de3e7e513e --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-sell-100-percent-eth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65c7d5a420f36de17be306a21ef46d54e81e1d5ef50123d680de90c4f6ceb964 +size 47986 diff --git a/docs/static/images/guides/trading-view/octobot-automation-webhook-url.png b/docs/static/images/guides/trading-view/octobot-automation-webhook-url.png new file mode 100644 index 0000000000..c9e39a620c --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automation-webhook-url.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:392be305aa6ca8305e03ac4be5159fc1bdab6c3bf404d1106ae1384db1fad90c +size 66568 diff --git a/docs/static/images/guides/trading-view/octobot-automations-history.png b/docs/static/images/guides/trading-view/octobot-automations-history.png new file mode 100644 index 0000000000..224f79307c --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automations-history.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d465c54aa319412e799ca23d85095fa52cab72f6876339877c7cada6da9c235 +size 72192 diff --git a/docs/static/images/guides/trading-view/octobot-automations-simple-interface.png b/docs/static/images/guides/trading-view/octobot-automations-simple-interface.png new file mode 100644 index 0000000000..4d4c1e08e1 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automations-simple-interface.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40330afd2ea78c969441dbe54912dde6e098af210db88f9876faa3e8f9461f45 +size 49415 diff --git a/docs/static/images/guides/trading-view/octobot-automations-view-with-executed-tradingview-alerts.png b/docs/static/images/guides/trading-view/octobot-automations-view-with-executed-tradingview-alerts.png new file mode 100644 index 0000000000..c074ec7bf1 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automations-view-with-executed-tradingview-alerts.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a81cabbe5f639410ee4f2d4e2adbbce383b657a44d15aacb9af8b53fd097119 +size 21877 diff --git a/docs/static/images/guides/trading-view/octobot-automations-view.png b/docs/static/images/guides/trading-view/octobot-automations-view.png new file mode 100644 index 0000000000..a947af5a73 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automations-view.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1ec3291cb2159d3983f751668ac1a4dba71a398bfc72d6e58b7c17ee5c46da8 +size 22844 diff --git a/docs/static/images/guides/trading-view/octobot-automatisation-email-adresse.png b/docs/static/images/guides/trading-view/octobot-automatisation-email-adresse.png new file mode 100644 index 0000000000..ea8f83a785 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisation-email-adresse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43cef77da30061f2ed61a593008304322be520ff174afed3670f7c8d6ba5f697 +size 67395 diff --git a/docs/static/images/guides/trading-view/octobot-automatisation-historique.png b/docs/static/images/guides/trading-view/octobot-automatisation-historique.png new file mode 100644 index 0000000000..4a435350de --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisation-historique.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f01e4e514375830063e9a1b9d88a49c4a0f9beb14fa6d2010a43c6fe93170b95 +size 77980 diff --git a/docs/static/images/guides/trading-view/octobot-automatisation-identifiant.png b/docs/static/images/guides/trading-view/octobot-automatisation-identifiant.png new file mode 100644 index 0000000000..29eb576df6 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisation-identifiant.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f3986ee2d032e97abbec58ec7c025b40dc9314a3f8185504a2468b99fc5043 +size 86389 diff --git a/docs/static/images/guides/trading-view/octobot-automatisation-webhook-url.png b/docs/static/images/guides/trading-view/octobot-automatisation-webhook-url.png new file mode 100644 index 0000000000..f610750260 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisation-webhook-url.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797fe0199b66ee1af8885c45fe6e45caad91ad28d68086e2acf067bc67049e5c +size 76777 diff --git a/docs/static/images/guides/trading-view/octobot-automatisations-interface-selectionnee.png b/docs/static/images/guides/trading-view/octobot-automatisations-interface-selectionnee.png new file mode 100644 index 0000000000..87b36f6e67 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisations-interface-selectionnee.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f20d75ec2ccff85eb4cefa325ba919912e0edb9c92a54e2cf4c940a50873fc20 +size 66047 diff --git a/docs/static/images/guides/trading-view/octobot-automatisations-interface-vide.png b/docs/static/images/guides/trading-view/octobot-automatisations-interface-vide.png new file mode 100644 index 0000000000..89666f03ab --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisations-interface-vide.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:830655b67a0decec845858b351e81c9ebe291e139c65728bc122bd089de6a8a8 +size 59360 diff --git a/docs/static/images/guides/trading-view/octobot-automatisations-interface.png b/docs/static/images/guides/trading-view/octobot-automatisations-interface.png new file mode 100644 index 0000000000..f0e0b9efec --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisations-interface.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e403a6849f2883a45c40442a25edcdf4b313563c2a669b8048ad512d3f1d27f +size 66299 diff --git a/docs/static/images/guides/trading-view/octobot-automatisations-vue-avec-alerte-tradingview-executee.png b/docs/static/images/guides/trading-view/octobot-automatisations-vue-avec-alerte-tradingview-executee.png new file mode 100644 index 0000000000..e70929262d --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisations-vue-avec-alerte-tradingview-executee.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa18995b9d31a41f684f020abf0b3280364879916db28b0f5af64dab2654751f +size 25243 diff --git a/docs/static/images/guides/trading-view/octobot-automatisations-vue-connexion-interface-selectionnee.png b/docs/static/images/guides/trading-view/octobot-automatisations-vue-connexion-interface-selectionnee.png new file mode 100644 index 0000000000..0e5bf19031 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisations-vue-connexion-interface-selectionnee.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d977fe1c0e024b4f216db3cb9ae358c5f040af39cac5face97627b998e2bbf4f +size 29287 diff --git a/docs/static/images/guides/trading-view/octobot-automatisations-vue.png b/docs/static/images/guides/trading-view/octobot-automatisations-vue.png new file mode 100644 index 0000000000..24a81b4d77 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-automatisations-vue.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:161c31c8adee287f19c3c62a3d2eebaa01f2a2041fada38db713cd65375fc869 +size 23028 diff --git a/docs/static/images/guides/trading-view/octobot-create-advanced-automation.png b/docs/static/images/guides/trading-view/octobot-create-advanced-automation.png new file mode 100644 index 0000000000..c068d2f7e6 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-advanced-automation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8ebccc913db7597a274915fc7f12e8720dca339b8dfac6a20ef9907c1e47e3f +size 51507 diff --git a/docs/static/images/guides/trading-view/octobot-create-automation.png b/docs/static/images/guides/trading-view/octobot-create-automation.png new file mode 100644 index 0000000000..f950d5811b --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-automation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02fc0c5ebf45523253c0aa55773c701d0c32f8af80d0dee4dfaae39eb4b13976 +size 56875 diff --git a/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-from-intro.png b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-from-intro.png new file mode 100644 index 0000000000..dcc99a5eba --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-from-intro.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30cbca3d069a0bc26fa37548720adcc567fd285aceb536df253ff0c161859745 +size 56497 diff --git a/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot-highlighted-create-button.png b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot-highlighted-create-button.png new file mode 100644 index 0000000000..07a5780d02 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot-highlighted-create-button.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cea12c6f982ef6c4aa866cdee82378f1b16f0f13d0c03433af7e408f2b15d184 +size 97988 diff --git a/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot.png b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot.png new file mode 100644 index 0000000000..7886325b31 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-initial-bot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c2ff8734980e02ba7ed532301ef31a6cb310e4becbbeb072873f060d34a263e +size 97844 diff --git a/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-select-exchange.png b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-select-exchange.png new file mode 100644 index 0000000000..579c116d71 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-select-exchange.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72065776a0aa5c005c87919f2e9c7f1dcd2aa8894e8e9d3131e55a88fba55a72 +size 85798 diff --git a/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-select-paper-or-real-trading.png b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-select-paper-or-real-trading.png new file mode 100644 index 0000000000..a7b0605628 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-select-paper-or-real-trading.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af2f91a24dbf2aa04f12189a8f29b84cd708cf2ab6209366258add172529e386 +size 20637 diff --git a/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-start-bot.png b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-start-bot.png new file mode 100644 index 0000000000..e3dce4ff96 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-create-tradingview-bot-start-bot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e4dcde5a7cff7e898cf8d8afe687354a8374fe4ac2e4fd71a243e74dcc557c2 +size 51758 diff --git a/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-demarre.png b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-demarre.png new file mode 100644 index 0000000000..e5f4ea5976 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-demarre.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9e57f5f1fef97caacfa2ebc43d20bec7cce7e11c29e2659eef597c187faee18 +size 47969 diff --git a/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-depuis-intro.png b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-depuis-intro.png new file mode 100644 index 0000000000..21e2aa3bfe --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-depuis-intro.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb8bae35a738a7f3511f8986a7d44332aa6359b1b7d731f7d7dba7183815ec5f +size 62067 diff --git a/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-compte-reel-ou-simule.png b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-compte-reel-ou-simule.png new file mode 100644 index 0000000000..97bd207166 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-compte-reel-ou-simule.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a2f3679ee0dade7f4ac628af852048c38a2d219573d1b8ffbfddab18065e0b1 +size 23237 diff --git a/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-exchange.png b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-exchange.png new file mode 100644 index 0000000000..c8c26752da --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-creer-tradingview-bot-selectionner-exchange.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:396ab9ff9c109e6f537fe577733beb2f4598ba0190f3c81040524a5e9f8dcb47 +size 86444 diff --git a/docs/static/images/guides/trading-view/octobot-custom-automation-buy-eth.png b/docs/static/images/guides/trading-view/octobot-custom-automation-buy-eth.png new file mode 100644 index 0000000000..c6806c412a --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-custom-automation-buy-eth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fee9bdc998d6b1197ea9c9f93c5450b12618632be1d354cdaccecaed28fc5052 +size 93601 diff --git a/docs/static/images/guides/trading-view/octobot-many-tradingview-automations.png b/docs/static/images/guides/trading-view/octobot-many-tradingview-automations.png new file mode 100644 index 0000000000..72616d4880 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-many-tradingview-automations.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d97e30958679a71e2e86d79e2a3599bb6effff1fb9dfd1710fd96d415a50ba3 +size 46904 diff --git a/docs/static/images/guides/trading-view/octobot-open-automation-connection-panel.png b/docs/static/images/guides/trading-view/octobot-open-automation-connection-panel.png new file mode 100644 index 0000000000..e4cfe437ef --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-automation-connection-panel.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e6ac28e21209efb1dd91f6b7c1499c80bad30997f64d446d92b232f3cb0d661 +size 22918 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-add-tradingview-alert-illustration.png b/docs/static/images/guides/trading-view/octobot-open-source-add-tradingview-alert-illustration.png new file mode 100644 index 0000000000..2492171bb2 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-add-tradingview-alert-illustration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:234247272c6f0b7a11f35e3ada2aa54208e3dfa7de68c4141c2e8cda5528ce68 +size 107410 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-configure-button-tradingview-alert-email-address.png b/docs/static/images/guides/trading-view/octobot-open-source-configure-button-tradingview-alert-email-address.png new file mode 100644 index 0000000000..f3ffa6450f --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-configure-button-tradingview-alert-email-address.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f1f208db878647e55cf3ff72aa72f50129ad25b3ffdec31f5ba67ee0b9cb748 +size 33458 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-and-webhook-config.png b/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-and-webhook-config.png new file mode 100644 index 0000000000..e65b150cad --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-and-webhook-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47966e4d84094d37d3388571243d6bca7c025743f096452f477517976d7c7ca1 +size 40011 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-email-address.png b/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-email-address.png new file mode 100644 index 0000000000..d0372df0ab --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-email-address.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6a49e273e339cddd841bb01fa863cca498516c211d06f4396138dcc60f5c1ff +size 20961 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-webhook-url.png b/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-webhook-url.png new file mode 100644 index 0000000000..70111ad6e7 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-configured-tradingview-alert-webhook-url.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66832b3bb2ea5c4297c0270aeb676eb3076e55f3d38cf032557093e30672fcdf +size 33969 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-configuring-tradingview-alert-email-address.png b/docs/static/images/guides/trading-view/octobot-open-source-configuring-tradingview-alert-email-address.png new file mode 100644 index 0000000000..7aaca2aef6 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-configuring-tradingview-alert-email-address.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a142e0f6fe2ee9a091fe33821de4a677be44f83bd827eec1ce0aa0f479dc5d79 +size 25148 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-ngrok-webhook-configuration.png b/docs/static/images/guides/trading-view/octobot-open-source-ngrok-webhook-configuration.png new file mode 100644 index 0000000000..e7ead12f5c --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-ngrok-webhook-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:824932ed6e3be7038e0b56c9ede4d04c75ce029f2cef64db95355c4c2e87f3c5 +size 63679 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-ngrok-webhook-log.png b/docs/static/images/guides/trading-view/octobot-open-source-ngrok-webhook-log.png new file mode 100644 index 0000000000..b427de677d --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-ngrok-webhook-log.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b98ded5e5a2bb212b3d118eb436e4fb53abc1a7b82828505650d251f288d4416 +size 24588 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-premium-extension-webhook-configuration.png b/docs/static/images/guides/trading-view/octobot-open-source-premium-extension-webhook-configuration.png new file mode 100644 index 0000000000..ed8072bc6f --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-premium-extension-webhook-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce20645a7ddfe96f4afad5bb007bec133ff3b4df34b8f726cf12ea02690bffc6 +size 65478 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-premium-extension-webhook-log.png b/docs/static/images/guides/trading-view/octobot-open-source-premium-extension-webhook-log.png new file mode 100644 index 0000000000..31345d2365 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-premium-extension-webhook-log.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a7d778c87f41dc0258463261a88e90d3ab2222cb46fb2cceaf60d1e570843e +size 13172 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-tradingview-email-configuration-last-step.png b/docs/static/images/guides/trading-view/octobot-open-source-tradingview-email-configuration-last-step.png new file mode 100644 index 0000000000..456a6d0423 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-tradingview-email-configuration-last-step.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de5c628c1b1e04a370b27f62ecce914e3f6f7de63ed87866de6e3e478c8f30cb +size 62471 diff --git a/docs/static/images/guides/trading-view/octobot-open-source-waiting-tradingview-verification-code.png b/docs/static/images/guides/trading-view/octobot-open-source-waiting-tradingview-verification-code.png new file mode 100644 index 0000000000..58e9d86f9a --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-open-source-waiting-tradingview-verification-code.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4fc3be272dcad8308f39966a38c6b15c01ba0aaa715477ab7e149cd4ce1f335 +size 54865 diff --git a/docs/static/images/guides/trading-view/octobot-plusieurs-automatisations-tradingview.png b/docs/static/images/guides/trading-view/octobot-plusieurs-automatisations-tradingview.png new file mode 100644 index 0000000000..7c06885f79 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-plusieurs-automatisations-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5848c0b18db5746952cb321013ab425eff7bbf5251496b9853251a5bff12e0c +size 54611 diff --git a/docs/static/images/guides/trading-view/octobot-simple-automatisation-creer-acheter-btc.png b/docs/static/images/guides/trading-view/octobot-simple-automatisation-creer-acheter-btc.png new file mode 100644 index 0000000000..42e46d7439 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-simple-automatisation-creer-acheter-btc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de1d9a9d8b84d4b75abfc7399a3eeaa485f9022a7ef1aa3978cbfa61d119d342 +size 54785 diff --git a/docs/static/images/guides/trading-view/octobot-tradingview-bot-vue-initiale-nouvelle-automatisation-selectionnee.png b/docs/static/images/guides/trading-view/octobot-tradingview-bot-vue-initiale-nouvelle-automatisation-selectionnee.png new file mode 100644 index 0000000000..fe9db155c4 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-tradingview-bot-vue-initiale-nouvelle-automatisation-selectionnee.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c9d6f83f0b47b4a383312b6ccb9d193d2e870c22d66b2a5dfca0140843de852 +size 101767 diff --git a/docs/static/images/guides/trading-view/octobot-tradingview-bot-vue-initiale.png b/docs/static/images/guides/trading-view/octobot-tradingview-bot-vue-initiale.png new file mode 100644 index 0000000000..a0b4345f2a --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-tradingview-bot-vue-initiale.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e013b8517be6e970ff88254dca5ad4667ba12f4d2298323284047943ce0b47e6 +size 89002 diff --git a/docs/static/images/guides/trading-view/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png b/docs/static/images/guides/trading-view/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png new file mode 100644 index 0000000000..1159455f75 --- /dev/null +++ b/docs/static/images/guides/trading-view/octobot-tradingview-trading-side-of-ema-strategy-illustration-with-2-buy-and-2-sell.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78b1b2ef7dc5e2355108a40b6f2bced66d99c2051e91678738baa8a6d60904ab +size 187048 diff --git a/docs/static/images/guides/trading-view/open-source-octobot-start-tradingview-email-config.png b/docs/static/images/guides/trading-view/open-source-octobot-start-tradingview-email-config.png new file mode 100644 index 0000000000..18f25c8bab --- /dev/null +++ b/docs/static/images/guides/trading-view/open-source-octobot-start-tradingview-email-config.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b31b8e15863946c4ea01c353282c5cc33aa73eb29ffa00b640475199abab2bd +size 57092 diff --git a/docs/static/images/guides/trading-view/options-de-declanchement-pour-creer-une-alerte-depuis-un-indicateur-tradingview.png b/docs/static/images/guides/trading-view/options-de-declanchement-pour-creer-une-alerte-depuis-un-indicateur-tradingview.png new file mode 100644 index 0000000000..7ce58cc156 --- /dev/null +++ b/docs/static/images/guides/trading-view/options-de-declanchement-pour-creer-une-alerte-depuis-un-indicateur-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c18df8093ac432e6c216d258e174dafa7927753178ddbd8bffe9a6a50c017c8 +size 24580 diff --git a/docs/static/images/guides/trading-view/plusieurs-btcusdt-alertes-tradingview.png b/docs/static/images/guides/trading-view/plusieurs-btcusdt-alertes-tradingview.png new file mode 100644 index 0000000000..b5b5c8cb78 --- /dev/null +++ b/docs/static/images/guides/trading-view/plusieurs-btcusdt-alertes-tradingview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51b587711ef75fe938e603acffb74344534fb005c8a7ef9056f00ee77ceab716 +size 14682 diff --git a/docs/static/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png b/docs/static/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png new file mode 100644 index 0000000000..a887107a1e --- /dev/null +++ b/docs/static/images/guides/trading-view/start-new-tradingview-octobot-from-explorer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c7a10634e726b1a8f679e0fb365168a0e818d896a3b22f679ab56aa635cbb79 +size 117563 diff --git a/docs/static/images/guides/trading-view/tradingview-adding-alert-message.png b/docs/static/images/guides/trading-view/tradingview-adding-alert-message.png new file mode 100644 index 0000000000..7d9fa1fe4c --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-adding-alert-message.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8980fbd0d381eb868938c7b8f8ac7214685a499df768b6362290b9bacd946885 +size 46200 diff --git a/docs/static/images/guides/trading-view/tradingview-adding-ema-indicator.png b/docs/static/images/guides/trading-view/tradingview-adding-ema-indicator.png new file mode 100644 index 0000000000..2ab5f2378a --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-adding-ema-indicator.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95c54e458e19cc52b088b4c0cb348cf582aeba7c6d7d617a2655a686b196b834 +size 14249 diff --git a/docs/static/images/guides/trading-view/tradingview-adding-strategy-alert-message.png b/docs/static/images/guides/trading-view/tradingview-adding-strategy-alert-message.png new file mode 100644 index 0000000000..aae9c7b036 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-adding-strategy-alert-message.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65e89f8b02bec2ca19f5688830706fa2367c7ac491503d4a7a60c8557965c87c +size 20729 diff --git a/docs/static/images/guides/trading-view/tradingview-ajouter-ema-indicateur.png b/docs/static/images/guides/trading-view/tradingview-ajouter-ema-indicateur.png new file mode 100644 index 0000000000..ccb3d6c485 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-ajouter-ema-indicateur.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9baba55fd086e2522665e9df38b11d2920ecd5d801b8c3220f332e23be8ceec5 +size 16820 diff --git a/docs/static/images/guides/trading-view/tradingview-alert-email-form-completed.png b/docs/static/images/guides/trading-view/tradingview-alert-email-form-completed.png new file mode 100644 index 0000000000..cff78be1e3 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alert-email-form-completed.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f55431c81f525d91ed65dfea5c8b061ac31668e656b7a7caa388f2b64148384 +size 40191 diff --git a/docs/static/images/guides/trading-view/tradingview-alert-email-form-confirm-code.png b/docs/static/images/guides/trading-view/tradingview-alert-email-form-confirm-code.png new file mode 100644 index 0000000000..5092f9207a --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alert-email-form-confirm-code.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5baff6aabb6efb7d35a4c4a1d0653ae6b05d4c7fc765a1d4e997dd9ca9af324 +size 19321 diff --git a/docs/static/images/guides/trading-view/tradingview-alert-email-form.png b/docs/static/images/guides/trading-view/tradingview-alert-email-form.png new file mode 100644 index 0000000000..6ee60b6bca --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alert-email-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71502c9908de9e9968b55532746aa475c5f8491e15e2142337ece37c07b89884 +size 12765 diff --git a/docs/static/images/guides/trading-view/tradingview-alert-notification-email-selected-form.png b/docs/static/images/guides/trading-view/tradingview-alert-notification-email-selected-form.png new file mode 100644 index 0000000000..0f1b0d9889 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alert-notification-email-selected-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae25c09f2525950b8ff915a08b119d7e8ee6f3245f649889a743418dc590cd0a +size 40321 diff --git a/docs/static/images/guides/trading-view/tradingview-alert-notification-email-selectionne-form.png b/docs/static/images/guides/trading-view/tradingview-alert-notification-email-selectionne-form.png new file mode 100644 index 0000000000..9e766b32c3 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alert-notification-email-selectionne-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13a0b4b0b61ec1a4761efc43826db2af46fabbaa416f3cc0d684fd20d25f7e92 +size 45339 diff --git a/docs/static/images/guides/trading-view/tradingview-alerte-email-form-complete.png b/docs/static/images/guides/trading-view/tradingview-alerte-email-form-complete.png new file mode 100644 index 0000000000..fc0f608686 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alerte-email-form-complete.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:406c80ab51d26825d338f5eef85da67fa39498b0e0a26b7eea6c9e4eddbfeb81 +size 45241 diff --git a/docs/static/images/guides/trading-view/tradingview-alerte-email-form-confirm-code.png b/docs/static/images/guides/trading-view/tradingview-alerte-email-form-confirm-code.png new file mode 100644 index 0000000000..419d2c2b7d --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alerte-email-form-confirm-code.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0568a214ff621bc73a6fc5fe311e00af16ed279a4c017bd9f5c509a0a942dd79 +size 20750 diff --git a/docs/static/images/guides/trading-view/tradingview-alerte-email-form.png b/docs/static/images/guides/trading-view/tradingview-alerte-email-form.png new file mode 100644 index 0000000000..3a26a92ec0 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-alerte-email-form.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46ae7258a27e2903e2a8380facd49dec968cf7cf03cdecbc999c3794e8c50cca +size 12506 diff --git a/docs/static/images/guides/trading-view/tradingview-automation-illustrated-by-tradingview-logo.png b/docs/static/images/guides/trading-view/tradingview-automation-illustrated-by-tradingview-logo.png new file mode 100644 index 0000000000..9507689720 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-automation-illustrated-by-tradingview-logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1748d756f4dd4626fb8853857b0e8e116596854757a2b8ac4130461d222df4dc +size 7344 diff --git a/docs/static/images/guides/trading-view/tradingview-btcusd-chart.png b/docs/static/images/guides/trading-view/tradingview-btcusd-chart.png new file mode 100644 index 0000000000..5ae3a39eee --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-btcusd-chart.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f562567763c1dc208316a3a8504abc66d84ffd65f46865e91c801477ae11a08b +size 123677 diff --git a/docs/static/images/guides/trading-view/tradingview-community-strategies.png b/docs/static/images/guides/trading-view/tradingview-community-strategies.png new file mode 100644 index 0000000000..0a74013b7a --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-community-strategies.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1a53fe916a3ed902feac55128777f79d59b62b072a74c2688318082ae31b7d6 +size 74701 diff --git a/docs/static/images/guides/trading-view/tradingview-configurer-ema-indicateur.png b/docs/static/images/guides/trading-view/tradingview-configurer-ema-indicateur.png new file mode 100644 index 0000000000..edadb526f5 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-configurer-ema-indicateur.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac853ad2f584cf87c5e6379c1006f49accb110c6092f6c4131cde0c52ba9eb1 +size 62156 diff --git a/docs/static/images/guides/trading-view/tradingview-configuring-ema-indicator.png b/docs/static/images/guides/trading-view/tradingview-configuring-ema-indicator.png new file mode 100644 index 0000000000..0dcf1fd731 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-configuring-ema-indicator.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2c600a9763af354a2a389da1b8cbf03954472a5cd7ee9878b78d930b13db604 +size 53053 diff --git a/docs/static/images/guides/trading-view/tradingview-create-alert.png b/docs/static/images/guides/trading-view/tradingview-create-alert.png new file mode 100644 index 0000000000..5ce669e3dd --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-create-alert.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a7fbd7d6c6718ae1ec4e9acbb3b17c050de2c7bd5447437aebcf7439d3c48fd +size 146397 diff --git a/docs/static/images/guides/trading-view/tradingview-create-death-cross-alert.png b/docs/static/images/guides/trading-view/tradingview-create-death-cross-alert.png new file mode 100644 index 0000000000..e0d404605a --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-create-death-cross-alert.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b55688085a640eea0391681e6bbb3fdbc69acecf15a384d2efd9ea18cf3d0fec +size 33169 diff --git a/docs/static/images/guides/trading-view/tradingview-create-golden-cross-alert.png b/docs/static/images/guides/trading-view/tradingview-create-golden-cross-alert.png new file mode 100644 index 0000000000..f6057702d7 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-create-golden-cross-alert.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4395e931845fdc08433490abecb39149065552774bea599772add89cc6b1ef0 +size 24023 diff --git a/docs/static/images/guides/trading-view/tradingview-creer-death-cross-alerte.png b/docs/static/images/guides/trading-view/tradingview-creer-death-cross-alerte.png new file mode 100644 index 0000000000..5bccf9302e --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-creer-death-cross-alerte.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:107237d6a05c296a6b545416b955f71888a3f004c664be5085706c057141616b +size 34362 diff --git a/docs/static/images/guides/trading-view/tradingview-creer-golden-cross-alerte.png b/docs/static/images/guides/trading-view/tradingview-creer-golden-cross-alerte.png new file mode 100644 index 0000000000..34abb0d2af --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-creer-golden-cross-alerte.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a779d31046d48fea78d2509c81d5170b70ebffb001873652d983d24e374d666 +size 33879 diff --git a/docs/static/images/guides/trading-view/tradingview-ema-indicator-visualization-with-golden-and-death-crosses.png b/docs/static/images/guides/trading-view/tradingview-ema-indicator-visualization-with-golden-and-death-crosses.png new file mode 100644 index 0000000000..ea25e754c3 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-ema-indicator-visualization-with-golden-and-death-crosses.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50f865fd0c34f4140c8b528b59601568ae820b7001bcf9428da876bc7f284010 +size 109849 diff --git a/docs/static/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png b/docs/static/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png new file mode 100644 index 0000000000..becf3c36e5 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-ema-strategy-illustration-with-2-buy-and-2-sell.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb3d7bcbc6bd131c2f94ecfd977095264bd47efedbedd445bcca3ee6ffe58126 +size 176199 diff --git a/docs/static/images/guides/trading-view/tradingview-many-btcusdt-alerts.png b/docs/static/images/guides/trading-view/tradingview-many-btcusdt-alerts.png new file mode 100644 index 0000000000..d8d2abbe52 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-many-btcusdt-alerts.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02f3791722c2115b5cdd5c46bf3861af4a40706ab0f1b43fd6f98594069c3be5 +size 26893 diff --git a/docs/static/images/guides/trading-view/tradingview-notification-configuration.png b/docs/static/images/guides/trading-view/tradingview-notification-configuration.png new file mode 100644 index 0000000000..2f51a46087 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-notification-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76200bd48a6ba4212106d351f0f2b73133a88b42e8840ff94d9fe3099396bb92 +size 39615 diff --git a/docs/static/images/guides/trading-view/tradingview-open-strategy-code.png b/docs/static/images/guides/trading-view/tradingview-open-strategy-code.png new file mode 100644 index 0000000000..61e646a137 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-open-strategy-code.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3100e32f325539fda21be1ca3cedd10a1fb4e366c1490630d7b9cfd7695264e1 +size 75744 diff --git a/docs/static/images/guides/trading-view/tradingview-select-btcusdt-market.png b/docs/static/images/guides/trading-view/tradingview-select-btcusdt-market.png new file mode 100644 index 0000000000..91d7f9c1bf --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-select-btcusdt-market.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c49d5c5b3827285f8c0af5bc41d43d5f7534711ee496033eba7b0020892a1e51 +size 24571 diff --git a/docs/static/images/guides/trading-view/tradingview-selection-btcusdt-marche.png b/docs/static/images/guides/trading-view/tradingview-selection-btcusdt-marche.png new file mode 100644 index 0000000000..2c84ba7ada --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-selection-btcusdt-marche.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4f9314841f998ec0ccb427f918d9381fcee89ef945b9ab977786ccf4fd8782a +size 26119 diff --git a/docs/static/images/guides/trading-view/tradingview-simple-rsi-strategy.png b/docs/static/images/guides/trading-view/tradingview-simple-rsi-strategy.png new file mode 100644 index 0000000000..ddc23b4a18 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-simple-rsi-strategy.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a592b35ea294454c69a994aec6e463d6e62d87db73d02971a60e2cf222008b35 +size 29187 diff --git a/docs/static/images/guides/trading-view/tradingview-strategy-example-with-automation-ids.png b/docs/static/images/guides/trading-view/tradingview-strategy-example-with-automation-ids.png new file mode 100644 index 0000000000..e0f0c0f2ec --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-strategy-example-with-automation-ids.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22909a57df68dbe07824f2cb92e772987a48539496e036c2edf7bfeec1f94224 +size 30642 diff --git a/docs/static/images/guides/trading-view/tradingview-strategy-example.png b/docs/static/images/guides/trading-view/tradingview-strategy-example.png new file mode 100644 index 0000000000..d5e5b809c6 --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-strategy-example.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e60ca51fded2c12d63f51ac0cec1e2d3e70f9451efc6caa9b343f81196ba2e7 +size 179403 diff --git a/docs/static/images/guides/trading-view/tradingview-visualisation-ema-indicateur-golden-et-death-crosses.png b/docs/static/images/guides/trading-view/tradingview-visualisation-ema-indicateur-golden-et-death-crosses.png new file mode 100644 index 0000000000..1d29059aff --- /dev/null +++ b/docs/static/images/guides/trading-view/tradingview-visualisation-ema-indicateur-golden-et-death-crosses.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33daf31f347fa677458f2764e2541706260db51f3195a7d7ce407f50f04c5cb8 +size 109583 diff --git a/docs/static/images/guides/trading-virtuel-configuration-du-portefeuille.png b/docs/static/images/guides/trading-virtuel-configuration-du-portefeuille.png new file mode 100644 index 0000000000..11b9d4b195 --- /dev/null +++ b/docs/static/images/guides/trading-virtuel-configuration-du-portefeuille.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:584a15c132a221d7faf05b7612ae5487622bd3e0d83a8dbc854d2ebd9ee1d5e8 +size 28568 diff --git a/docs/static/images/guides/trading-with-chatgpt-in-octobot-GPTEvaluator-configuration.png b/docs/static/images/guides/trading-with-chatgpt-in-octobot-GPTEvaluator-configuration.png new file mode 100644 index 0000000000..92925540bf --- /dev/null +++ b/docs/static/images/guides/trading-with-chatgpt-in-octobot-GPTEvaluator-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcc56bd858437cec798960f16eedbbe8f41197158c7ba2819132ddeff74a6e58 +size 66554 diff --git a/docs/static/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-octobot-automations.png b/docs/static/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-octobot-automations.png new file mode 100644 index 0000000000..f377b55d79 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-octobot-automations.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06da37f2c717f0493c1d7350f468b230d9d14981639c8b89f4de4fa67e9e6d4a +size 53296 diff --git a/docs/static/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-solana.png b/docs/static/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-solana.png new file mode 100644 index 0000000000..e163559377 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/RSI-bull-market-strategy-buying-and-selling-solana.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7950cd4c7df28b97637aa3dfc7694a3e390cf4873e28ee2218038ce33aa3de63 +size 99793 diff --git a/docs/static/images/guides/tradingview-tutos/RSI-configuration-with-rolling-moving-averages.png b/docs/static/images/guides/tradingview-tutos/RSI-configuration-with-rolling-moving-averages.png new file mode 100644 index 0000000000..f871debc50 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/RSI-configuration-with-rolling-moving-averages.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2ed2db9729686a63ab3a8bcd356ea96ad8d94a5b4cbf464af7c730e19091fa1 +size 15438 diff --git a/docs/static/images/guides/tradingview-tutos/automatisations-octobot-de-la-strategie-RSI-de-bull-market.png b/docs/static/images/guides/tradingview-tutos/automatisations-octobot-de-la-strategie-RSI-de-bull-market.png new file mode 100644 index 0000000000..342be7a86d --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/automatisations-octobot-de-la-strategie-RSI-de-bull-market.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f620d1adea7d9504ba0bdaabb0f10a91e9c333fb287277dfb1405175c46e25b4 +size 57550 diff --git a/docs/static/images/guides/tradingview-tutos/configuration-RSI-avec-rolling-moving-averages.png b/docs/static/images/guides/tradingview-tutos/configuration-RSI-avec-rolling-moving-averages.png new file mode 100644 index 0000000000..3faf92e204 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/configuration-RSI-avec-rolling-moving-averages.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9089e4bfc36a7cf746027ec7c8216d6e87350ea54697f28d91b28dbe0c07d66 +size 15115 diff --git a/docs/static/images/guides/tradingview-tutos/extreme-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/extreme-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png new file mode 100644 index 0000000000..d64066ef44 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/extreme-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57bfa6351b85d7464a3636c3637c95500c3c1c2625947a0d2f129d12cec3e902 +size 19561 diff --git a/docs/static/images/guides/tradingview-tutos/extreme-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/extreme-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png new file mode 100644 index 0000000000..2fec656306 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/extreme-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:298f7642a4bb1d6ee67929aae25313b229878800f6fbc06836389831fdf16010 +size 20918 diff --git a/docs/static/images/guides/tradingview-tutos/extreme-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/extreme-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png new file mode 100644 index 0000000000..29a8a6006a --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/extreme-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f40469cbb0b1b7f6a327ac9250d3bfc91d0b3c21de3ccdd8255304b255b3a10b +size 19466 diff --git a/docs/static/images/guides/tradingview-tutos/extreme-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/extreme-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png new file mode 100644 index 0000000000..726aa881c2 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/extreme-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:728a78db49e13f9e7fb729345b604f659d4e090e72b5d4e6f0885ed8f858457d +size 28158 diff --git a/docs/static/images/guides/tradingview-tutos/regular-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/regular-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png new file mode 100644 index 0000000000..55e783eabb --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/regular-buy-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68546375917ff856f4cba09b785ad4f75478bfa18abdc7ea802e169f14c0deaf +size 19559 diff --git a/docs/static/images/guides/tradingview-tutos/regular-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/regular-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png new file mode 100644 index 0000000000..df92dc4240 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/regular-buy-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0d1535727a417e0299c18cd8b2cf73c9fe57a4601d20c5dfdfeebf574951a5a +size 20937 diff --git a/docs/static/images/guides/tradingview-tutos/regular-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/regular-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png new file mode 100644 index 0000000000..f301c3d5d5 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/regular-sell-solana-on-rsi-ema-threshhold-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c76bbd1f20e936146eaa94c7abbcbfff0bb44a90083d6d17873eb05798a8245 +size 19464 diff --git a/docs/static/images/guides/tradingview-tutos/regular-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png b/docs/static/images/guides/tradingview-tutos/regular-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png new file mode 100644 index 0000000000..96a0373375 --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/regular-sell-solana-sur-rsi-ema-seuil-tradingview-alert-configuration.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0218948113e0a4d2e0269e17d77f8dd0e1b7f2d3c1e0972505c3b467f58a73c +size 20776 diff --git a/docs/static/images/guides/tradingview-tutos/strategie-RSI-de-bull-market-qui-achete-et-vend-du-solana.png b/docs/static/images/guides/tradingview-tutos/strategie-RSI-de-bull-market-qui-achete-et-vend-du-solana.png new file mode 100644 index 0000000000..e6444dd4db --- /dev/null +++ b/docs/static/images/guides/tradingview-tutos/strategie-RSI-de-bull-market-qui-achete-et-vend-du-solana.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc8128cb7c066567168caac17a9cad914c458e1b0702a049d2a1bf1c31a78fe4 +size 120041 diff --git a/docs/static/images/guides/type-de-compte-de-trading-choix-entre-reel-ou-virtuel.png b/docs/static/images/guides/type-de-compte-de-trading-choix-entre-reel-ou-virtuel.png new file mode 100644 index 0000000000..5a8b4d5f12 --- /dev/null +++ b/docs/static/images/guides/type-de-compte-de-trading-choix-entre-reel-ou-virtuel.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15a5f84b832a1c6e558fb113b6a01041f119b2ac1f2625e646978c00abe19fcc +size 40384 diff --git a/docs/static/images/guides/using-a-crypto-basket.png b/docs/static/images/guides/using-a-crypto-basket.png new file mode 100644 index 0000000000..69da3d3402 --- /dev/null +++ b/docs/static/images/guides/using-a-crypto-basket.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8435d18f55782a7a2f9ed01feaf53800ef1ef3daeb43039c4d58ea3780b1ca33 +size 23597 diff --git a/docs/static/images/guides/utiliser-un-panier-de-crypto.png b/docs/static/images/guides/utiliser-un-panier-de-crypto.png new file mode 100644 index 0000000000..46614a063b --- /dev/null +++ b/docs/static/images/guides/utiliser-un-panier-de-crypto.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abc9b41a4f9e141e71f8eaf679d209746381ebc8bffb7c88d72af6d13047061d +size 24384 diff --git a/docs/static/images/investing/pay-with-crypto/connect-your-crypto-wallet.png b/docs/static/images/investing/pay-with-crypto/connect-your-crypto-wallet.png new file mode 100644 index 0000000000..3b0ece2432 --- /dev/null +++ b/docs/static/images/investing/pay-with-crypto/connect-your-crypto-wallet.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c71a3efeb30f8ca1a09e2128a02f4b60e9bbd1fb4bbf78f797ab8698e377d2c0 +size 251347 diff --git a/docs/static/images/investing/pay-with-crypto/pay-with-crypto-en.png b/docs/static/images/investing/pay-with-crypto/pay-with-crypto-en.png new file mode 100644 index 0000000000..5e9f7ae521 --- /dev/null +++ b/docs/static/images/investing/pay-with-crypto/pay-with-crypto-en.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:482ba30c0fc6a6d61d594db3839db03011f0cf898b8027f7d750200a9693d791 +size 54641 diff --git a/docs/static/images/investing/pay-with-crypto/pay-with-crypto-fr.png b/docs/static/images/investing/pay-with-crypto/pay-with-crypto-fr.png new file mode 100644 index 0000000000..fd840a0d06 --- /dev/null +++ b/docs/static/images/investing/pay-with-crypto/pay-with-crypto-fr.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78dbfabe667b27a0eaf33876546d18d223a1165eec086c195f59892e70bd6b90 +size 63693 diff --git a/docs/static/images/investing/pay-with-crypto/select-a-blockchain-and-a-token.png b/docs/static/images/investing/pay-with-crypto/select-a-blockchain-and-a-token.png new file mode 100644 index 0000000000..bd19e3915d --- /dev/null +++ b/docs/static/images/investing/pay-with-crypto/select-a-blockchain-and-a-token.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2162709b0b591cb48bfc348dde1c0be6ceaa8d268d939f2ced34381e8393b17a +size 42927 diff --git a/docs/static/img/favicon.ico b/docs/static/img/favicon.ico new file mode 100644 index 0000000000..b069627cf8 Binary files /dev/null and b/docs/static/img/favicon.ico differ diff --git a/docs/static/img/logo-dark-512.png b/docs/static/img/logo-dark-512.png new file mode 100644 index 0000000000..ae778355c0 --- /dev/null +++ b/docs/static/img/logo-dark-512.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33e5f1a993134336c26a37f80c5d221f381a66e6b5fa7ce892e6245a195a9063 +size 27254 diff --git a/docs/static/img/logo-light-512.png b/docs/static/img/logo-light-512.png new file mode 100644 index 0000000000..cbd7ed42ef --- /dev/null +++ b/docs/static/img/logo-light-512.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2fb82a43e4436748f17310457aa1cf6fde3cd17671e06afd5ea7bc999b8f126 +size 44039 diff --git a/docs/static/robots.txt b/docs/static/robots.txt new file mode 100644 index 0000000000..a2dc8e4362 --- /dev/null +++ b/docs/static/robots.txt @@ -0,0 +1,4 @@ +User-agent: * +Allow: / + +Sitemap: https://docs.octobot.cloud/sitemap.xml diff --git a/docs/tsconfig.json b/docs/tsconfig.json new file mode 100644 index 0000000000..6f3b11cdbb --- /dev/null +++ b/docs/tsconfig.json @@ -0,0 +1,7 @@ +{ + "extends": "@docusaurus/tsconfig", + "compilerOptions": { + "baseUrl": "." + }, + "exclude": [".docusaurus", "build"] +} diff --git a/docs/worker.js b/docs/worker.js new file mode 100644 index 0000000000..f2bb10fbaa --- /dev/null +++ b/docs/worker.js @@ -0,0 +1,5 @@ +export default { + async fetch() { + return new Response("Not found", { status: 404 }); + }, +}; diff --git a/docs/wrangler.jsonc b/docs/wrangler.jsonc new file mode 100644 index 0000000000..9b1ff83c47 --- /dev/null +++ b/docs/wrangler.jsonc @@ -0,0 +1,12 @@ +{ + "name": "octobot-docs", + "main": "./worker.js", + "compatibility_date": "2026-02-24", + "placement": { + "mode": "smart" + }, + "assets": { + "directory": "./build" + }, + "preview_urls": true +} diff --git a/extra_requirements.txt b/extra_requirements.txt new file mode 100644 index 0000000000..0cdf7bae23 --- /dev/null +++ b/extra_requirements.txt @@ -0,0 +1,2 @@ +## Socks proxy requirements +aiohttp_socks==0.11.0 diff --git a/full_requirements.txt b/full_requirements.txt index c743a8289b..74430e325d 100644 --- a/full_requirements.txt +++ b/full_requirements.txt @@ -1,22 +1,15 @@ -# Drakkar-Software full requirements -OctoBot-Commons[full]==1.9.92 -OctoBot-Trading[full]==2.4.238 -OctoBot-Evaluators[full]==1.9.9 -OctoBot-Tentacles-Manager[full]==2.9.19 -OctoBot-Services[full]==1.6.30 -OctoBot-Backtesting[full]==1.9.8 - ## Others -colorlog==6.8.0 +colorlog==6.10.1 # Community gmqtt==0.7.0 pgpy==0.6.0 -clickhouse-connect==0.8.18 +clickhouse-connect==0.10.0 pyiceberg==0.10.0 -pydantic<2.12 # required for pyiceberg 0.10.0 https://github.com/apache/iceberg-python/issues/2590 -pyarrow==21.0.0 +# pydantic==2.11.10 # required for pyiceberg 0.10.0 https://github.com/apache/iceberg-python/issues/2590. +pydantic==2.12.5 # 2.12.5 is compataible with pyiceberg 0.10.0, but logs tons of deprecation warnings, wait for pyiceberg new version +pyarrow==23.0.0 # used by ccxt for protobuf "websockets" such as mexc # lock protobuf to avoid using .rc versions -protobuf==5.29.5 +protobuf==6.33.5 diff --git a/octobot/__init__.py b/octobot/__init__.py index 35476a029b..54d9202257 100644 --- a/octobot/__init__.py +++ b/octobot/__init__.py @@ -16,5 +16,5 @@ PROJECT_NAME = "OctoBot" AUTHOR = "Drakkar-Software" -VERSION = "2.0.15" # major.minor.revision +VERSION = "2.1.1" # major.minor.revision LONG_VERSION = f"{VERSION}" diff --git a/octobot/automation/__init__.py b/octobot/automation/__init__.py index 2af0f700f1..67d09ee3c1 100644 --- a/octobot/automation/__init__.py +++ b/octobot/automation/__init__.py @@ -21,6 +21,7 @@ AbstractCondition, AbstractTriggerEvent, AutomationStep, + ExecutionDetails, ) @@ -36,4 +37,5 @@ "AbstractTriggerEvent", "AutomationStep", "Automation", + "ExecutionDetails", ] diff --git a/octobot/automation/automation.py b/octobot/automation/automation.py index 35886a5780..7d22af4d38 100644 --- a/octobot/automation/automation.py +++ b/octobot/automation/automation.py @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. import asyncio +import abc +import typing import octobot_commons.logging as logging import octobot_commons.enums as common_enums @@ -22,9 +24,14 @@ import octobot.automation.bases.abstract_trigger_event as abstract_trigger_event import octobot.automation.bases.abstract_condition as abstract_condition import octobot.automation.bases.abstract_action as abstract_action +import octobot.automation.bases.automation_step as automation_step +import octobot.automation.bases.execution_details as execution_details import octobot.constants as constants import octobot.errors as errors +if typing.TYPE_CHECKING: + import octobot_tentacles_manager.configuration + class AutomationDetails: def __init__(self, trigger_event, conditions, actions): @@ -47,16 +54,21 @@ class Automation(tentacles_management.AbstractTentacle): CONDITIONS = "conditions" ACTIONS = "actions" - def __init__(self, bot_id, tentacles_setup_config, automations_config=None): + def __init__( + self, + bot_id: str, + tentacles_setup_config: "octobot_tentacles_manager.configuration.TentaclesSetupConfiguration", + automations_config: typing.Optional[dict] = {} + ): super().__init__() - self.logger = logging.get_logger(self.get_name()) - self.bot_id = bot_id - self.tentacles_setup_config = tentacles_setup_config - self.automations_config = automations_config - self.automation_tasks = [] - self.automation_details = [] + self.logger: logging.BotLogger = logging.get_logger(self.get_name()) + self.bot_id: str = bot_id + self.tentacles_setup_config: "octobot_tentacles_manager.configuration.TentaclesSetupConfiguration" = tentacles_setup_config + self.automations_config: typing.Optional[dict] = automations_config + self.automation_tasks: list[asyncio.Task] = [] + self.automation_details: list[AutomationDetails] = [] - def get_local_config(self): + def get_local_config(self) -> typing.Optional[dict]: return self.automations_config async def initialize(self) -> None: @@ -70,8 +82,11 @@ async def initialize(self) -> None: @classmethod async def get_raw_config_and_user_inputs( - cls, config, tentacles_setup_config, bot_id - ): + cls, + config: dict, + tentacles_setup_config: "octobot_tentacles_manager.configuration.TentaclesSetupConfiguration", + bot_id: str + ) -> tuple[dict, list[dict]]: tentacle_config = tentacles_manager_api.get_tentacle_config(tentacles_setup_config, cls) local_instance = cls.create_local_instance( config, tentacles_setup_config, tentacle_config @@ -116,11 +131,22 @@ def reset_config(self): ) @classmethod - def create_local_instance(cls, config, tentacles_setup_config, tentacle_config): + def create_local_instance(cls, + config: dict, + tentacles_setup_config: "octobot_tentacles_manager.configuration.TentaclesSetupConfiguration", + tentacle_config: dict + ) -> "Automation": return cls(None, tentacles_setup_config, automations_config=tentacle_config) - def _all_possible_steps(self, base_step): - return tentacles_management.get_all_classes_from_parent(base_step) + def _all_possible_steps( + self, + base_step: typing.Type[automation_step.AutomationStep] + ) -> list[typing.Type[automation_step.AutomationStep]]: + return [ + element + for element in tentacles_management.get_all_classes_from_parent(base_step) + if abc.ABC not in element.__bases__ + ] def get_all_steps(self): all_events = { @@ -162,11 +188,11 @@ def init_user_inputs(self, inputs: dict) -> None: self.automations_config.get(self.AUTOMATIONS, {}), inputs, title="Automations") default_event, default_conditions, default_actions = self._get_default_steps() - for index in range(1, automations_count + 1): + for index in range(1, int(automations_count) + 1): automation_id = f"{index}" # register trigger events self.UI.user_input(automation_id, common_enums.UserInputTypes.OBJECT, - automations.get(automation_id, {}), inputs, + automations.get(automation_id, {}), inputs, # type: ignore parent_input_name=self.AUTOMATIONS, title=f"Automation {index}") event = self.UI.user_input(self.TRIGGER_EVENT, common_enums.UserInputTypes.OPTIONS, @@ -191,14 +217,25 @@ def init_user_inputs(self, inputs: dict) -> None: title="Actions for this automation.") self._apply_user_inputs(actions, all_actions, inputs, automation_id) - def _apply_user_inputs(self, step_names, step_classes_by_name: dict, inputs, automation_id): + def _apply_user_inputs( + self, step_names: list[str], + step_classes_by_name: dict[str, typing.Type[automation_step.AutomationStep]], + inputs: dict, + automation_id: str + ) -> None: for step_name in step_names: try: self._apply_step_user_inputs(step_name, step_classes_by_name[step_name], inputs, automation_id) except KeyError: self.logger.error(f"Automation step not found: {step_name} (ignored)") - def _apply_step_user_inputs(self, step_name, step_class, inputs, automation_id): + def _apply_step_user_inputs( + self, + step_name: str, + step_class: typing.Type[automation_step.AutomationStep], + inputs: dict, + automation_id: str + ) -> None: step = step_class() user_inputs = step.get_user_inputs(self.UI, inputs, step_name) if user_inputs: @@ -210,10 +247,10 @@ def _apply_step_user_inputs(self, step_name, step_class, inputs, automation_id): title=f"{step_name} configuration" ) - def _is_valid_automation_config(self, automation_config): + def _is_valid_automation_config(self, automation_config: dict) -> bool: return automation_config.get(self.TRIGGER_EVENT) is not None - def _create_automation_details(self): + def _create_automation_details(self) -> None: all_events, all_conditions, all_actions = self.get_all_steps() automations_count = self.automations_config.get(self.AUTOMATIONS_COUNT, 0) for automation_id, automation_config in self.automations_config.get(self.AUTOMATIONS, {}).items(): @@ -232,21 +269,33 @@ def _create_automation_details(self): ] self.automation_details.append(AutomationDetails(event, conditions, actions)) - def _create_step(self, automations_config, step_name, classes): + def _create_step( + self, + automations_config: dict, + step_name: str, + classes: dict[str, typing.Type[automation_step.AutomationStep]] + ) -> automation_step.AutomationStep: step = classes[step_name]() step.apply_config(automations_config.get(step_name, {})) return step - async def _run_automation(self, automation_detail): + async def _run_automation(self, automation_detail: AutomationDetails) -> None: self.logger.info(f"Starting {automation_detail} automation") - async for _ in automation_detail.trigger_event.next_event(): - self.logger.debug(f"{automation_detail.trigger_event.get_name()} event triggered") - if await self._check_conditions(automation_detail): - await self._process_actions(automation_detail) + try: + async for execution_details in automation_detail.trigger_event.next_execution(): + self.logger.debug(f"{automation_detail.trigger_event.get_name()} event triggered: {execution_details}") + if await self._check_conditions(automation_detail, execution_details): + await self._process_actions(automation_detail, execution_details) + except errors.AutomationStopped: + self.logger.debug(f"{automation_detail.trigger_event.get_name()} automation stopped") - async def _check_conditions(self, automation_detail): + async def _check_conditions( + self, + automation_detail: AutomationDetails, + execution_details: execution_details.ExecutionDetails + ) -> bool: for condition in automation_detail.conditions: - if not await condition.evaluate(): + if not await condition.call_process(execution_details): # not all conditions are valid, skip event self.logger.debug(f"{condition.get_name()} is not valid: skipping " f"{automation_detail.trigger_event.get_name()} event") @@ -255,12 +304,18 @@ async def _check_conditions(self, automation_detail): f"{automation_detail.trigger_event.get_name()} event trigger") return True - async def _process_actions(self, automation_detail): + async def _process_actions( + self, + automation_detail: AutomationDetails, + execution_details: execution_details.ExecutionDetails + ) -> None: for action in automation_detail.actions: try: - self.logger.debug(f"Running {action.get_name()} after " - f"{automation_detail.trigger_event.get_name()} event") - await action.process() + self.logger.debug( + f"Running {action.get_name()} after " + f"{automation_detail.trigger_event.get_name()} event: {execution_details}" + ) + await action.call_process(execution_details) except Exception as err: self.logger.exception(err, True, f"Error when running action: {err}") diff --git a/octobot/automation/bases/__init__.py b/octobot/automation/bases/__init__.py index 455bdf0a15..26e5fb7c04 100644 --- a/octobot/automation/bases/__init__.py +++ b/octobot/automation/bases/__init__.py @@ -38,9 +38,16 @@ AutomationStep, ) +from octobot.automation.bases import execution_details + +from octobot.automation.bases.execution_details import ( + ExecutionDetails, +) + __all__ = [ "AbstractAction", "AbstractCondition", "AbstractTriggerEvent", "AutomationStep", + "ExecutionDetails", ] diff --git a/octobot/automation/bases/abstract_action.py b/octobot/automation/bases/abstract_action.py index 86c0b21d23..74342519a1 100644 --- a/octobot/automation/bases/abstract_action.py +++ b/octobot/automation/bases/abstract_action.py @@ -16,8 +16,16 @@ import abc import octobot.automation.bases.automation_step as automation_step +import octobot.automation.bases.execution_details as execution_details class AbstractAction(automation_step.AutomationStep, abc.ABC): - async def process(self): + + @automation_step.last_execution_details_updater + async def call_process(self, execution_details: execution_details.ExecutionDetails) -> bool: + return await self.process(execution_details) + + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: raise NotImplementedError diff --git a/octobot/automation/bases/abstract_channel_based_trigger_event.py b/octobot/automation/bases/abstract_channel_based_trigger_event.py new file mode 100644 index 0000000000..33f540c69c --- /dev/null +++ b/octobot/automation/bases/abstract_channel_based_trigger_event.py @@ -0,0 +1,118 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import abc +import asyncio +import typing + +import async_channel +import octobot_trading.api as trading_api +import octobot.automation.bases.abstract_trigger_event as abstract_trigger_event +import octobot.errors as errors + + +class AbstractChannelBasedTriggerEvent(abstract_trigger_event.AbstractTriggerEvent, abc.ABC): + def __init__(self): + super(AbstractChannelBasedTriggerEvent, self).__init__() + + self.exchange: typing.Optional[str] = None + self.exchange_id: typing.Optional[str] = None + self.symbol: typing.Optional[str] = None + + + # internal state + self._trigger_event: asyncio.Event = asyncio.Event() + self._consumers: list[async_channel.Consumer] = [] + self._registered_consumer: bool = False + self._waiter_future: asyncio.Future = None # type: ignore + + async def register_consumers(self, exchange_id: str) -> list[async_channel.Consumer]: + raise NotImplementedError(f"{self.__class__.__name__} must implement register_consumers") + + async def _register_exchange_channel_consumer( + self, + exchange: typing.Optional[str] = None, + exchange_id: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + ): + self._registered_consumer = True + exchange = exchange or self.exchange + exchange_id = exchange_id or self.exchange_id + symbol = symbol or self.symbol + if exchange_id and exchange: + exchange_manager = trading_api.get_exchange_manager_from_exchange_name_and_id(exchange, exchange_id) + elif exchange_id: + exchange_manager = trading_api.get_exchange_manager_from_exchange_id(exchange_id) + elif exchange: + exchange_managers = trading_api.get_exchange_managers_from_exchange_name(exchange) + if len(exchange_managers) != 1: + raise ValueError(f"Expected 1 exchange manager for exchange {exchange}, got {len(exchange_managers)}") + exchange_manager = exchange_managers[0] + else: + # register consumer for all exchanges + for exchange_id in trading_api.get_exchange_ids(): + await self._register_exchange_channel_consumer( + exchange_id=exchange_id, + ) + return + if symbol: + if symbol not in trading_api.get_trading_pairs(exchange_manager): + raise ValueError( + f"Symbol {symbol} not found on {exchange}. " + f"Available symbols: {trading_api.get_trading_pairs(exchange_manager)}" + ) + self._consumers.extend(await self.register_consumers( + trading_api.get_exchange_manager_id(exchange_manager) + )) + + async def stop(self): + await super().stop() + if self._waiter_future is not None and not self._waiter_future.done(): + self._waiter_future.cancel() + for consumer in self._consumers: + await consumer.stop() + self._consumers = [] + self._registered_consumer = False + + async def check_initial_event(self): + # implement if a check immediately after registering the consumer is necessary. + # For example if the publication in registered channel can have been missed at the time of registration. + pass + + async def _get_next_event(self) -> typing.Optional[str]: + if self.should_stop: + raise errors.AutomationStopped + self._waiter_future = asyncio.Future() + if not self._registered_consumer: + await self._register_exchange_channel_consumer() + await self.check_initial_event() + try: + return await self._waiter_future + except asyncio.CancelledError as err: + if not self.should_stop: + # stop to unregister consumers + await self.stop() + # clear_future was called + raise errors.AutomationStopped from err + + def trigger(self, description: typing.Optional[str] = None): + if self._waiter_future.done(): + self.logger.debug(f"Trigger {description} already done, skipping") + else: + self._waiter_future.set_result(description) + + def clear_future(self): + if self._waiter_future is not None and not self._waiter_future.done(): + self._waiter_future.cancel() diff --git a/octobot/automation/bases/abstract_condition.py b/octobot/automation/bases/abstract_condition.py index 19bdaf5c5b..082f7b86c1 100644 --- a/octobot/automation/bases/abstract_condition.py +++ b/octobot/automation/bases/abstract_condition.py @@ -16,8 +16,15 @@ import abc import octobot.automation.bases.automation_step as automation_step +import octobot.automation.bases.execution_details as execution_details class AbstractCondition(automation_step.AutomationStep, abc.ABC): - async def evaluate(self) -> bool: + @automation_step.last_execution_details_updater + async def call_process(self, execution_details: execution_details.ExecutionDetails) -> bool: + return await self.process(execution_details) + + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: raise NotImplementedError diff --git a/octobot/automation/bases/abstract_trigger_event.py b/octobot/automation/bases/abstract_trigger_event.py index 42f21199ea..3a31bd6c36 100644 --- a/octobot/automation/bases/abstract_trigger_event.py +++ b/octobot/automation/bases/abstract_trigger_event.py @@ -15,8 +15,10 @@ # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. import abc import time +import typing import octobot.automation.bases.automation_step as automation_step +import octobot.automation.bases.execution_details as execution_details class AbstractTriggerEvent(automation_step.AutomationStep, abc.ABC): @@ -25,24 +27,23 @@ def __init__(self): self.should_stop = False self.trigger_only_once = False self.max_trigger_frequency = 0 - self._last_trigger_time = 0 async def stop(self): self.should_stop = True - async def _get_next_event(self): + async def _get_next_event(self) -> typing.Optional[str]: raise NotImplementedError - async def next_event(self): + async def next_execution(self) -> typing.AsyncGenerator[execution_details.ExecutionDetails, None]: """ Async generator, use as follows: async for event in self.next_event(): # triggered when an event occurs """ - self._last_trigger_time = 0 - while not self.should_stop and not (self.trigger_only_once and self._last_trigger_time != 0): - new_event = await self._get_next_event() + self.last_execution_details.timestamp = 0 + while not self.should_stop and not (self.trigger_only_once and self.last_execution_details.timestamp != 0): + event_description = await self._get_next_event() trigger_time = time.time() - if not self.max_trigger_frequency or (trigger_time - self._last_trigger_time > self.max_trigger_frequency): - yield new_event - self._last_trigger_time = time.time() + if not self.max_trigger_frequency or (trigger_time - self.last_execution_details.timestamp > self.max_trigger_frequency): + self.update_last_execution_details(description=event_description) + yield self.last_execution_details diff --git a/octobot/automation/bases/automation_step.py b/octobot/automation/bases/automation_step.py index da106ece01..27c1585b3f 100644 --- a/octobot/automation/bases/automation_step.py +++ b/octobot/automation/bases/automation_step.py @@ -13,24 +13,55 @@ # # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import copy +import typing +import time import octobot_commons.logging as logging import octobot_commons.configuration as configuration +import octobot.automation.bases.execution_details as execution_details + + +def last_execution_details_updater(func): + async def last_execution_details_updater_wrapper(self, *args, **kwargs): + if result := await func(self, *args, **kwargs): + self.update_last_execution_details() + return result + return last_execution_details_updater_wrapper + class AutomationStep: def __init__(self): self.logger = logging.get_logger(self.get_name()) + self.last_execution_details: execution_details.ExecutionDetails = execution_details.ExecutionDetails( + timestamp=0, + description=None, + source=None, + ) @classmethod def get_name(cls): return cls.__name__ - @staticmethod - def get_description() -> str: - raise NotImplementedError + @classmethod + def get_description(cls) -> str: + raise NotImplementedError(f"get_description is not implemented for {cls.get_name()}") + + def get_execution_description(self) -> typing.Optional[str]: + return None def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: - raise NotImplementedError + raise NotImplementedError(f"get_user_inputs is not implemented for {self.get_name()}") def apply_config(self, config): - raise NotImplementedError + raise NotImplementedError(f"apply_config is not implemented for {self.get_name()}") + + def update_last_execution_details( + self, + description: typing.Optional[str] = None, + source: typing.Optional[execution_details.ExecutionDetails] = None, + ): + self.last_execution_details.timestamp = time.time() + self.last_execution_details.description = description or self.get_execution_description() + # avoid modifying the source if it's executed again + self.last_execution_details.source = copy.deepcopy(source) diff --git a/octobot/automation/bases/execution_details.py b/octobot/automation/bases/execution_details.py new file mode 100644 index 0000000000..358e90ff1b --- /dev/null +++ b/octobot/automation/bases/execution_details.py @@ -0,0 +1,42 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing + + +class ExecutionDetails: + def __init__( + self, + timestamp: float, + description: typing.Optional[str], + source: typing.Optional["ExecutionDetails"] + ): + self.timestamp: float = timestamp + self.description: typing.Optional[str] = description + self.source: typing.Optional[ExecutionDetails] = None + + def get_initial_execution_details(self) -> "ExecutionDetails": + source = self + while source.source is not None: + source = source.source + return source + + def __str__(self): + return ( + self.description if self.description else f"Execution at {self.timestamp}" + ) + + def __repr__(self): + return self.__str__() diff --git a/octobot/backtesting/independent_backtesting.py b/octobot/backtesting/independent_backtesting.py index bcb36b31c4..08278f80a8 100644 --- a/octobot/backtesting/independent_backtesting.py +++ b/octobot/backtesting/independent_backtesting.py @@ -81,7 +81,7 @@ def __init__( self.stopped_event = None self.post_backtesting_task = None self.join_backtesting_timeout = join_backtesting_timeout - self.enable_logs = enable_logs + self.enable_logs = common_constants.FORCE_BACKTESTING_LOGS or enable_logs self.stop_when_finished = stop_when_finished self.previous_log_level = commons_logging.get_global_logger_level() self.previous_handlers_log_level = commons_logging.get_logger_level_per_handler() @@ -225,6 +225,9 @@ async def _register_available_data(self): if description is None: raise RuntimeError(f"Impossible to start backtesting: missing or invalid data file: {data_file}") exchange_name = description[backtesting_enums.DataFormatKeys.EXCHANGE.value] + # Skip data files without exchange name (e.g., social data files) + if not exchange_name: + continue if exchange_name not in self.symbols_to_create_exchange_classes: self.symbols_to_create_exchange_classes[exchange_name] = [] for symbol in description[backtesting_enums.DataFormatKeys.SYMBOLS.value]: @@ -416,6 +419,8 @@ def _find_reference_market_and_update_contract_type(self): forced_contract_type = self.octobot_origin_config.get(common_constants.CONFIG_CONTRACT_TYPE, common_constants.USE_CURRENT_PROFILE) for symbols in self.symbols_to_create_exchange_classes.values(): + if not symbols: + continue symbol = symbols[0] if next(iter(self.octobot_backtesting.exchange_type_by_exchange.values())) \ == common_constants.CONFIG_EXCHANGE_FUTURE: @@ -451,7 +456,7 @@ def _find_reference_market_and_update_contract_type(self): if ref_market_candidate != quote and \ ref_market_candidates[ref_market_candidate] < ref_market_candidates[quote]: ref_market_candidate = quote - return ref_market_candidate + return ref_market_candidate or common_constants.DEFAULT_REFERENCE_MARKET def _add_config_default_backtesting_values(self): if backtesting_constants.CONFIG_BACKTESTING not in self.backtesting_config: diff --git a/octobot/backtesting/minimal_data_importer.py b/octobot/backtesting/minimal_data_importer.py index 3548eaf7d6..2811ef1a7e 100644 --- a/octobot/backtesting/minimal_data_importer.py +++ b/octobot/backtesting/minimal_data_importer.py @@ -7,7 +7,7 @@ import octobot_backtesting.importers import octobot_backtesting.enums -import octobot_trading.util.test_tools.exchange_data as exchange_data_import +import octobot_trading.exchanges.util.exchange_data as exchange_data_import class MinimalDataImporter(octobot_backtesting.importers.ExchangeDataImporter): @@ -52,6 +52,9 @@ async def initialize(self) -> None: # nothing to do pass + async def stop(self) -> None: + self.should_stop = True + async def get_data_timestamp_interval(self, time_frame=None): return self._min_timestamp, self._max_timestamp diff --git a/octobot/backtesting/octobot_backtesting.py b/octobot/backtesting/octobot_backtesting.py index 4e1d66239e..a264e02e90 100644 --- a/octobot/backtesting/octobot_backtesting.py +++ b/octobot/backtesting/octobot_backtesting.py @@ -37,6 +37,8 @@ import octobot_services.api as service_api +import octobot_tentacles_manager.api as tentacles_manager_api + import octobot_trading.exchanges as exchanges import octobot_trading.exchange_data as exchange_data import octobot_trading.api as trading_api @@ -47,7 +49,6 @@ import octobot.limits as limits import octobot.constants as constants import octobot.errors as errors -import octobot.databases_util as databases_util class OctoBotBacktesting: @@ -111,7 +112,7 @@ async def initialize_and_run(self): self.start_time = time.time() await commons_databases.init_bot_storage( self.bot_id, - databases_util.get_run_databases_identifier( + trading_api.get_run_databases_identifier( self.backtesting_config, self.tentacles_setup_config, enable_storage=self.enable_storage, @@ -128,11 +129,13 @@ async def initialize_and_run(self): self.logger.info(f"Configured backtesting from the {start_date} to the {end_date}") await self._init_exchanges() self._ensure_limits() + # Required to be created before social evaluators + await self._create_service_feeds() await self._create_evaluators() + await self._create_services() await self._fetch_backtesting_extra_data_if_any( min_timestamp, max_timestamp ) - await self._create_service_feeds() await backtesting_api.start_backtesting(self.backtesting) if logger.BOT_CHANNEL_LOGGER is not None and self.enable_logs: await self.start_loggers() @@ -196,6 +199,8 @@ async def stop(self, memory_check=False, should_raise=False): evaluator_api.del_matrix(self.matrix_id) for service_feed in self.service_feeds: await service_api.stop_service_feed(service_feed) + await service_api.clear_bot_id_feeds(self.bot_id) + self.service_feeds = [] except Exception as e: self.logger.exception(e, True, f"Error when stopping independent backtesting: {e}") if should_raise: @@ -305,11 +310,26 @@ async def _init_evaluators(self): await evaluator_api.create_evaluator_channels(self.matrix_id, is_backtesting=True) async def _init_service_feeds(self): + social_importers = self.backtesting.get_importers(importers.SocialDataImporter) service_feed_factory = service_api.create_service_feed_factory(self.backtesting_config, asyncio.get_event_loop(), - self.bot_id) - self.service_feeds = [service_feed_factory.create_service_feed(feed) - for feed in service_feed_factory.get_available_service_feeds(True)] + self.bot_id, + self.backtesting) + self.service_feeds = [] + for feed_class in service_feed_factory.get_available_service_feeds(True): + if all (service.get_is_enabled(self.backtesting_config) for service in feed_class.REQUIRED_SERVICES): + importer = self._get_matching_importer_for_feed(feed_class, social_importers) + feed = service_feed_factory.create_service_feed(feed_class, importer=importer) + self.service_feeds.append(feed) + + def _get_matching_importer_for_feed(self, feed_class, social_importers): + if not social_importers: + return None + expected_service_name = feed_class.get_name() + for importer in social_importers: + if importer.service_name and expected_service_name.lower() in importer.service_name.lower(): + return importer + return None def _ensure_limits(self): for exchange_id in self.exchange_manager_ids: @@ -324,6 +344,11 @@ def _ensure_limits(self): ) async def _create_evaluators(self): + if not self.exchange_manager_ids: + # No exchange managers to create evaluators for (e.g., when only social data files are used) + self.logger.info("No exchange managers found, skipping evaluator creation") + return + for exchange_id in self.exchange_manager_ids: exchange_configuration = trading_api.get_exchange_configuration_from_exchange_id(exchange_id) self.evaluators = await evaluator_api.create_and_start_all_type_evaluators( @@ -340,10 +365,35 @@ async def _create_evaluators(self): async def _create_service_feeds(self): for feed in self.service_feeds: - if not await service_api.start_service_feed(feed, False, {}): + if not await service_api.start_service_feed(feed, True, {}): self.logger.error(f"Failed to start {feed.get_name()}. Evaluators requiring this service feed " f"might not work properly") + async def _create_services(self): + if not self.evaluators: + return + required_service_classes = set() + handled_evaluator_classes = set() + for evaluator in list_util.flatten_list(self.evaluators): + if evaluator is None or evaluator.__class__ in handled_evaluator_classes: + continue + handled_evaluator_classes.add(evaluator.__class__) + try: + for required_class in tentacles_manager_api.get_tentacle_classes_requirements(evaluator.__class__): + if required_class is not None and service_api.is_service_class(required_class): + required_service_classes.add(required_class) + except Exception as e: + pass + for service_class in required_service_classes: + try: + await service_api.get_service(service_class, True, self.services_config) + self.logger.info(f"Initialized {service_class.get_name()} service for backtesting") + except Exception as e: + self.logger.error( + f"Failed to initialize {service_class.get_name()} service for backtesting: {e}. " + f"Evaluators requiring this service might not work properly" + ) + async def _init_backtesting(self): if self.backtesting_data: self.backtesting_data.reset_cached_indexes() @@ -375,6 +425,7 @@ async def _configure_backtesting_time_window(self): ) return min_timestamp, max_timestamp + # TODO : deprecated, use Service fetch historical data instead async def _fetch_backtesting_extra_data_if_any( self, min_timestamp: float, max_timestamp: float ): @@ -391,6 +442,7 @@ async def _fetch_backtesting_extra_data_if_any( self.has_fetched_data = True await asyncio.gather(*coros) + # TODO : deprecated, use Service fetch historical data instead async def _fetch_gpt_history(self, evaluator, min_timestamp: float, max_timestamp: float): # prevent circular import import tentacles.Services.Services_bases.gpt_service as gpt_service @@ -410,6 +462,7 @@ async def _fetch_gpt_history(self, evaluator, min_timestamp: float, max_timestam max_timestamp ) + # TODO : deprecated, use Service fetch historical data instead async def clear_fetched_data(self): if self.has_fetched_data: # prevent circular import diff --git a/octobot/cli.py b/octobot/cli.py index 00829d9b3c..f88362cd26 100644 --- a/octobot/cli.py +++ b/octobot/cli.py @@ -18,6 +18,7 @@ import sys import multiprocessing import asyncio +import traceback import packaging.version as packaging_version @@ -40,6 +41,7 @@ sys.path.append(os.path.dirname(sys.executable)) import octobot.octobot as octobot_class + import octobot.octobot_node as octobot_node_class import octobot.commands as commands import octobot.configuration_manager as configuration_manager import octobot.octobot_backtesting_factory as octobot_backtesting @@ -51,6 +53,7 @@ import octobot.community.errors import octobot.limits as limits except ImportError as err: + traceback.print_exc() print( "Error importing OctoBot dependencies, please install OctoBot with the [full] option. " "Example: \"pip install -U octobot[full]\" " @@ -241,16 +244,18 @@ async def _get_authenticated_community_if_possible(config, logger): async def _async_load_community_data(community_auth, config, logger, is_first_startup): - if constants.IS_CLOUD_ENV and is_first_startup: - if not community_auth.is_logged_in(): - raise authentication.FailedAuthentication( - "Impossible to load community data without an authenticated user account" - ) - # auto config - if constants.USE_FETCHED_BOT_CONFIG: - await _apply_db_bot_config(logger, config, community_auth) - else: - await _apply_community_startup_info_to_config(logger, config, community_auth) + if constants.IS_CLOUD_ENV: + if is_first_startup: + if not community_auth.is_logged_in(): + raise authentication.FailedAuthentication( + "Impossible to load community data without an authenticated user account" + ) + # auto config + if constants.USE_FETCHED_BOT_CONFIG: + await _apply_db_bot_config(logger, config, community_auth) + else: + await _apply_community_startup_info_to_config(logger, config, community_auth) + await community_auth.community_bot.on_started_bot() def _apply_forced_configs(community_auth, logger, config, is_first_startup): @@ -313,7 +318,9 @@ def start_octobot(args, default_config_file=None): print(constants.LONG_VERSION) return - logger = octobot_logger.init_logger() + # log folder can be overridden by the LOGS_FOLDER environment variable, + # useful to run multiple bots from the same folder + logger = octobot_logger.init_logger(logs_folder=constants.LOGS_FOLDER) startup_messages = [] # Version @@ -362,11 +369,16 @@ def start_octobot(args, default_config_file=None): startup_messages += limits.apply_config_limits(config) # create OctoBot instance + distribution = configuration_manager.get_distribution(config.config) if args.backtesting: bot = octobot_backtesting.OctoBotBacktestingFactory(config, run_on_common_part_only=not args.whole_data_range, enable_join_timeout=args.enable_backtesting_timeout, enable_logs=not args.no_logs) + elif distribution is enums.OctoBotDistribution.NODE: + bot = octobot_node_class.OctoBotNode(config, community_authenticator=community_auth, + reset_trading_history=args.reset_trading_history, + startup_messages=startup_messages) else: bot = octobot_class.OctoBot(config, community_authenticator=community_auth, reset_trading_history=args.reset_trading_history, @@ -497,6 +509,80 @@ def octobot_parser(parser, default_config_file=None): tentacles_manager_cli.register_tentacles_manager_arguments(tentacles_parser) tentacles_parser.set_defaults(func=commands.call_tentacles_manager) + # node manager + node_parser = subparsers.add_parser("node", help='Start OctoBot in node mode.\n' + 'Use "node --help" to get the ' + 'node manager help.') + _register_node_arguments(node_parser) + node_parser.set_defaults(func=lambda args: start_node(args, default_config_file)) + + # sync server + sync_parser = subparsers.add_parser("sync", help='Start OctoBot Sync server.\n' + 'Use "sync --help" to get the ' + 'sync server help.') + _register_sync_arguments(sync_parser) + sync_parser.set_defaults(func=lambda args: start_sync(args)) + + +def _register_node_arguments(parser): + parser.add_argument( + '--host', + help='Host to bind the server to.', + type=str, + default=None + ) + parser.add_argument( + '--port', + help='Port to bind the server to (default: 8000).', + type=int, + default=None + ) + parser.add_argument( + '--master', + help='Enable master node mode (schedules and executes tasks, UI enabled by default).', + action='store_true' + ) + parser.add_argument( + '--consumer_only', + help='Start OctoBot Node in consumer only mode, in case the master node is not enough (requires a postgres database).', + type=bool, + default=False + ) + + +def start_node(args, default_config_file=None): + import octobot_node.config + + constants.FORCED_DISTRIBUTION = enums.OctoBotDistribution.NODE.value + if args.master: + octobot_node.config.settings.IS_MASTER_MODE = True + octobot_node.config.settings.CONSUMER_ONLY = args.consumer_only + start_octobot(args, default_config_file) + + +def _register_sync_arguments(parser): + parser.add_argument( + '--host', + help='Host to bind the sync server to (default: 0.0.0.0).', + type=str, + default="0.0.0.0" + ) + parser.add_argument( + '--port', + help='Port to bind the sync server to (default: 3000).', + type=int, + default=None + ) + + +def start_sync(args): + import octobot_sync.server + + octobot_sync.server.start_sync_server( + host=args.host, + port=args.port, + ) + def start_background_octobot_with_args( version=False, diff --git a/octobot/commands.py b/octobot/commands.py index 8811a6eb52..25f4261582 100644 --- a/octobot/commands.py +++ b/octobot/commands.py @@ -43,6 +43,8 @@ IGNORED_COMMAND_WHEN_RESTART = ["-u", "--update"] GLOBAL_BOT_INSTANCE = None +_signal_interrupt_count = 0 +_signal_interrupt_lock = threading.Lock() def call_tentacles_manager(command_args): @@ -87,7 +89,8 @@ def run_tentacles_install_or_update(community_auth, config): async def _install_or_update_tentacles(community_auth, config): additional_tentacles_package_urls = community_auth.get_saved_package_urls() - await install_or_update_tentacles(config, additional_tentacles_package_urls, False) + only_additional = not constants.INSTALL_DEFAULT_TENTACLES + await install_or_update_tentacles(config, additional_tentacles_package_urls, only_additional) def run_update_or_repair_tentacles_if_necessary(community_auth, config, tentacles_setup_config): @@ -135,24 +138,35 @@ async def update_or_repair_tentacles_if_necessary(community_auth, selected_profi elif force_refresh_tentacles_setup_config: community_tentacles_packages.refresh_tentacles_setup_config() - if local_profile_tentacles_setup_config is None or \ - not tentacles_manager_api.are_tentacles_up_to_date(local_profile_tentacles_setup_config, constants.VERSION): - logger.info("OctoBot tentacles are not up to date. Updating tentacles...") - _check_tentacles_install_exit() - if await install_or_update_tentacles(config, to_install_urls, False): - logger.info("OctoBot tentacles are now up to date.") + if constants.SHOULD_CHECK_TENTACLES: + if local_profile_tentacles_setup_config is None or \ + not tentacles_manager_api.are_tentacles_up_to_date(local_profile_tentacles_setup_config, constants.VERSION): + logger.info("OctoBot tentacles are not up to date. Updating tentacles...") + _check_tentacles_install_exit() + only_additional = not constants.INSTALL_DEFAULT_TENTACLES + if await install_or_update_tentacles(config, to_install_urls, only_additional): + logger.info("OctoBot tentacles are now up to date.") + else: + if to_install_urls: + logger.debug("Installing new tentacles.") + # install additional tentacles only when tentacles arch is valid. Install all tentacles otherwise + only_additional = tentacles_manager_api.is_tentacles_architecture_valid() + await install_or_update_tentacles(config, to_install_urls, only_additional) + if tentacles_manager_api.load_tentacles(verbose=True): + logger.debug("OctoBot tentacles are up to date.") + else: + logger.info("OctoBot tentacles are damaged. Reinstalling tentacles ...") + _check_tentacles_install_exit() + only_additional = not constants.INSTALL_DEFAULT_TENTACLES + await install_or_update_tentacles(config, [], only_additional) else: - if to_install_urls: - logger.debug("Installing new tentacles.") - # install additional tentacles only when tentacles arch is valid. Install all tentacles otherwise - only_additional = tentacles_manager_api.is_tentacles_architecture_valid() - await install_or_update_tentacles(config, to_install_urls, only_additional) if tentacles_manager_api.load_tentacles(verbose=True): - logger.debug("OctoBot tentacles are up to date.") + logger.debug("OctoBot tentacles loaded.") else: - logger.info("OctoBot tentacles are damaged. Installing default tentacles only ...") - _check_tentacles_install_exit() - await install_or_update_tentacles(config, [], False) + logger.error( + f"OctoBot tentacles are damaged. Continuing anyway as SHOULD_CHECK_TENTACLES " + f"is {constants.SHOULD_CHECK_TENTACLES}..." + ) async def install_or_update_tentacles( @@ -254,21 +268,46 @@ def set_global_bot_instance(bot_instance): GLOBAL_BOT_INSTANCE = bot_instance -def _signal_handler(_, __): - # run Commands.BOT.stop_threads in thread because can't use the current asyncio loop - stopping_thread = threading.Thread(target=GLOBAL_BOT_INSTANCE.task_manager.stop_tasks(), - name="Commands signal_handler stop_tasks") - stopping_thread.start() - stopping_thread.join() - os._exit(0) +def _signal_handler(signum, _): + # Handle SIGINT/SIGTERM: first delivery stops the bot; a second delivery exits without stop_tasks() + # if the first stop is stuck. + global _signal_interrupt_count + commands_logger = logging.get_logger(COMMANDS_LOGGER_NAME) + signal_text = signal.strsignal(signum) + if not signal_text: + signal_text = f"signal {signum}" + with _signal_interrupt_lock: + _signal_interrupt_count += 1 + interrupt_count = _signal_interrupt_count + if interrupt_count >= 2: + commands_logger.warning( + f"Received signal {signum} ({signal_text}) (delivery {interrupt_count}): " + f"forcing immediate process exit without stop_tasks()" + ) + os._exit(128 + signum if isinstance(signum, int) and 0 < signum < 32 else 1) + commands_logger.info( + f"Received signal {signum} ({signal_text}) (delivery {interrupt_count})" + ) + if GLOBAL_BOT_INSTANCE is not None: + commands_logger.info("Stopping OctoBot after signal (stop_bot, force=True)") + stop_bot(GLOBAL_BOT_INSTANCE, force=True) + else: + commands_logger.info("Exiting: no bot instance was registered (signal handler)") + os._exit(0) def run_bot(bot, logger): - # handle CTRL+C signal + # handle SIGINT (Ctrl+C) and SIGTERM (e.g. docker stop) try: signal.signal(signal.SIGINT, _signal_handler) except ValueError as e: logger.warning(f"Can't setup signal handler : {e}") + sigterm = getattr(signal, "SIGTERM", None) + if sigterm is not None: + try: + signal.signal(sigterm, _signal_handler) + except ValueError as e: + logger.warning(f"Can't setup SIGTERM handler : {e}") # start bot bot.task_manager.run_forever(start_bot(bot, logger)) diff --git a/octobot/community/__init__.py b/octobot/community/__init__.py index 29eaaf8095..be4e146ba0 100644 --- a/octobot/community/__init__.py +++ b/octobot/community/__init__.py @@ -21,6 +21,8 @@ BotError, BotNotFoundError, NoBotDeviceError, + MissingDeploymentError, + MissingProductsSubscriptionError, ) from octobot.community import models from octobot.community.models import ( @@ -41,6 +43,7 @@ get_master_and_nested_product_slug_from_profile_name, get_tentacles_data_exchange_config, USD_LIKE, + from_community_order_to_trading_order, ) from octobot.community.supabase_backend import ( SyncConfigurationStorage, @@ -49,7 +52,7 @@ retried_failed_supabase_request, CommunitySupabaseClient, ) - +from octobot.community import local_authenticator from octobot.community import community_analysis from octobot.community import community_manager from octobot.community import authentication @@ -90,6 +93,8 @@ from octobot.community.errors_upload import ( init_sentry_tracker, flush_tracker, + upload_error, + share_logs, ) from octobot.community.identifiers_provider import ( IdentifiersProvider, @@ -100,7 +105,14 @@ ClickhouseHistoricalBackendClient, IcebergHistoricalBackendClient, ) - +from octobot.community.community_bot import ( + CommunityBot, +) +from octobot.community.local_authenticator import ( + get_stateless_configuration, + local_user_authenticator, + local_anon_user_authenticator, +) __all__ = [ "RequestError", "StatusCodeRequestError", @@ -131,6 +143,7 @@ "get_master_and_nested_product_slug_from_profile_name", "get_tentacles_data_exchange_config", "USD_LIKE", + "from_community_order_to_trading_order", "SyncConfigurationStorage", "ASyncConfigurationStorage", "AuthenticatedAsyncSupabaseClient", @@ -155,4 +168,12 @@ "HistoricalBackendClient", "ClickhouseHistoricalBackendClient", "IcebergHistoricalBackendClient", + "CommunityBot", + "MissingDeploymentError", + "MissingProductsSubscriptionError", + "upload_error", + "share_logs", + "get_stateless_configuration", + "local_user_authenticator", + "local_anon_user_authenticator", ] diff --git a/octobot/community/authentication.py b/octobot/community/authentication.py index 3fb7c4d593..c35b5a5d79 100644 --- a/octobot/community/authentication.py +++ b/octobot/community/authentication.py @@ -34,14 +34,17 @@ import octobot.community.models.strategy_data as strategy_data import octobot.community.supabase_backend as supabase_backend import octobot.community.supabase_backend.enums as backend_enums +import octobot.community.wallet_backend as wallet_backend import octobot.community.feeds as community_feeds import octobot.community.tentacles_packages as community_tentacles_packages +import octobot.community.community_bot as community_bot import octobot_commons.constants as commons_constants import octobot_commons.enums as commons_enums import octobot_commons.authentication as authentication import octobot_commons.configuration as commons_configuration import octobot_commons.profiles as commons_profiles import octobot_trading.enums as trading_enums +import octobot_sync.client as sync_client def expired_session_retrier(func): @@ -75,8 +78,9 @@ async def bot_data_update_wrapper(*args, raise_errors=False, **kwargs): self.logger.debug(f"Skipping {func.__name__} update: no user selected bot.") return try: - self.logger.debug(f"bot_data_update: {func.__name__} initiated.") - return await func(*args, **kwargs) + with supabase_backend.error_describer(): + self.logger.debug(f"bot_data_update: {func.__name__} initiated.") + return await func(*args, **kwargs) except errors.SessionTokenExpiredError: # requried by expired_session_retrier raise @@ -104,23 +108,30 @@ class CommunityAuthentication(authentication.Authenticator): def __init__(self, config=None, backend_url=None, backend_key=None, use_as_singleton=True): super().__init__(use_as_singleton=use_as_singleton) - self.config = config - self.backend_url = backend_url or identifiers_provider.IdentifiersProvider.BACKEND_URL - self.backend_key = backend_key or identifiers_provider.IdentifiersProvider.BACKEND_KEY - self.configuration_storage = supabase_backend.ASyncConfigurationStorage(self.config) - self.supabase_client = self._create_client() - self.user_account = community_user_account.CommunityUserAccount() - self.public_data = community_public_data.CommunityPublicData() - self.successfully_fetched_tentacles_package_urls = False - self.silent_auth = False - self._community_feed = None - - self.initialized_event = None - self._login_completed = None - self._fetched_private_data = None - self._startup_info = None - - self._fetch_account_task = None + self.config: typing.Optional[commons_configuration.Configuration] = config + self.backend_url: str = backend_url or identifiers_provider.IdentifiersProvider.BACKEND_URL + self.backend_key: str = backend_key or identifiers_provider.IdentifiersProvider.BACKEND_KEY + self.configuration_storage: supabase_backend.ASyncConfigurationStorage = supabase_backend.ASyncConfigurationStorage(self.config) + self.supabase_client: supabase_backend.CommunitySupabaseClient = self._create_client() + self.user_account: community_user_account.CommunityUserAccount = community_user_account.CommunityUserAccount() + self.public_data: community_public_data.CommunityPublicData = community_public_data.CommunityPublicData() + self.community_bot: community_bot.CommunityBot = community_bot.CommunityBot(self) + self.successfully_fetched_tentacles_package_urls: bool = False + self.silent_auth: bool = False + self._community_feed: typing.Optional[community_feeds.AbstractFeed] = None + + self.initialized_event: typing.Optional[asyncio.Event] = None + self._login_completed: typing.Optional[asyncio.Event] = None + self._fetched_private_data: typing.Optional[asyncio.Event] = None + self._startup_info: typing.Optional[startup_info.StartupInfo] = None + + self._fetch_account_task: typing.Optional[asyncio.Task] = None + self._sync_client = None + self._sync_address: str = "" + self._sync_data_signer = None + self._wallet_backend: wallet_backend.WalletBackend = wallet_backend.WalletBackend( + self.configuration_storage.sync_storage, self.logger + ) @staticmethod def create(configuration: commons_configuration.Configuration, **kwargs): @@ -576,6 +587,11 @@ async def stop(self): await self.supabase_client.aclose() if self._community_feed: await self._community_feed.stop() + if self.community_bot: + self.community_bot.clear() + if self._sync_client: + await self._sync_client.close() + self._sync_client = None self.logger.debug("Stopped") def _update_supports(self, resp_status, json_data): @@ -620,6 +636,74 @@ async def _initialize_account(self, minimal=False, fetch_private_data=True): finally: self.initialized_event.set() + def _get_or_create_wallet_private_key(self, chain_id: str) -> typing.Optional[str]: + return self._wallet_backend.get_or_create_wallet_private_key(chain_id) + + def is_node_wallet_configured(self) -> bool: + return self._wallet_backend.is_node_wallet_configured() + + def get_node_wallet_address(self) -> typing.Optional[str]: + return self._wallet_backend.get_node_wallet_address() + + def create_and_encrypt_node_wallet(self, passphrase: str): + return self._wallet_backend.create_and_encrypt_node_wallet(passphrase) + + def import_and_encrypt_node_wallet(self, private_key: str, passphrase: str): + return self._wallet_backend.import_and_encrypt_node_wallet(private_key, passphrase) + + def decrypt_node_wallet(self, passphrase: str): + return self._wallet_backend.decrypt_node_wallet(passphrase) + + def verify_node_passphrase(self, passphrase: str) -> bool: + return self._wallet_backend.verify_node_passphrase(passphrase) + + def init_sync_client(self): + if self._sync_client is not None: + return + try: + chain_id = constants.SYNC_CHAIN_ID + sync_url = identifiers_provider.IdentifiersProvider.SYNC_SERVER_URL + if not sync_url and not constants.ENABLE_REPLICA_SERVER: + self.logger.debug("No sync server URL configured, skipping sync client init") + return + private_key = self._get_or_create_wallet_private_key(chain_id) + if private_key is None: + self.logger.debug("Wallet is encrypted: sync client requires passphrase to initialize") + return + self._sync_client, self._sync_address, self._sync_data_signer = sync_client.create_sync_client( + private_key=private_key, + chain_id=chain_id, + sync_url=sync_url, + start_replica_server=constants.ENABLE_REPLICA_SERVER, + replica_port=constants.REPLICA_SERVER_PORT, + replica_write_mode=constants.REPLICA_WRITE_MODE, + replica_sync_interval_ms=constants.REPLICA_SYNC_INTERVAL_MS, + ) + except Exception as e: + self.logger.exception(e, True, f"Failed to initialize sync client: {e}") + + def init_sync_client_with_passphrase(self, passphrase: str) -> None: + if self._sync_client is not None: + return + try: + chain_id = constants.SYNC_CHAIN_ID + sync_url = identifiers_provider.IdentifiersProvider.SYNC_SERVER_URL + if not sync_url and not constants.ENABLE_REPLICA_SERVER: + self.logger.debug("No sync server URL configured, skipping sync client init") + return + wallet = self.decrypt_node_wallet(passphrase) + self._sync_client, self._sync_address, self._sync_data_signer = sync_client.create_sync_client( + private_key=wallet.private_key, + chain_id=chain_id, + sync_url=sync_url, + start_replica_server=constants.ENABLE_REPLICA_SERVER, + replica_port=constants.REPLICA_SERVER_PORT, + replica_write_mode=constants.REPLICA_WRITE_MODE, + replica_sync_interval_ms=constants.REPLICA_SYNC_INTERVAL_MS, + ) + except Exception as e: + self.logger.exception(e, True, f"Failed to initialize sync client: {e}") + async def _init_community_data(self, fetch_private_data): coros = [ self.update_supports(), diff --git a/octobot/community/community_bot.py b/octobot/community/community_bot.py new file mode 100644 index 0000000000..1a9b95f5cb --- /dev/null +++ b/octobot/community/community_bot.py @@ -0,0 +1,279 @@ +# pylint: disable=E0711, E0702 +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing +import contextlib +import time +import datetime + +import octobot_commons.cache_util as cache_util +import octobot.community.errors as errors +import octobot.community.supabase_backend.enums as supabase_enums +import octobot.community.models.formatters as formatters +import octobot_commons.logging as commons_logging +import octobot_commons.enums as commons_enums +import octobot.automation + +if typing.TYPE_CHECKING: + import octobot.community.authentication as community_authentication + + +_STOPPED_STRATEGY_EXECUTION_LOG_MAX_PERIOD = 60 + +# deployment error statuses that should be cleared by the bot during startup +_CLEARABLE_DEPLOYMENT_ERROR_STATUSES: list[supabase_enums.BotDeploymentErrorsStatuses] = [ + supabase_enums.BotDeploymentErrorsStatuses.MISSING_API_KEY_TRADING_RIGHTS, + supabase_enums.BotDeploymentErrorsStatuses.INVALID_EXCHANGE_CREDENTIALS, + supabase_enums.BotDeploymentErrorsStatuses.MISSING_MINIMAL_FUNDS, + supabase_enums.BotDeploymentErrorsStatuses.INTERNAL_SERVER_ERROR, +] +_NEW_BOT_DEPLOYMENT_CLEARABLE_ERROR_STATUSES: list[supabase_enums.BotDeploymentErrorsStatuses] = ( + _CLEARABLE_DEPLOYMENT_ERROR_STATUSES + [ + # also clear stop condition triggered error status if the bot has just been deployed + supabase_enums.BotDeploymentErrorsStatuses.STOP_CONDITION_TRIGGERED, + ] +) + + +def suppressed_local_env_bot_error(f): + async def _suppressed_local_env_bot_error_wrapper(*args, **kwargs): + try: + return await f(*args, **kwargs) + except errors.BotError as err: + if octobot.constants.IS_CLOUD_ENV: + # this is not normal: propagate the error + raise err + else: + # this can happen in local environment, just log it + CommunityBot.get_logger().info(f"Skipped bot update: {err}") + return _suppressed_local_env_bot_error_wrapper + + +@contextlib.contextmanager +def caught_global_exceptions(operation_name: str): + try: + yield + except Exception as err: + CommunityBot.get_logger().exception(err, True, f"Error when running {operation_name}: {err}") + + +def initialized_bot_id(f): + @suppressed_local_env_bot_error + async def _initialized_bot_id_wrapper(self, *args, **kwargs): + if not self.authenticator.user_account.bot_id: + raise errors.BotError(self.authenticator.user_account.NO_SELECTED_BOT_DESC) + return await f(self, *args, **kwargs) + return _initialized_bot_id_wrapper + + +class CommunityBot: + """ + Bot utility methods to update the community bot representation in database + """ + + def __init__(self, authenticator: "community_authentication.CommunityAuthentication"): + self.authenticator: "community_authentication.CommunityAuthentication" = authenticator + self._has_just_been_deployed: bool = False + + async def should_trade_according_to_products_subscription_and_deployment_error_status( + self, + new_deployment_timeout: float = octobot.constants.DEFAULT_NEW_DEPLOYMENT_TIMEOUT + ) -> bool: + products_subscription = await self._fetch_products_subscription() + if self._is_product_subscription_desired_status_active(products_subscription): + # bot should be running, now check error status if not just deployed + # don't fetch deployment error status if bot should not trade + if self.had_just_been_deployed_during_startup(new_deployment_timeout) or not self._is_deployment_error_status_in( + [supabase_enums.BotDeploymentErrorsStatuses.STOP_CONDITION_TRIGGERED] + ): + # bot has just been deployed or didn't trigger stop condition yet + return True + self.get_logger().warning( + f"Bot {self.authenticator.user_account.bot_id} should not trade: " + f"products_subscription_desired_status={products_subscription[supabase_enums.ProductsSubscriptionsKeys.DESIRED_STATUS.value]}, " + f"deployment_error_status={self.authenticator.user_account.get_selected_bot_deployment_error_status()}" + ) + return False + + async def on_started_bot(self): + if octobot.constants.IS_CLOUD_ENV: + await self._insert_bot_started_log() + await self._ensure_clear_deployment_error_status() + + async def on_trading_modes_stopped_and_traders_paused( + self, + stop_reason: commons_enums.StopReason, + execution_details: typing.Optional[octobot.automation.ExecutionDetails], + schedule_bot_stop: bool, + ): + with caught_global_exceptions( + f"on_trading_modes_stopped_and_traders_paused: {stop_reason=}" + ): + if schedule_bot_stop: + # schedule bot stop: bot will pause trading and be stopped + await self.schedule_bot_stop(stop_reason) + elif stop_reason is not None: + # only update error status: bot will pause trading but remain on + await self.update_deployment_error_status_for_stop_reason(stop_reason) + if execution_details is not None: + with caught_global_exceptions("insert_stopped_strategy_execution_log"): + await self.insert_stopped_strategy_execution_log( # pylint: disable=unexpected-keyword-arg + execution_details.description, + max_period=_STOPPED_STRATEGY_EXECUTION_LOG_MAX_PERIOD # type: ignore + ) + + @initialized_bot_id + async def schedule_bot_stop( + self, stop_reason: typing.Optional[commons_enums.StopReason] + ): + if stop_reason is not None: + await self.update_deployment_error_status_for_stop_reason(stop_reason) + await self._update_product_subscription_desired_status( + supabase_enums.ProductSubscriptionDesiredStatus.CANCELED + ) + + async def update_deployment_error_status_for_stop_reason( + self, stop_reason: commons_enums.StopReason + ): + await self._update_deployment_error_status( + formatters.get_deployment_error_status_from_stop_reason(stop_reason) + ) + + @cache_util.prevented_multiple_calls + async def insert_stopped_strategy_execution_log(self, reason: typing.Optional[str]): + await self.insert_bot_log( + supabase_enums.BotLogType.STOPPED_STRATEGY_EXECUTION, { + supabase_enums.BotLogContentKeys.REASON.value: reason + } + ) + + @initialized_bot_id + async def insert_bot_log( + self, bot_log_type: supabase_enums.BotLogType, content: typing.Optional[dict] + ): + await self.authenticator.supabase_client.insert_bot_log( + self.authenticator.user_account.bot_id, + bot_log_type, + content + ) + self.get_logger().info( + f"Inserted bot log: {bot_log_type.value}: {content} " + f"[bot_id={self.authenticator.user_account.bot_id}]" + ) + + def _is_new_bot(self) -> bool: + try: + # if portfolio id is not set, it means the bot is new: its portfolio has not yet been created + return self.authenticator.user_account.get_selected_bot_current_portfolio_id() is None + except (TypeError, KeyError): + return True + + def _is_product_subscription_desired_status_active(self, products_subscription: dict) -> bool: + return ( + products_subscription[supabase_enums.ProductsSubscriptionsKeys.DESIRED_STATUS.value] in ( + supabase_enums.ProductSubscriptionDesiredStatus.ACTIVE.value, + supabase_enums.ProductSubscriptionDesiredStatus.RESTARTING.value, + ) + ) + + def _is_deployment_error_status_in( + self, error_statuses: list[supabase_enums.BotDeploymentErrorsStatuses] + ) -> bool: + deployment_error_status = self.authenticator.user_account.get_selected_bot_deployment_error_status() + for error_status in error_statuses: + if deployment_error_status == error_status.value: + return True + return False + + @suppressed_local_env_bot_error + async def _insert_bot_started_log(self): + bot_log_type = ( + supabase_enums.BotLogType.BOT_STARTED if self._is_new_bot() + else supabase_enums.BotLogType.BOT_RESTARTED + ) + with caught_global_exceptions("insert_bot_started_log"): + await self.insert_bot_log(bot_log_type, None) + + @initialized_bot_id + async def _update_deployment_error_status(self, error_status: supabase_enums.BotDeploymentErrorsStatuses): + self.get_logger().info( + f"Updating bot {self.authenticator.user_account.bot_id} deployment error " + f"status to {error_status.value}" + ) + try: + deployment_id = self.authenticator.user_account.get_selected_bot_deployment_id() + except KeyError: + raise errors.MissingDeploymentError("No deployment is set for current bot") + update = {supabase_enums.BotDeploymentKeys.ERROR_STATUS.value: error_status.value} + await self.authenticator.supabase_client.update_deployment(deployment_id, update) + + @suppressed_local_env_bot_error + async def _ensure_clear_deployment_error_status(self): + with caught_global_exceptions("ensure_clear_deployment_error_status"): + clearable_error_statuses = ( + _NEW_BOT_DEPLOYMENT_CLEARABLE_ERROR_STATUSES if self.had_just_been_deployed_during_startup() + else _CLEARABLE_DEPLOYMENT_ERROR_STATUSES + ) + if self._is_deployment_error_status_in(clearable_error_statuses): + await self._update_deployment_error_status(supabase_enums.BotDeploymentErrorsStatuses.NO_ERROR) + + async def _fetch_products_subscription(self) -> dict: + products_subscription = await self.authenticator.get_current_bot_products_subscription() + if products_subscription is None: + raise errors.MissingProductsSubscriptionError( + f"No products subscription found for bot {self.authenticator.user_account.bot_id}" + ) + return products_subscription + + async def _update_product_subscription_desired_status( + self, desired_status: supabase_enums.ProductSubscriptionDesiredStatus + ): + products_subscription = await self._fetch_products_subscription() + products_subscription_id = products_subscription[supabase_enums.ProductsSubscriptionsKeys.ID.value] + await self.authenticator.supabase_client.update_bot_products_subscription( + products_subscription_id, + {supabase_enums.ProductsSubscriptionsKeys.DESIRED_STATUS.value: desired_status.value} + ) + self.get_logger().info( + f"Updated product_subscription.desired_status to {desired_status.value} [{products_subscription_id=}]" + ) + + def had_just_been_deployed_during_startup( + self, new_deployment_timeout: float = octobot.constants.DEFAULT_NEW_DEPLOYMENT_TIMEOUT + ) -> bool: + if self._has_just_been_deployed: + return True + if deployment_time := CommunityBot.get_deployment_time(): + # bot has been deployed within the last new_deployment_timeout seconds + # store the result to avoid side effects if the method is called multiple times + self._has_just_been_deployed = time.time() < deployment_time + new_deployment_timeout + return self._has_just_been_deployed + + @staticmethod + def get_deployment_time() -> typing.Optional[float]: + if raw_deployment_time := octobot.constants.DEPLOYMENT_TIME: + try: + return datetime.datetime.fromisoformat(raw_deployment_time).timestamp() + except ValueError: + CommunityBot.get_logger().error(f"Invalid deployment time: {raw_deployment_time}") + return None + + @classmethod + def get_logger(cls): + return commons_logging.get_logger(cls.__name__) + + def clear(self): + self.authenticator = None # type: ignore diff --git a/octobot/community/errors.py b/octobot/community/errors.py index 9f0f99a82a..fbe70c8282 100644 --- a/octobot/community/errors.py +++ b/octobot/community/errors.py @@ -48,6 +48,14 @@ class MissingBotConfigError(BotError): pass +class MissingProductsSubscriptionError(BotError): + pass + + +class MissingDeploymentError(BotError): + pass + + class InvalidBotConfigError(BotError): pass diff --git a/octobot/community/errors_upload/__init__.py b/octobot/community/errors_upload/__init__.py index 84551bf1f3..c521f99e95 100644 --- a/octobot/community/errors_upload/__init__.py +++ b/octobot/community/errors_upload/__init__.py @@ -20,7 +20,15 @@ flush_tracker, ) +from octobot.community.errors_upload import error_sharing +from octobot.community.errors_upload.error_sharing import ( + upload_error, + share_logs, +) + __all__ = [ "init_sentry_tracker", "flush_tracker", + "upload_error", + "share_logs", ] diff --git a/octobot/community/errors_upload/error_sharing.py b/octobot/community/errors_upload/error_sharing.py new file mode 100644 index 0000000000..87821837e6 --- /dev/null +++ b/octobot/community/errors_upload/error_sharing.py @@ -0,0 +1,164 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +import base64 +import os +import secrets +import shutil +import time +import traceback +import uuid +import zipfile +from typing import Any + +import octobot_commons.authentication as authentication +import octobot_commons.logging as logging + +from starfish_sdk import StarfishClient, SyncManager + +import octobot.constants + +logger = logging.get_logger("ErrorSharing") + +ERRORS_PUSH_PATH_TEMPLATE = "/v1/push/users/{pubkey}/errors/{errorId}" +ERRORS_PULL_PATH_TEMPLATE = "/v1/pull/users/{pubkey}/errors/{errorId}" +ENCRYPTION_INFO = "octobot-error-data" + + +def _get_client_and_address(passphrase: str | None = None) -> tuple[StarfishClient, str, any] | None: + authenticator = authentication.Authenticator.get_instance_if_exists() + if authenticator is None: + return None + if passphrase: + authenticator.init_sync_client_with_passphrase(passphrase) + else: + authenticator.init_sync_client() + if authenticator._sync_client is None: + return None + return authenticator._sync_client, authenticator._sync_address, authenticator._sync_data_signer + + +def _generate_credentials() -> tuple[str, str]: + error_secret = secrets.token_hex(32) + salt = secrets.token_hex(16) + return error_secret, salt + + +async def upload_error( + client: StarfishClient, + address: str, + error: Exception, + *, + context: dict[str, Any] | None = None, + error_id: str | None = None, + sign_data=None, +) -> dict[str, Any] | None: + error_secret, salt = _generate_credentials() + push_path = ERRORS_PUSH_PATH_TEMPLATE.format(pubkey=address, errorId=salt) + pull_path = ERRORS_PULL_PATH_TEMPLATE.format(pubkey=address, errorId=salt) + + payload: dict[str, Any] = { + "id": error_id or str(uuid.uuid4()), + "timestamp": int(time.time() * 1000), + "version": octobot.constants.LONG_VERSION, + "message": str(error), + "type": type(error).__name__, + "traceback": traceback.format_exception(error), + } + if octobot.constants.COMMUNITY_BOT_ID: + payload["bot_id"] = octobot.constants.COMMUNITY_BOT_ID + if context: + payload["context"] = context + + try: + manager = SyncManager( + client=client, + pull_path=pull_path, + push_path=push_path, + encryption_secret=error_secret, + encryption_salt=salt, + encryption_info=ENCRYPTION_INFO, + sign_data=sign_data, + ) + result = await manager.push(payload) + if result is not None: + result["errorId"] = salt + result["errorSecret"] = error_secret + return result + except Exception as push_error: + logger.exception(push_error, True, f"Failed to upload error report: {push_error}") + return None + + +async def share_logs( + export_path: str, + passphrase: str | None = None, + log_paths: list[str] | None = None, +) -> dict[str, Any] | None: + result = _get_client_and_address(passphrase) + if result is None: + logger.warning("Cannot share logs: no sync client configured") + return None + client, address, data_signer = result + + error_secret, salt = _generate_credentials() + push_path = ERRORS_PUSH_PATH_TEMPLATE.format(pubkey=address, errorId=salt) + pull_path = ERRORS_PULL_PATH_TEMPLATE.format(pubkey=address, errorId=salt) + + zip_path = f"{export_path}.zip" + try: + if log_paths is not None: + with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf: + for path in log_paths: + if os.path.isfile(path): + zf.write(path, arcname=os.path.basename(path)) + else: + shutil.make_archive(export_path, "zip", octobot.constants.LOGS_FOLDER) + with open(zip_path, "rb") as f: + logs_b64 = base64.b64encode(f.read()).decode("ascii") + finally: + if os.path.isfile(zip_path): + os.remove(zip_path) + + payload: dict[str, Any] = { + "id": f"logs-{uuid.uuid4()}", + "timestamp": int(time.time() * 1000), + "version": octobot.constants.LONG_VERSION, + "message": "User shared logs", + "type": "logs", + "logs_zip_b64": logs_b64, + } + if octobot.constants.COMMUNITY_BOT_ID: + payload["bot_id"] = octobot.constants.COMMUNITY_BOT_ID + + try: + manager = SyncManager( + client=client, + pull_path=pull_path, + push_path=push_path, + encryption_secret=error_secret, + encryption_salt=salt, + encryption_info=ENCRYPTION_INFO, + sign_data=data_signer, + ) + result = await manager.push(payload) + if result is not None: + result["errorId"] = salt + result["errorSecret"] = error_secret + return result + except Exception as push_error: + logger.exception(push_error, True, f"Failed to share logs: {push_error}") + return None diff --git a/octobot/community/errors_upload/sentry_aiohttp_transport.py b/octobot/community/errors_upload/sentry_aiohttp_transport.py index 714cdc10d4..8cce3b0767 100644 --- a/octobot/community/errors_upload/sentry_aiohttp_transport.py +++ b/octobot/community/errors_upload/sentry_aiohttp_transport.py @@ -22,7 +22,6 @@ import sentry_sdk.consts import sentry_sdk.utils import sentry_sdk.envelope -import sentry_sdk.transport import sentry_sdk.types @@ -146,7 +145,7 @@ async def _async_send_envelope( return None def capture_event( - self, event # type: sentry_sdk.types.Event + self, event: sentry_sdk.types.Event ) -> None: """ DEPRECATED: Please use capture_envelope instead. @@ -171,6 +170,8 @@ async def async_kill(self): await self._worker.async_kill() class AiohttpWorker: + # do not inherit from sentry_sdk.worker.BackgroundWorker to avoid implementing all its methods + # only implement required methods def __init__(self, queue_size=sentry_sdk.consts.DEFAULT_QUEUE_SIZE): self.session = None self.call_tasks = [] @@ -207,7 +208,7 @@ def kill(self) -> None: self._kill_task = asyncio.create_task(self.session.close()) async def async_kill(self): - self._stopped = type + self._stopped = True self.kill() if self._kill_task and not self._kill_task.done(): await self._kill_task diff --git a/octobot/community/identifiers_provider.py b/octobot/community/identifiers_provider.py index 077c9a3935..1a2a166b2b 100644 --- a/octobot/community/identifiers_provider.py +++ b/octobot/community/identifiers_provider.py @@ -26,6 +26,7 @@ class IdentifiersProvider: FRONTEND_PASSWORD_RECOVER_URL: str = None BACKEND_URL: str = None BACKEND_KEY: str = None + SYNC_SERVER_URL: str = None @staticmethod def use_production(): @@ -34,6 +35,7 @@ def use_production(): IdentifiersProvider.FRONTEND_PASSWORD_RECOVER_URL = constants.OCTOBOT_COMMUNITY_RECOVER_PASSWORD_URL IdentifiersProvider.BACKEND_URL = constants.COMMUNITY_BACKEND_URL IdentifiersProvider.BACKEND_KEY = constants.COMMUNITY_BACKEND_KEY + IdentifiersProvider.SYNC_SERVER_URL = constants.SYNC_SERVER_URL IdentifiersProvider._register_environment(enums.CommunityEnvironments.Production) @staticmethod @@ -43,6 +45,7 @@ def use_staging(): IdentifiersProvider.FRONTEND_PASSWORD_RECOVER_URL = constants.STAGING_COMMUNITY_RECOVER_PASSWORD_URL IdentifiersProvider.BACKEND_URL = constants.STAGING_COMMUNITY_BACKEND_URL IdentifiersProvider.BACKEND_KEY = constants.STAGING_COMMUNITY_BACKEND_KEY + IdentifiersProvider.SYNC_SERVER_URL = constants.STAGING_SYNC_SERVER_URL IdentifiersProvider._register_environment(enums.CommunityEnvironments.Staging) @staticmethod diff --git a/octobot/community/local_authenticator.py b/octobot/community/local_authenticator.py new file mode 100644 index 0000000000..14761a7e3f --- /dev/null +++ b/octobot/community/local_authenticator.py @@ -0,0 +1,76 @@ +import contextlib +import typing + +import octobot.community as community +import octobot_commons.configuration +import octobot_commons.logging as common_logging +import octobot.community.identifiers_provider as identifiers_provider + + +def get_stateless_configuration() -> octobot_commons.configuration.Configuration: + configuration = octobot_commons.configuration.Configuration(None, None) + configuration.config = {} + # disable save + configuration.save = lambda *_, **__: _ # type: ignore + return configuration + +@contextlib.asynccontextmanager +async def local_user_authenticator( + email: str, + hidden: bool, + backend_url: typing.Optional[str] = None, + password: typing.Optional[str] = None, + auth_key: typing.Optional[str] = None, +) -> typing.AsyncGenerator["community.CommunityAuthentication", None]: + if not email: + raise ValueError("email is required") + community.IdentifiersProvider.use_production() + local_instance = None + configuration = get_stateless_configuration() + try: + local_instance = community.CommunityAuthentication( + config=configuration, backend_url=backend_url, use_as_singleton=False + ) + local_instance.supabase_client.is_admin = False + local_instance.silent_auth = hidden + if auth_key: + password_value = None + auth_key_value = auth_key + else: + password_value = password + auth_key_value = None + await local_instance.login( + email, password_value, password_token=None, auth_key=auth_key_value, minimal=True + ) + common_logging.get_logger("local_community_user_authenticator").info( + f"Authenticated as {email[:3]}[...]{email[-4:]}" + ) + yield local_instance + finally: + if local_instance is not None: + await local_instance.logout() + await local_instance.stop() + + +@contextlib.asynccontextmanager +async def local_anon_user_authenticator( + backend_url: typing.Optional[str] = None, + anon_key: typing.Optional[str] = None, +) -> typing.AsyncGenerator["community.CommunityAuthentication", None]: + anon_key = anon_key or identifiers_provider.IdentifiersProvider.BACKEND_KEY + community.IdentifiersProvider.use_production() + local_instance = None + configuration = get_stateless_configuration() + try: + local_instance = community.CommunityAuthentication( + config=configuration, backend_url=backend_url, backend_key=anon_key, use_as_singleton=False + ) + local_instance.supabase_client.is_admin = False + common_logging.get_logger("local_community_user_authenticator").info( + f"Authenticated as anonymous user" + ) + yield local_instance + finally: + if local_instance is not None: + await local_instance.logout() + await local_instance.stop() diff --git a/octobot/community/models/__init__.py b/octobot/community/models/__init__.py index c709652160..640f27f2c0 100644 --- a/octobot/community/models/__init__.py +++ b/octobot/community/models/__init__.py @@ -61,6 +61,8 @@ to_community_exchange_internal_name, get_tentacles_data_exchange_config, USD_LIKE, + get_deployment_error_status_from_stop_reason, + from_community_order_to_trading_order, ) from octobot.community.models.community_public_data import ( CommunityPublicData @@ -98,10 +100,12 @@ "to_community_exchange_internal_name", "get_tentacles_data_exchange_config", "USD_LIKE", + "get_deployment_error_status_from_stop_reason", "CommunityPublicData", "StrategyData", "is_custom_category", "get_custom_strategy_name", "is_custom_strategy_profile", "ExecutedProductDetails", + "from_community_order_to_trading_order", ] diff --git a/octobot/community/models/community_user_account.py b/octobot/community/models/community_user_account.py index 91b6e5c9de..b0388b9e9c 100644 --- a/octobot/community/models/community_user_account.py +++ b/octobot/community/models/community_user_account.py @@ -84,6 +84,9 @@ def is_archived(self, bot): def get_selected_bot_deployment_id(self): return self.get_bot_deployment_value(backend_enums.BotDeploymentKeys.ID) + def get_selected_bot_deployment_error_status(self) -> typing.Optional[str]: + return self.get_bot_deployment_value(backend_enums.BotDeploymentKeys.ERROR_STATUS) + def get_bot_deployment_status(self) -> (str, str): deployment = self._get_bot_deployment(self._selected_bot_raw_data) return ( diff --git a/octobot/community/models/formatters.py b/octobot/community/models/formatters.py index f7c7926b22..371673a94b 100644 --- a/octobot/community/models/formatters.py +++ b/octobot/community/models/formatters.py @@ -21,6 +21,7 @@ import octobot_commons.constants as commons_constants import octobot_commons.logging as commons_logging import octobot_commons.profiles as commons_profiles +import octobot_commons.enums as commons_enums import octobot_trading.enums as trading_enums import octobot_trading.constants as trading_constants import octobot_trading.personal_data as trading_personal_data @@ -152,6 +153,42 @@ def format_orders(orders: list, exchange_name: str) -> list: ] +def from_community_order_to_trading_order(community_order: dict) -> dict: + bot_order_type = community_order[backend_enums.OrderKeys.TYPE.value] + trade_order_type = trading_personal_data.get_trade_order_type(trading_enums.TraderOrderType(bot_order_type)) + try: + order_side = trading_enums.TradeOrderSide( + community_order[backend_enums.OrderKeys.SIDE.value] + ) + except KeyError: + # for retro-compatibility + order_side = trading_enums.TradeOrderSide.SELL if "sell" in bot_order_type.lower() else \ + trading_enums.TradeOrderSide.BUY + order_time = community_order.get(backend_enums.OrderKeys.TIME.value, 0) + exchange_id = community_order.get(backend_enums.OrderKeys.EXCHANGE_ID.value, str(order_time)) + return { + trading_enums.ExchangeConstantsOrderColumns.ID.value: f"local-{exchange_id}", + trading_enums.ExchangeConstantsOrderColumns.EXCHANGE_ID.value: exchange_id, + trading_enums.ExchangeConstantsOrderColumns.TYPE.value: trade_order_type.value, + trading_enums.ExchangeConstantsOrderColumns.STATUS.value: trading_enums.OrderStatus.OPEN.value, + trading_enums.ExchangeConstantsOrderColumns.SIDE.value: order_side.value, + trading_enums.ExchangeConstantsOrderColumns.TRIGGER_ABOVE.value: community_order.get( + backend_enums.OrderKeys.TRIGGER_ABOVE.value + ), + trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value: community_order[backend_enums.OrderKeys.QUANTITY.value], + trading_enums.ExchangeConstantsOrderColumns.FILLED.value: community_order.get(backend_enums.OrderKeys.FILLED.value, 0), + trading_enums.ExchangeConstantsOrderColumns.PRICE.value: community_order[backend_enums.OrderKeys.PRICE.value], + trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value: community_order[backend_enums.OrderKeys.SYMBOL.value], + trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value: order_time, + trading_enums.ExchangeConstantsOrderColumns.REDUCE_ONLY.value: community_order.get( + backend_enums.OrderKeys.REDUCE_ONLY.value, False + ), + trading_enums.ExchangeConstantsOrderColumns.IS_ACTIVE.value: community_order.get( + backend_enums.OrderKeys.IS_ACTIVE.value, True + ), + } + + def _get_order_type(order_or_trade): order_type = order_or_trade[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] try: @@ -317,5 +354,23 @@ def get_tentacles_data_exchange_config( raise ImportError(f"Import tentacles_exchanges failed: {err}") +def get_deployment_error_status_from_stop_reason( + stop_reason: commons_enums.StopReason +) -> backend_enums.BotDeploymentErrorsStatuses: + match stop_reason: + case commons_enums.StopReason.MISSING_API_KEY_TRADING_RIGHTS: + return backend_enums.BotDeploymentErrorsStatuses.MISSING_API_KEY_TRADING_RIGHTS + case commons_enums.StopReason.INVALID_EXCHANGE_CREDENTIALS: + return backend_enums.BotDeploymentErrorsStatuses.INVALID_EXCHANGE_CREDENTIALS + case commons_enums.StopReason.STOP_CONDITION_TRIGGERED: + return backend_enums.BotDeploymentErrorsStatuses.STOP_CONDITION_TRIGGERED + case commons_enums.StopReason.MISSING_MINIMAL_FUNDS: + return backend_enums.BotDeploymentErrorsStatuses.MISSING_MINIMAL_FUNDS + case commons_enums.StopReason.INVALID_CONFIG: + return backend_enums.BotDeploymentErrorsStatuses.INVALID_CONFIG + case _: + raise ValueError(f"Unhandled stop reason: {stop_reason}") + + def _get_logger(): return commons_logging.get_logger("CommunityFormatter") diff --git a/octobot/community/supabase_backend/community_supabase_client.py b/octobot/community/supabase_backend/community_supabase_client.py index 08eaee0319..3c5a3c0358 100644 --- a/octobot/community/supabase_backend/community_supabase_client.py +++ b/octobot/community/supabase_backend/community_supabase_client.py @@ -51,10 +51,10 @@ import octobot.community.supabase_backend.configuration_storage as configuration_storage import octobot.community.identifiers_provider as identifiers_provider -# Experimental to prevent httpx.PoolTimeout + _INTERNAL_LOGGERS = [ - # "httpx", "httpx._client", - # "httpcore.http11", "httpcore.http2", "httpcore.proxy", "httpcore.socks", "httpcore.connection" + "httpx", "asyncio", + "httpcore.http11", "httpcore.connection", ] # disable httpx info logs as it logs every request commons_logging.set_logging_level(_INTERNAL_LOGGERS, logging.WARNING) @@ -244,14 +244,14 @@ def get_in_saved_session(self) -> typing.Union[supabase_auth.types.Session, None async def has_login_info(self) -> bool: return bool(await self.auth._storage.get_item(self.auth._storage_key)) - async def update_metadata(self, metadata_update) -> dict: + async def update_metadata(self, metadata_update) -> dict[str, typing.Any]: return ( await self.auth.update_user({ "data": metadata_update }) ).user.model_dump() - async def get_user(self) -> dict: + async def get_user(self) -> dict[str, typing.Any]: try: user = await self._get_user() return user.model_dump() @@ -305,7 +305,7 @@ async def fetch_checkout_url(self, payment_method: str, redirect_url: str) -> di ) return json.loads(json.loads(resp)["message"]) - async def fetch_bot(self, bot_id) -> dict: + async def fetch_bot(self, bot_id: str) -> postgrest.types.JSON: with jwt_expired_auth_raiser(): try: # https://postgrest.org/en/stable/references/api/resource_embedding.html#hint-disambiguation @@ -322,7 +322,7 @@ async def fetch_bots(self) -> list: "*,bot_deployment:bot_deployments!bots_current_deployment_id_fkey!inner(*)" ).execute()).data - async def create_bot(self, deployment_type: enums.DeploymentTypes) -> dict: + async def create_bot(self, deployment_type: enums.DeploymentTypes) -> postgrest.types.JSON: created_bot = (await self.table("bots").insert({ enums.BotKeys.USER_ID.value: (await self._get_user()).id }).execute()).data[0] @@ -338,7 +338,9 @@ async def create_bot(self, deployment_type: enums.DeploymentTypes) -> dict: # fetch bot to fetch embed elements (like deployments) return await self.fetch_bot(bot_id) - async def _create_deployment(self, deployment_type, bot_id, version): + async def _create_deployment( + self, deployment_type: enums.DeploymentTypes, bot_id: str, version: str + ) -> postgrest.types.JSON: current_time = time.time() return (await self.table("bot_deployments").insert({ enums.BotDeploymentKeys.TYPE.value: deployment_type.value, @@ -350,31 +352,40 @@ async def _create_deployment(self, deployment_type, bot_id, version): ) }).execute()).data[0] - async def update_bot(self, bot_id, bot_update) -> dict: + async def update_bot(self, bot_id: str, bot_update: dict) -> postgrest.types.JSON: await self.table("bots").update(bot_update).eq(enums.BotKeys.ID.value, bot_id).execute() # fetch bot to fetch embed elements (like deployments) return await self.fetch_bot(bot_id) - async def update_deployment(self, deployment_id, deployment_update: dict) -> dict: + async def update_deployment(self, deployment_id: str, deployment_update: dict) -> postgrest.types.JSON: return (await self.table("bot_deployments").update(deployment_update).eq( enums.BotDeploymentKeys.ID.value, deployment_id ).execute()).data[0] - def get_deployment_activity_update(self, last_activity: float, next_activity: float) -> dict: + def get_deployment_activity_update(self, last_activity: float, next_activity: float) -> dict[str, dict]: return { enums.BotDeploymentKeys.ACTIVITIES.value: self._get_activities_content(last_activity, next_activity) } - def _get_activities_content(self, last_activity: float, next_activity: float): + def _get_activities_content(self, last_activity: float, next_activity: float) -> dict[str, str]: return { enums.BotDeploymentActivitiesKeys.LAST_ACTIVITY.value: self.get_formatted_time(last_activity), enums.BotDeploymentActivitiesKeys.NEXT_ACTIVITY.value: self.get_formatted_time(next_activity) } - async def delete_bot(self, bot_id) -> list: + async def delete_bot(self, bot_id: str) -> list: return (await self.table("bots").delete().eq(enums.BotKeys.ID.value, bot_id).execute()).data - async def fetch_deployment_url(self, deployment_url_id) -> dict: + async def insert_bot_log( + self, bot_id: str, bot_log_type: enums.BotLogType, content: typing.Optional[dict] + ) -> postgrest.APIResponse: + return await self.table("bot_logs").insert({ + "bot_id": bot_id, + "type": bot_log_type.value, + "content": content or None, + }).execute() + + async def fetch_deployment_url(self, deployment_url_id: str) -> postgrest.types.JSON: try: return (await self.table("bot_deployment_urls").select("*").eq( enums.BotDeploymentURLKeys.ID.value, deployment_url_id @@ -382,11 +393,13 @@ async def fetch_deployment_url(self, deployment_url_id) -> dict: except IndexError: raise errors.BotDeploymentURLNotFoundError(deployment_url_id) - async def fetch_startup_info(self, bot_id) -> dict: + async def fetch_startup_info(self, bot_id: str) -> postgrest.types.JSON: resp = await self.rpc("get_startup_info", {"bot_id": bot_id}).execute() return resp.data[0] - async def fetch_products(self, category_types: list[str], author_ids: typing.Optional[list[str]]) -> list: + async def fetch_products( + self, category_types: list[str], author_ids: typing.Optional[list[str]] + ) -> list[dict]: try: sanitized_authors = ",".join(map( postgrest.utils.sanitize_param, @@ -427,34 +440,41 @@ async def fetch_products(self, category_types: list[str], author_ids: typing.Opt commons_logging.get_logger(__name__).error(f"Error when fetching products: {err}") return [] - async def fetch_subscribed_products_urls(self) -> list: + async def fetch_subscribed_products_urls(self) -> list[str]: resp = await self.rpc("get_subscribed_products_urls").execute() return resp.data or [] - async def fetch_bot_products_subscription(self, bot_deployment_id: str) -> dict: + async def fetch_bot_products_subscription(self, bot_deployment_id: str) -> postgrest.types.JSON: return (await self.table("bot_deployments").select( "products_subscription:products_subscriptions!product_subscription_id(id, status, desired_status)" ).eq( enums.BotDeploymentKeys.ID.value, bot_deployment_id ).execute()).data[0]["products_subscription"] - async def fetch_trades(self, bot_id) -> list: + async def update_bot_products_subscription( + self, products_subscription_id: str, update: dict[str, typing.Any] + ) -> postgrest.APIResponse: + return await self.table("products_subscriptions").update(update).eq( + enums.ProductsSubscriptionsKeys.ID.value, products_subscription_id + ).execute() + + async def fetch_trades(self, bot_id: str) -> list[postgrest.types.JSON]: # should be paginated to fetch all trades, will fetch the 1000 first ones only return (await self.table("bot_trades").select("*").eq( enums.TradeKeys.BOT_ID.value, bot_id ).execute()).data - async def reset_trades(self, bot_id): + async def reset_trades(self, bot_id: str) -> list[postgrest.types.JSON]: return (await self.table("bot_trades").delete().eq( enums.TradeKeys.BOT_ID.value, bot_id ).execute()).data - async def upsert_trades(self, formatted_trades) -> list: + async def upsert_trades(self, formatted_trades: list[dict]) -> list[postgrest.types.JSON]: return (await self.table("bot_trades").upsert( formatted_trades, on_conflict=f"{enums.TradeKeys.TRADE_ID.value},{enums.TradeKeys.TIME.value}" ).execute()).data - async def update_bot_orders(self, bot_id, formatted_orders) -> dict: + async def update_bot_orders(self, bot_id: str, formatted_orders: list[dict]) -> postgrest.types.JSON: bot_update = { enums.BotKeys.ORDERS.value: formatted_orders } @@ -462,7 +482,7 @@ async def update_bot_orders(self, bot_id, formatted_orders) -> dict: bot_id, bot_update ) - async def update_bot_positions(self, bot_id, formatted_positions) -> dict: + async def update_bot_positions(self, bot_id: str, formatted_positions: list[dict]) -> postgrest.types.JSON: bot_update = { enums.BotKeys.POSITIONS.value: formatted_positions } @@ -928,7 +948,7 @@ def is_compatible_availability( ) ) - async def fetch_exchanges_by_credential_ids(self, exchange_credential_ids: list) -> dict: + async def fetch_exchanges_by_credential_ids(self, exchange_credential_ids: list) -> dict[str, dict]: exchanges = (await self.table("exchange_credentials").select( "id," f"exchange:exchanges(" @@ -997,7 +1017,7 @@ async def update_portfolio(self, portfolio_update: dict) -> list: enums.PortfolioKeys.ID.value, portfolio_update[enums.PortfolioKeys.ID.value] ).execute()).data - async def switch_portfolio(self, new_portfolio) -> dict: + async def switch_portfolio(self, new_portfolio) -> postgrest.types.JSON: # use a new current portfolio for the given bot bot_id = new_portfolio[enums.PortfolioKeys.BOT_ID.value] inserted_portfolio = (await self.table("bot_portfolios").insert(new_portfolio).execute()).data[0] @@ -1037,7 +1057,7 @@ async def fetch_gpt_signal( async def fetch_gpt_signals_history( self, exchange: typing.Union[str, None], symbol: str, time_frame: commons_enums.TimeFrames, first_open_time: float, last_open_time: float, version: str - ) -> dict: + ) -> dict[float, postgrest.types.JSON]: matcher = { "symbol": symbol, "time_frame": time_frame.value, @@ -1179,7 +1199,7 @@ async def cursor_paginated_fetch( ) return total_elements - def _format_gpt_signals(self, signals: list): + def _format_gpt_signals(self, signals: list[postgrest.types.JSON]) -> dict[float, postgrest.types.JSON]: return { self.get_parsed_time(signal["timestamp"]).timestamp(): signal["signal"]["content"] for signal in signals diff --git a/octobot/community/supabase_backend/enums.py b/octobot/community/supabase_backend/enums.py index fd15dc7fba..c7d636d21b 100644 --- a/octobot/community/supabase_backend/enums.py +++ b/octobot/community/supabase_backend/enums.py @@ -85,6 +85,8 @@ class BotDeploymentStatus(enum.Enum): class ProductSubscriptionDesiredStatus(enum.Enum): ACTIVE = 'active' CANCELED = "canceled" + RESTARTING = "restarting" + STOPPING = "stopping" class BotDeploymentErrorsStatuses(enum.Enum): @@ -99,7 +101,7 @@ class BotDeploymentErrorsStatuses(enum.Enum): TOO_MANY_ORDERS_TO_EXECUTE_STRATEGY = "too_many_orders_to_execute_strategy" MISSING_CONFIG = "missing_config" EXPIRED_BOT = "expired_bot" - MAX_SIMULATORS_REACHED = "max_simulators_reached" + STOP_CONDITION_TRIGGERED = "stop_condition_triggered" class ExchangeAccountStatuses(enum.Enum): diff --git a/octobot/community/wallet_backend/__init__.py b/octobot/community/wallet_backend/__init__.py new file mode 100644 index 0000000000..8a4ef04e26 --- /dev/null +++ b/octobot/community/wallet_backend/__init__.py @@ -0,0 +1,24 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot.community.wallet_backend import community_wallet +from octobot.community.wallet_backend.community_wallet import ( + WalletBackend, +) + +__all__ = [ + "WalletBackend", +] diff --git a/octobot/community/wallet_backend/community_wallet.py b/octobot/community/wallet_backend/community_wallet.py new file mode 100644 index 0000000000..93c431c223 --- /dev/null +++ b/octobot/community/wallet_backend/community_wallet.py @@ -0,0 +1,116 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import base64 +import typing + +import octobot_commons.cryptography.encryption as commons_encryption + +import octobot.constants as constants +import octobot_sync.chain as sync_chain + + +class WalletBackend: + def __init__(self, sync_storage, logger): + self._sync_storage = sync_storage + self.logger = logger + + def get_or_create_wallet_private_key(self, chain_id: str) -> typing.Optional[str]: + chain_type, chain_network = chain_id.split(":", 1) + wallets = self._sync_storage.get_item(constants.CONFIG_COMMUNITY_WALLETS) or {} + chain_wallets = wallets.get(chain_type, {}) + private_key = chain_wallets.get(chain_network) + if isinstance(private_key, dict): + # Encrypted keystore — plaintext key unavailable without passphrase + return None + if private_key: + return private_key + wallet = sync_chain.create_evm_wallet() + chain_wallets[chain_network] = wallet.private_key + wallets[chain_type] = chain_wallets + self._sync_storage.set_item(constants.CONFIG_COMMUNITY_WALLETS, wallets) + self.logger.info(f"Created new {chain_type} wallet for {chain_id}: {wallet.address}") + return wallet.private_key + + def _get_node_keystore(self) -> dict: + chain_type, chain_network = constants.SYNC_CHAIN_ID.split(":", 1) + wallets = self._sync_storage.get_item(constants.CONFIG_COMMUNITY_WALLETS) or {} + value = wallets.get(chain_type, {}).get(chain_network) + if isinstance(value, dict): + return value + return {} + + def _save_node_keystore(self, keystore: dict) -> None: + chain_type, chain_network = constants.SYNC_CHAIN_ID.split(":", 1) + wallets = self._sync_storage.get_item(constants.CONFIG_COMMUNITY_WALLETS) or {} + chain_wallets = wallets.get(chain_type, {}) + chain_wallets[chain_network] = keystore + wallets[chain_type] = chain_wallets + self._sync_storage.set_item(constants.CONFIG_COMMUNITY_WALLETS, wallets) + + def is_node_wallet_configured(self) -> bool: + try: + return bool(self._get_node_keystore().get("encrypted_key")) + except Exception: + return False + + def get_node_wallet_address(self) -> typing.Optional[str]: + try: + return self._get_node_keystore().get("address") or None + except Exception: + return None + + def create_and_encrypt_node_wallet(self, passphrase: str) -> sync_chain.Wallet: + wallet = sync_chain.create_evm_wallet() + key_bytes = bytes.fromhex(wallet.private_key.removeprefix("0x")) + encrypted_key, salt, iv = commons_encryption.pbkdf2_encrypt_aes_key(key_bytes, passphrase) + self._save_node_keystore({ + "address": wallet.address, + "encrypted_key": base64.b64encode(encrypted_key).decode(), + "salt": base64.b64encode(salt).decode(), + "iv": base64.b64encode(iv).decode(), + }) + return wallet + + def import_and_encrypt_node_wallet(self, private_key: str, passphrase: str) -> sync_chain.Wallet: + try: + address = sync_chain.address_from_evm_key(private_key) + except Exception as err: + raise ValueError(f"Invalid EVM private key: {err}") from err + key_bytes = bytes.fromhex(private_key.removeprefix("0x")) + encrypted_key, salt, iv = commons_encryption.pbkdf2_encrypt_aes_key(key_bytes, passphrase) + self._save_node_keystore({ + "address": address, + "encrypted_key": base64.b64encode(encrypted_key).decode(), + "salt": base64.b64encode(salt).decode(), + "iv": base64.b64encode(iv).decode(), + }) + return sync_chain.Wallet(private_key=private_key, address=address) + + def decrypt_node_wallet(self, passphrase: str) -> sync_chain.Wallet: + keystore = self._get_node_keystore() + encrypted_key = base64.b64decode(keystore["encrypted_key"]) + salt = base64.b64decode(keystore["salt"]) + iv = base64.b64decode(keystore["iv"]) + address = keystore["address"] + key_bytes = commons_encryption.pbkdf2_decrypt_aes_key(encrypted_key, passphrase, salt, iv) + return sync_chain.Wallet(private_key=key_bytes.hex(), address=address) + + def verify_node_passphrase(self, passphrase: str) -> bool: + try: + self.decrypt_node_wallet(passphrase) + return True + except Exception: + return False diff --git a/octobot/config/profile_schema.json b/octobot/config/profile_schema.json index e3087a8ea2..93f930a81c 100644 --- a/octobot/config/profile_schema.json +++ b/octobot/config/profile_schema.json @@ -31,6 +31,9 @@ "read_only": { "type": "boolean" }, + "hidden": { + "type": "boolean" + }, "extra_backtesting_time_frames": { "type": "array", "uniqueItems": true, @@ -43,6 +46,9 @@ "config": { "type": "object", "properties": { + "distribution": { + "type": "string" + }, "crypto-currencies": { "type": "object", "patternProperties": { @@ -168,6 +174,9 @@ "current-live-id": { "type": "integer", "minimum": 1 + }, + "paused": { + "type": "boolean" } }, "required": [ diff --git a/octobot/configuration_manager.py b/octobot/configuration_manager.py index fd5da04341..e13f851a9f 100644 --- a/octobot/configuration_manager.py +++ b/octobot/configuration_manager.py @@ -95,6 +95,7 @@ def config_health_check(config: configuration.Configuration, in_backtesting: boo # 3 inform about configuration issues if not (in_backtesting or + get_distribution(config.config) == enums.OctoBotDistribution.NODE or trading_api.is_trader_enabled_in_config(config.config) or trading_api.is_trader_simulator_enabled_in_config(config.config)): logger.error(f"Real trader and trader simulator are deactivated in configuration. This will prevent OctoBot " @@ -221,7 +222,12 @@ def migrate_from_previous_config(config): def get_distribution(config: dict) -> enums.OctoBotDistribution: + if constants.FORCED_DISTRIBUTION: + # if there is a forced distribution, use it + return enums.OctoBotDistribution(constants.FORCED_DISTRIBUTION) try: + # if there is no forced distribution, use the distribution from the config return enums.OctoBotDistribution(config[common_constants.CONFIG_DISTRIBUTION]) except KeyError: - return enums.OctoBotDistribution.DEFAULT + # default distribution + return enums.OctoBotDistribution(common_constants.DEFAULT_DISTRIBUTION) diff --git a/octobot/constants.py b/octobot/constants.py index 2a3bf5d38d..c9e4ac1786 100644 --- a/octobot/constants.py +++ b/octobot/constants.py @@ -109,6 +109,19 @@ OCTOBOT_MARKET_MAKING_URL = os.getenv("OCTOBOT_MARKET_MAKING_URL", "https://market-making.octobot.cloud") +# sync server +SYNC_SERVER_URL = os.getenv("SYNC_SERVER_URL", "https://prod-sync.drakkar.software") +STAGING_SYNC_SERVER_URL = os.getenv("SYNC_SERVER_URL", "https://beta-sync.drakkar.software") +SYNC_CHAIN_ID = os.getenv("SYNC_CHAIN_ID", "evm:8453") +ENABLE_REPLICA_SERVER = os_util.parse_boolean_environment_var( + "ENABLE_REPLICA_SERVER", + os.getenv("ENABLE_LOCAL_SYNC_SERVER", "false"), # backward compat +) +REPLICA_SERVER_PORT = int(os.getenv("REPLICA_SERVER_PORT", os.getenv("LOCAL_SYNC_PORT", "3000"))) +REPLICA_WRITE_MODE = os.getenv("REPLICA_WRITE_MODE", "bidirectional") +REPLICA_SYNC_INTERVAL_MS = int(os.getenv("REPLICA_SYNC_INTERVAL_MS", "60000")) +REPLICA_DATA_DIR = os.getenv("REPLICA_DATA_DIR", "") + ERROR_TRACKER_DSN = os.getenv("ERROR_TRACKER_DSN") CONFIG_COMMUNITY = "community" @@ -119,6 +132,7 @@ CONFIG_COMMUNITY_PACKAGE_URLS = "package_urls" CONFIG_COMMUNITY_ENVIRONMENT = "environment" CONFIG_COMMUNITY_LOCAL_DATA_IDENTIFIER = "local_data_identifier" +CONFIG_COMMUNITY_WALLETS = "wallets" USE_BETA_EARLY_ACCESS = os_util.parse_boolean_environment_var("USE_BETA_EARLY_ACCESS", "false") USER_ACCOUNT_EMAIL = os.getenv("USER_ACCOUNT_EMAIL", "") USER_PASSWORD_TOKEN = os.getenv("USER_PASSWORD_TOKEN", None) @@ -126,8 +140,12 @@ COMMUNITY_BOT_ID = os.getenv("COMMUNITY_BOT_ID", "") IS_DEMO = os_util.parse_boolean_environment_var("IS_DEMO", "False") IS_CLOUD_ENV = os_util.parse_boolean_environment_var("IS_CLOUD_ENV", "false") +DEPLOYMENT_TIME = os.getenv("DEPLOYMENT_TIME") # format: ISO 8601, ex: 2026-02-21T08:08:42.325Z +DEFAULT_NEW_DEPLOYMENT_TIMEOUT = 2 * octobot_commons.constants.MINUTE_TO_SECONDS USE_FETCHED_BOT_CONFIG = os_util.parse_boolean_environment_var("USE_FETCHED_BOT_CONFIG", "false") +SHOULD_CHECK_TENTACLES = os_util.parse_boolean_environment_var("SHOULD_CHECK_TENTACLES", "true") CAN_INSTALL_TENTACLES = os_util.parse_boolean_environment_var("CAN_INSTALL_TENTACLES", str(not IS_CLOUD_ENV)) +INSTALL_DEFAULT_TENTACLES = os_util.parse_boolean_environment_var("INSTALL_DEFAULT_TENTACLES", "true") PH_TRACKING_ID = os.getenv("PH_TRACKING_ID", "phc_QSuFy6zqOXXKT7zAYboYS4nJShfKovpB172aa8X9nXf") # Profiles download urls to import at startup if missing, split by "," TO_DOWNLOAD_PROFILES = os.getenv("TO_DOWNLOAD_PROFILES", None) @@ -182,10 +200,14 @@ DEFAULT_TENTACLES_PACKAGE_NAME = "OctoBot-Default-Tentacles" # logs -LOGS_FOLDER = "logs" +DEFAULT_LOGS_FOLDER = "logs" +LOGS_FOLDER = os.getenv("LOGS_FOLDER", DEFAULT_LOGS_FOLDER) FORCED_LOG_LEVEL = os.getenv("FORCED_LOG_LEVEL", "") ENV_TRADING_ENABLE_DEBUG_LOGS = os_util.parse_boolean_environment_var("ENV_TRADING_ENABLE_DEBUG_LOGS", "False") +# distribution +FORCED_DISTRIBUTION = os.getenv("DISTRIBUTION") + # system ENABLE_CLOCK_SYNCH = os_util.parse_boolean_environment_var("ENABLE_CLOCK_SYNCH", "True") ENABLE_SYSTEM_WATCHER = os_util.parse_boolean_environment_var("ENABLE_SYSTEM_WATCHER", "True") @@ -218,11 +240,11 @@ # Store the path of the octobot directory from this file since it can change depending on the installation path # (local sources, python site-packages, ...) OCTOBOT_FOLDER = pathlib.Path(__file__).parent.absolute() -CONFIG_FOLDER = f"{OCTOBOT_FOLDER}/config" +CONFIG_FOLDER = f"{OCTOBOT_FOLDER}/{octobot_commons.constants.CONFIG_FOLDER}" SCHEMA = "schema" CONFIG_FILE_SCHEMA = f"{CONFIG_FOLDER}/config_{SCHEMA}.json" PROFILE_FILE_SCHEMA = f"{CONFIG_FOLDER}/profile_{SCHEMA}.json" -DEFAULT_CONFIG_FILE = f"{CONFIG_FOLDER}/default_config.json" +DEFAULT_CONFIG_FILE = os.getenv("DEFAULT_CONFIG_FILE", f"{CONFIG_FOLDER}/default_config.json") DEFAULT_PROFILE_FILE = f"{CONFIG_FOLDER}/default_profile.json" DEFAULT_PROFILE_AVATAR_FILE_NAME = "default_profile.png" DEFAULT_PROFILE_AVATAR = f"{CONFIG_FOLDER}/{DEFAULT_PROFILE_AVATAR_FILE_NAME}" diff --git a/octobot/databases_util.py b/octobot/databases_util.py deleted file mode 100644 index 55fe80a49c..0000000000 --- a/octobot/databases_util.py +++ /dev/null @@ -1,37 +0,0 @@ -# Drakkar-Software OctoBot-Trading -# Copyright (c) Drakkar-Software, All rights reserved. -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 3.0 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library -import octobot_trading.api as trading_api -import octobot_commons.databases as databases -import octobot_commons.optimization_campaign as optimization_campaign -import octobot_commons.constants as commons_constants -import octobot_commons.errors as commons_errors - - -def get_run_databases_identifier(config, tentacles_setup_config, trading_mode_class=None, enable_storage=True): - trading_mode = commons_constants.DEFAULT_STORAGE_TRADING_MODE - try: - trading_mode = trading_mode_class or trading_api.get_activated_trading_mode(tentacles_setup_config) - except commons_errors.ConfigTradingError: - # use default value - pass - return databases.RunDatabasesIdentifier( - trading_mode, - optimization_campaign.OptimizationCampaign.get_campaign_name(tentacles_setup_config), - backtesting_id=config.get(commons_constants.CONFIG_BACKTESTING_ID), - optimizer_id=config.get(commons_constants.CONFIG_OPTIMIZER_ID), - live_id=trading_api.get_current_bot_live_id(config), - enable_storage=enable_storage - ) diff --git a/octobot/enums.py b/octobot/enums.py index eddac38775..b8d1b785bf 100644 --- a/octobot/enums.py +++ b/octobot/enums.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. import enum +import octobot_commons.constants as common_constants class CommunityFeedType(enum.Enum): @@ -74,5 +75,8 @@ class OptimizerConfig(enum.Enum): class OctoBotDistribution(enum.Enum): - DEFAULT = "default" + DEFAULT = common_constants.DEFAULT_DISTRIBUTION MARKET_MAKING = "market_making" + PREDICTION_MARKET = "prediction_market" + NODE = "node" + SYNC = "sync" diff --git a/octobot/errors.py b/octobot/errors.py index 9972b04551..a5b7a6a768 100644 --- a/octobot/errors.py +++ b/octobot/errors.py @@ -13,6 +13,20 @@ # # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing class DisabledError(Exception): pass + + +class AutomationError(Exception): + pass + + +class InvalidAutomationConfigError(AutomationError): + def __init__(self, message: str, step_name: typing.Optional[str] = None): + super().__init__(f"[{step_name}] {message}" if step_name else message) + + +class AutomationStopped(AutomationError): + pass diff --git a/octobot/initializer.py b/octobot/initializer.py index 47cc89b058..ced0aa1628 100644 --- a/octobot/initializer.py +++ b/octobot/initializer.py @@ -18,7 +18,7 @@ import octobot_commons.databases as databases import octobot_commons.logging as logging import octobot_commons.errors as commons_errors -import octobot.databases_util as databases_util +import octobot_trading.api as trading_api class Initializer: @@ -40,7 +40,7 @@ async def create(self, init_bot_storage): # init bot storage await databases.init_bot_storage( self.octobot.bot_id, - databases_util.get_run_databases_identifier( + trading_api.get_run_databases_identifier( self.octobot.config, self.octobot.tentacles_setup_config ), diff --git a/octobot/logger.py b/octobot/logger.py index a30f578f7c..33a7ca25c2 100644 --- a/octobot/logger.py +++ b/octobot/logger.py @@ -14,7 +14,7 @@ # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. import logging -import logging.config as config +import logging.config import os import shutil import traceback @@ -48,11 +48,11 @@ def _log_uncaught_exceptions(ex_cls, ex, tb): logging.exception("{0}: {1}".format(ex_cls, ex)) -def init_logger(): +def init_logger(logs_folder: str = constants.DEFAULT_LOGS_FOLDER): try: - if not os.path.exists(constants.LOGS_FOLDER): - os.mkdir(constants.LOGS_FOLDER) - _load_logger_config() + if not os.path.exists(logs_folder): + os.mkdir(logs_folder) + _load_logger_config(logs_folder) init_bot_channel_logger() except KeyError: print( @@ -84,26 +84,36 @@ def init_bot_channel_logger(): BOT_CHANNEL_LOGGER = common_logging.get_logger("OctoBot Channel") -def _load_logger_config(): +def _load_logger_config(logs_folder: str): try: # use local logging file to allow users to customize the log level if not os.path.isfile(configuration_manager.get_user_local_config_file()): if not os.path.exists(commons_constants.USER_FOLDER): os.mkdir(commons_constants.USER_FOLDER) shutil.copyfile(constants.LOGGING_CONFIG_FILE, configuration_manager.get_user_local_config_file()) - config.fileConfig(configuration_manager.get_user_local_config_file()) + logging.config.fileConfig(configuration_manager.get_user_local_config_file()) + logger = logging.getLogger("Logging Configuration") + logger.info(f"Saving logs in '{os.path.join(os.getcwd(), logs_folder)}' folder") + if logs_folder != constants.DEFAULT_LOGS_FOLDER: + _set_log_folder(os.path.join(os.getcwd(), logs_folder)) if constants.FORCED_LOG_LEVEL: - logging.getLogger("Logging Configuration").info( + logger.info( f"Applying forced logging level {constants.FORCED_LOG_LEVEL}" ) common_logging.set_global_logger_level(constants.FORCED_LOG_LEVEL) except Exception as ex: - config.fileConfig(constants.LOGGING_CONFIG_FILE) + logging.config.fileConfig(constants.LOGGING_CONFIG_FILE) logging.getLogger("Logging Configuration").warning( f"Impossible to initialize local logging configuration file, using default one. {ex}" ) +def _set_log_folder(logs_folder: str): + for handler in logging.getLogger().handlers: + if isinstance(handler, logging.FileHandler): + handler.baseFilename = os.path.join(logs_folder, os.path.basename(handler.baseFilename)) + + async def init_exchange_chan_logger(exchange_id): await exchanges_channel.get_chan(channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, exchange_id).new_consumer( @@ -283,6 +293,14 @@ async def mark_price_callback( ) +async def markets_callback( + exchange: str, exchange_id: str, markets +): + BOT_CHANNEL_LOGGER.debug( + f"MARKETS : EXCHANGE = {exchange} || MARKET RELOADED" + ) + + def _filter_balance(balance: dict): if not balance: return balance, 0 @@ -392,6 +410,8 @@ async def matrix_callback( evaluator_type, eval_note, eval_note_type, + eval_note_description, + eval_note_metadata, exchange_name, cryptocurrency, symbol, @@ -402,6 +422,7 @@ async def matrix_callback( f"EVALUATOR = {evaluator_name} || EVALUATOR_TYPE = {evaluator_type} || " f"CRYPTOCURRENCY = {cryptocurrency} || SYMBOL = {symbol} || TF = {time_frame} " f"|| NOTE = {eval_note} [MATRIX id = {matrix_id}] " + f"|| DESCRIPTION = {eval_note_description if eval_note_description else ''}" ) diff --git a/octobot/octobot.py b/octobot/octobot.py index 28ade0279e..a34b78dba7 100644 --- a/octobot/octobot.py +++ b/octobot/octobot.py @@ -16,6 +16,7 @@ import asyncio import time import uuid +import typing import octobot_commons.constants as commons_constants import octobot_commons.enums as commons_enums @@ -36,6 +37,7 @@ import octobot.logger as logger import octobot.community as community import octobot.constants as constants +import octobot.enums as enums import octobot.configuration_manager as configuration_manager import octobot.task_manager as task_manager import octobot.octobot_channel_consumer as octobot_channel_consumer @@ -84,15 +86,15 @@ def __init__(self, config: configuration.Configuration, community_authenticator= self.community_auth = community_authenticator or community.CommunityAuthentication.create(community_config) self.community_auth.update(community_config) - # octobot_api to request the current instance - self.octobot_api = octobot_api.OctoBotAPI(self) - # octobot channel global consumer self.global_consumer = octobot_channel_consumer.OctoBotChannelGlobalConsumer(self) # octobot instance id self.bot_id = str(uuid.uuid4()) + # octobot_api to request the current instance + self.octobot_api = octobot_api.OctoBotAPI(self) + # Logger self.logger = logging.get_logger(self.__class__.__name__) @@ -210,20 +212,25 @@ async def stop(self): if self._init_metadata_run_task is not None and not self._init_metadata_run_task.done(): self._init_metadata_run_task.cancel() signals.SignalPublisher.instance().stop() - await self.evaluator_producer.stop() - await self.exchange_producer.stop() + if self.evaluator_producer is not None: + await self.evaluator_producer.stop() + if self.exchange_producer is not None: + await self.exchange_producer.stop() await self.community_auth.stop() - await self.service_feed_producer.stop() + if self.service_feed_producer is not None: + await self.service_feed_producer.stop() await profiles.stop_profile_synchronizer() await os_clock_sync.stop_clock_synchronizer() await system_resources_watcher.stop_system_resources_watcher() await service_api.stop_services() - await self.interface_producer.stop() + if self.interface_producer is not None: + await self.interface_producer.stop() await databases.close_bot_storage(self.bot_id) if self.automation is not None: await self.automation.stop() await self.global_consumer.stop() - + self.octobot_api.clear() + finally: self.stopped.set() self.logger.info("Stopped, now shutting down.") diff --git a/octobot/octobot_api.py b/octobot/octobot_api.py index bff85ea547..7503ba4d9a 100644 --- a/octobot/octobot_api.py +++ b/octobot/octobot_api.py @@ -13,16 +13,35 @@ # # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing + import octobot.constants as constants import octobot.commands as commands import octobot_commons.constants as commons_constants import octobot.automation as automation +import octobot_commons.singleton as singleton +import octobot_commons.enums as commons_enums + + +class OctoBotAPIProvider(singleton.Singleton): + def __init__(self): + self.octobot_api_by_bot_id: dict[str, "OctoBotAPI"] = {} + + def get_api(self, bot_id: str) -> "OctoBotAPI": + return self.octobot_api_by_bot_id[bot_id] + + def register_api(self, bot_id: str, octobot_api: "OctoBotAPI"): + self.octobot_api_by_bot_id[bot_id] = octobot_api + + def unregister_api(self, bot_id: str): + self.octobot_api_by_bot_id.pop(bot_id) class OctoBotAPI: def __init__(self, octobot): self._octobot = octobot + OctoBotAPIProvider.instance().register_api(self._octobot.bot_id, self) def is_initialized(self) -> bool: return self._octobot.initialized @@ -77,13 +96,41 @@ def get_interface(self, interface_class): if isinstance(interface, interface_class): return interface - def run_in_main_asyncio_loop(self, coroutine, log_exceptions=True, - timeout=commons_constants.DEFAULT_FUTURE_TIMEOUT): - return self._octobot.run_in_main_asyncio_loop(coroutine, log_exceptions=log_exceptions, timeout=timeout) + def run_in_main_asyncio_loop( + self, coroutine, log_exceptions=True, timeout=commons_constants.DEFAULT_FUTURE_TIMEOUT + ): + return self._octobot.run_in_main_asyncio_loop( + coroutine, log_exceptions=log_exceptions, timeout=timeout + ) def run_in_async_executor(self, coroutine): return self._octobot.task_manager.run_in_async_executor(coroutine) + async def stop_all_trading_modes_and_pause_traders( + self, + stop_reason: commons_enums.StopReason, + execution_details: typing.Optional[automation.ExecutionDetails], + schedule_bot_stop: bool, + ): + stop_details = f"Error status: {stop_reason.value}: {execution_details=}" + if self._octobot.exchange_producer.are_all_trading_modes_stoppped_and_traders_paused(): + self._octobot.logger.debug( + f"Skipping stop all trading modes and pause traders request. {stop_details}" + ) + return + try: + self._octobot.logger.info( + f"Scheduling bot stop. {stop_details}" + ) + await self._octobot.exchange_producer.stop_all_trading_modes_and_pause_traders(execution_details) + except Exception as err: + self._octobot.logger.exception(err, True, f"Error when stopping trading modes: {err}") + await self._octobot.community_auth.community_bot.on_trading_modes_stopped_and_traders_paused( + stop_reason, + execution_details, + schedule_bot_stop, + ) + def stop_tasks(self) -> None: self._octobot.task_manager.stop_tasks() @@ -96,3 +143,6 @@ def restart_bot() -> None: def update_bot(self) -> None: commands.update_bot(self) + + def clear(self): + OctoBotAPIProvider.instance().unregister_api(self._octobot.bot_id) diff --git a/octobot/octobot_backtesting_factory.py b/octobot/octobot_backtesting_factory.py index e0dfd223f4..f42ade25f3 100644 --- a/octobot/octobot_backtesting_factory.py +++ b/octobot/octobot_backtesting_factory.py @@ -45,7 +45,8 @@ async def initialize(self): run_on_common_part_only=self.run_on_common_part_only, join_backtesting_timeout=join_backtesting_timeout, enable_logs=self.enable_logs, - enforce_total_databases_max_size_after_run=True) + enforce_total_databases_max_size_after_run=True, + services_config=self.config) await octobot_backtesting_api.initialize_and_run_independent_backtesting(self.independent_backtesting, log_errors=False) await octobot_backtesting_api.join_independent_backtesting(self.independent_backtesting, diff --git a/octobot/octobot_node.py b/octobot/octobot_node.py new file mode 100644 index 0000000000..ae82fb9185 --- /dev/null +++ b/octobot/octobot_node.py @@ -0,0 +1,39 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +import octobot_commons.configuration as configuration + +import octobot.octobot as octobot +import octobot.logger as logger +import octobot.producers as producers + +class OctoBotNode(octobot.OctoBot): + def __init__(self, config: configuration.Configuration, community_authenticator=None, ignore_config=False, reset_trading_history=False, startup_messages=None): + super().__init__( + config=config, + community_authenticator=community_authenticator, + ignore_config=ignore_config, + reset_trading_history=reset_trading_history, + startup_messages=startup_messages + ) + + async def create_producers(self): + logger_consumer = await logger.init_octobot_chan_logger(self.bot_id) + self.global_consumer.add_consumer(logger_consumer) + self.interface_producer = producers.InterfaceProducer(self.global_consumer.octobot_channel, self) + + async def start_producers(self): + await self.interface_producer.run() diff --git a/octobot/producers/evaluator_producer.py b/octobot/producers/evaluator_producer.py index c8e295e2c1..ef13a71bc5 100644 --- a/octobot/producers/evaluator_producer.py +++ b/octobot/producers/evaluator_producer.py @@ -35,6 +35,7 @@ def __init__(self, channel, octobot): self.tentacles_setup_config = self.octobot.tentacles_setup_config self.matrix_id = None + self.has_real_time_evaluators_configured = False async def start(self): await evaluator_api.initialize_evaluators(self.octobot.config, self.tentacles_setup_config) @@ -42,6 +43,9 @@ async def start(self): await evaluator_api.create_evaluator_channels( self.matrix_id, is_backtesting=backtesting_api.is_backtesting_enabled(self.octobot.config) ) + self.has_real_time_evaluators_configured = len(evaluator_api.get_activated_real_time_evaluators_classes( + self.tentacles_setup_config + )) > 0 await logger.init_evaluator_chan_logger(self.matrix_id) async def create_evaluators(self, exchange_configuration): diff --git a/octobot/producers/exchange_producer.py b/octobot/producers/exchange_producer.py index 2911b16c6b..a1fde109c6 100644 --- a/octobot/producers/exchange_producer.py +++ b/octobot/producers/exchange_producer.py @@ -14,14 +14,15 @@ # You should have received a copy of the GNU General Public # License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. import asyncio +import typing import octobot_commons.enums as common_enums -import octobot_commons.constants as common_constants import octobot_trading.api as trading_api import octobot_trading.octobot_channel_consumer as trading_channel_consumer import octobot.channels as octobot_channel +import octobot.automation as automation class ExchangeProducer(octobot_channel.OctoBotChannelProducer): @@ -31,11 +32,11 @@ def __init__(self, channel, octobot, backtesting, ignore_config=False): self.ignore_config = ignore_config self.backtesting = backtesting - self.exchange_manager_ids = [] - - self.to_create_exchanges_count = 0 - self.created_all_exchanges = asyncio.Event() + self.exchange_manager_ids: list[str] = [] + self.to_create_exchanges_count: int = 0 + self.created_all_exchanges: asyncio.Event = asyncio.Event() + async def start(self): self.to_create_exchanges_count = 0 self.created_all_exchanges.clear() @@ -49,6 +50,33 @@ def register_created_exchange_id(self, exchange_id): self.created_all_exchanges.set() self.logger.debug(f"Exchange(s) created") + def are_all_trading_modes_stoppped_and_traders_paused(self) -> bool: + return all( + trading_api.are_all_trading_modes_stoppped_and_trader_paused(exchange_manager) + for exchange_manager in trading_api.get_exchange_managers_from_exchange_ids( + self.exchange_manager_ids + ) + ) + + async def stop_all_trading_modes_and_pause_traders( + self, execution_details: typing.Optional[automation.ExecutionDetails] + ): + for exchange_id in self.exchange_manager_ids: + await self._stop_exchange_trading_modes_and_pause_trader(exchange_id, execution_details) + + async def _stop_exchange_trading_modes_and_pause_trader( + self, exchange_id: str, execution_details: typing.Optional[automation.ExecutionDetails] + ): + await self.send( + bot_id=self.octobot.bot_id, + subject=common_enums.OctoBotChannelSubjects.UPDATE.value, + action=trading_channel_consumer.OctoBotChannelTradingActions.STOP_EXCHANGE_TRADING_MODES_AND_PAUSE_TRADER.value, + data={ + trading_channel_consumer.OctoBotChannelTradingDataKeys.EXCHANGE_ID.value: exchange_id, + trading_channel_consumer.OctoBotChannelTradingDataKeys.REASON.value: execution_details.description if execution_details else None, + } + ) + async def stop(self): self.logger.debug("Stopping ...") for exchange_manager in trading_api.get_exchange_managers_from_exchange_ids(self.exchange_manager_ids): @@ -56,16 +84,20 @@ async def stop(self): self.logger.debug("Stopped") async def create_exchange(self, exchange_name, backtesting): - await self.send(bot_id=self.octobot.bot_id, - subject=common_enums.OctoBotChannelSubjects.CREATION.value, - action=trading_channel_consumer.OctoBotChannelTradingActions.EXCHANGE.value, - data={ - trading_channel_consumer.OctoBotChannelTradingDataKeys.TENTACLES_SETUP_CONFIG.value: - self.octobot.tentacles_setup_config, - trading_channel_consumer.OctoBotChannelTradingDataKeys.MATRIX_ID.value: - self.octobot.evaluator_producer.matrix_id, - trading_channel_consumer.OctoBotChannelTradingDataKeys.BACKTESTING.value: backtesting, - trading_channel_consumer.OctoBotChannelTradingDataKeys.EXCHANGE_CONFIG.value: - self.octobot.config, - trading_channel_consumer.OctoBotChannelTradingDataKeys.EXCHANGE_NAME.value: exchange_name, - }) + await self.send( + bot_id=self.octobot.bot_id, + subject=common_enums.OctoBotChannelSubjects.CREATION.value, + action=trading_channel_consumer.OctoBotChannelTradingActions.EXCHANGE.value, + data={ + trading_channel_consumer.OctoBotChannelTradingDataKeys.TENTACLES_SETUP_CONFIG.value: + self.octobot.tentacles_setup_config, + trading_channel_consumer.OctoBotChannelTradingDataKeys.MATRIX_ID.value: + self.octobot.evaluator_producer.matrix_id, + trading_channel_consumer.OctoBotChannelTradingDataKeys.ENABLE_REALTIME_DATA_FETCHING.value: + self.octobot.evaluator_producer.has_real_time_evaluators_configured, + trading_channel_consumer.OctoBotChannelTradingDataKeys.BACKTESTING.value: backtesting, + trading_channel_consumer.OctoBotChannelTradingDataKeys.EXCHANGE_CONFIG.value: + self.octobot.config, + trading_channel_consumer.OctoBotChannelTradingDataKeys.EXCHANGE_NAME.value: exchange_name, + } + ) diff --git a/octobot/producers/interface_producer.py b/octobot/producers/interface_producer.py index f4accbc3aa..0abd746662 100644 --- a/octobot/producers/interface_producer.py +++ b/octobot/producers/interface_producer.py @@ -74,15 +74,18 @@ async def register_notifier(self, instance): await service_api.process_pending_notifications() async def _register_existing_exchanges(self, instance): - for exchange_id in self.octobot.exchange_producer.exchange_manager_ids: - await self._register_exchange(instance, exchange_id) + if self.octobot.exchange_producer is not None: + for exchange_id in self.octobot.exchange_producer.exchange_manager_ids: + await self._register_exchange(instance, exchange_id) async def _create_interfaces(self, in_backtesting): # do not overwrite data in case of inner bots init (backtesting) - if service_interfaces.get_bot_api() is None: - service_api.initialize_global_project_data(self.octobot.octobot_api, - constants.PROJECT_NAME, - constants.LONG_VERSION) + try: + service_interfaces.get_bot_api() + except KeyError: + service_api.initialize_global_project_data( + self.octobot.bot_id, constants.PROJECT_NAME, constants.LONG_VERSION + ) interface_factory = service_api.create_interface_factory(self.octobot.config) interface_list = interface_factory.get_available_interfaces() for interface_class in interface_list: @@ -145,7 +148,7 @@ def _is_interface_relevant(self, interface_class, backtesting_enabled): tentacles_manager_api.is_tentacle_activated_in_tentacles_setup_config( self.octobot.tentacles_setup_config, interface_class.get_name()) and \ - all(service.get_is_enabled(self.octobot.config) for service in interface_class.REQUIRED_SERVICES) and \ + all(service.get_is_enabled(self.octobot.config) for service in (interface_class.REQUIRED_SERVICES or [])) and \ (not backtesting_enabled or ( backtesting_enabled and service_api.is_enabled_in_backtesting(interface_class))) diff --git a/octobot/task_manager.py b/octobot/task_manager.py index 2a3f1f96e2..2205c26278 100644 --- a/octobot/task_manager.py +++ b/octobot/task_manager.py @@ -78,9 +78,11 @@ def run_forever(self, coroutine): target=self.run_bot_in_thread, args=(coroutine,), name="OctoBot Main Thread") self.loop_forever_thread.start() - if sys.version_info.minor >= 9: - # only required for python 3.9 + - self.loop_forever_thread.join() + # Avoid a single indefinite join() on the main thread: on Windows the main + # thread must return to the interpreter periodically or SIGINT (Ctrl+C) is + # not delivered to the custom handler. Short timeouts mirror that wake-up. + while self.loop_forever_thread.is_alive(): + self.loop_forever_thread.join(timeout=1) def stop_tasks(self, stop_octobot=True): self.logger.info("Stopping tasks...") diff --git a/packages/README.md b/packages/README.md new file mode 100644 index 0000000000..f2888aa3ec --- /dev/null +++ b/packages/README.md @@ -0,0 +1,172 @@ +# OctoBot Packages + +OctoBot is organized into self-contained packages under `packages/`. Each package encapsulates a specific domain and can contain Python code, Rust code, or both. + +## Package Types + +### Python Package + +A standard Python package managed by Pants. This is the most common type. + +``` +packages/mypackage/ + mypackage/ + __init__.py + module.py + tests/ + test_module.py + BUILD + requirements.txt # optional + full_requirements.txt # optional +``` + +**BUILD file:** +```python +python_sources(name="mypackage", sources=["mypackage/**/*.py"]) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[":mypackage", "//:dev_reqs"], +) +``` + +**Registration** (root `BUILD`): +- Add the package to `PACKAGE_SOURCES` +- Add its requirements to `PACKAGE_REQS` / `PACKAGE_FULL_REQS` if applicable + +**Registration** (`pants.toml`): +- Add the package to `root_patterns` under `[source]` + +**Registration** (`.github/workflows/main.yml`): +- Add the package to the test matrix + +### Rust + Python Package + +A package that contains both Python code and colocated Rust crates compiled via [PyO3](https://pyo3.rs/) and [maturin](https://www.maturin.rs/). The Rust code is exposed to Python as a compiled extension module. + +``` +packages/mypackage/ + mypackage/ + __init__.py + core.py # imports from mypackage_rs + tests/ + test_core.py + crates/ + mypackage_core/ # pure Rust library crate + Cargo.toml + src/ + lib.rs + mypackage_py/ # PyO3 bridge crate + Cargo.toml + pyproject.toml # maturin build config + src/ + lib.rs + python/ + mypackage_rs/ # Python stub package + __init__.py # re-exports from _core + BUILD # Pants package_shell_command + crates/ + mypackage_core/ + BUILD # files target for Rust sources + BUILD + standard.rc # pylint config with extension-pkg-whitelist + requirements.txt +``` + +#### Rust Crate Layout + +- **`mypackage_core/`**: Pure Rust library with the actual logic. No Python dependencies. Testable with `cargo test`. +- **`mypackage_py/`**: PyO3 bridge that wraps `mypackage_core` functions as Python-callable. Built by maturin into a wheel (`mypackage_rs`). + +#### Cargo Configuration + +The root `Cargo.toml` defines a workspace that auto-discovers all crates: + +```toml +[workspace] +members = ["packages/*/crates/*"] +resolver = "2" +``` + +Each bridge crate has a `pyproject.toml` for maturin: + +```toml +[build-system] +requires = ["maturin>=1.7,<2.0"] +build-backend = "maturin" + +[project] +name = "mypackage-rs" +version = "0.1.0" +requires-python = ">=3.12" + +[tool.maturin] +features = ["pyo3/extension-module"] +bindings = "pyo3" +module-name = "mypackage_rs._core" +python-source = "python" +``` + +#### Pants BUILD Files + +**`crates/mypackage_core/BUILD`** - exposes Rust sources to the sandbox: +```python +files( + name="mypackage_core_sources", + sources=["Cargo.toml", "src/**/*.rs"], +) +``` + +**`crates/mypackage_py/BUILD`** - builds the Rust wheel via maturin: +```python +files( + name="mypackage_py_sources", + sources=["Cargo.toml", "pyproject.toml", "src/**/*.rs", "python/**/*.py"], +) + +package_shell_command( + name="mypackage-rs", + command="maturin build --release --out .", + execution_dependencies=[ + ":mypackage_py_sources", + "packages/mypackage/crates/mypackage_core:mypackage_core_sources", + "//:cargo_workspace", + ], + tools=["maturin", "cargo", "rustc", "cc", "python3", "ar", "bash"], + output_files=["*.whl"], + output_path="", + workdir="/", + timeout=300, + description="Build mypackage-rs maturin wheel", +) +``` + +Key fields: +- `workdir="/"` sets the working directory to the sandbox root so the Cargo workspace resolves correctly +- `output_path=""` places the wheel at `dist/` root alongside the OctoBot wheel +- `execution_dependencies` brings Rust sources and the root `Cargo.toml`/`Cargo.lock` into the sandbox + +#### Pylint Configuration + +Create a `standard.rc` in the package directory to whitelist the compiled extension: + +```ini +[MASTER] +extension-pkg-whitelist=mypackage_rs +fail-under=10.0 +ignore=CVS,tests,additional_tests +``` + +#### CI Registration + +In `.github/workflows/main.yml`: + +1. **Build job** - Rust wheels are auto-discovered. The CI runs: + ```yaml + pants package :OctoBot $(pants list --filter-target-type=package_shell_command ::) + ``` + Any `package_shell_command` target in the repo is automatically included. No manual registration needed. + +2. **Test matrix** - the package is automatically detected as Rust-enabled via the `HAS_RUST` env var (uses `hashFiles` on `crates/**/Cargo.toml`), which triggers Rust linting and backend tests. + diff --git a/packages/agents/BUILD b/packages/agents/BUILD new file mode 100644 index 0000000000..c34ad60068 --- /dev/null +++ b/packages/agents/BUILD @@ -0,0 +1,18 @@ +python_sources(name="octobot_agents", sources=["octobot_agents/**/*.py"]) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_agents", + ":reqs", + ":full_reqs", + "//:dev_reqs", + "packages/commons:octobot_commons", + "packages/commons:reqs", + "packages/commons:full_reqs", + "packages/services:octobot_services", + "packages/services:reqs", + "packages/services:full_reqs", + ], +) \ No newline at end of file diff --git a/packages/agents/CHANGELOG.md b/packages/agents/CHANGELOG.md new file mode 100644 index 0000000000..e12c17ec2e --- /dev/null +++ b/packages/agents/CHANGELOG.md @@ -0,0 +1,9 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.0.0] - 2026-02-10 +### Added +- Initial package sources diff --git a/packages/agents/README.md b/packages/agents/README.md new file mode 100644 index 0000000000..a702ad5d71 --- /dev/null +++ b/packages/agents/README.md @@ -0,0 +1 @@ +# OctoBot Agents diff --git a/packages/agents/octobot_agents/__init__.py b/packages/agents/octobot_agents/__init__.py new file mode 100644 index 0000000000..2429b58d26 --- /dev/null +++ b/packages/agents/octobot_agents/__init__.py @@ -0,0 +1,209 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents import agent +from octobot_agents.agent import ( + AbstractAgentChannel, + AbstractAgentChannelProducer, + AbstractAgentChannelConsumer, + AbstractAIAgentChannel, + AbstractAIAgentChannelProducer, + AbstractAIAgentChannelConsumer, + MemoryAgentMixin, + # Deep Agent + DEEP_AGENTS_AVAILABLE, + AbstractDeepAgentChannel, + AbstractDeepAgentChannelConsumer, + AbstractDeepAgentChannelProducer, + create_memory_backend, + get_agent_memory_path, + build_dictionary_subagent, + build_compiled_subagent, + build_subagents_from_agents, + build_subagents_from_producers, + create_deep_agent_safe, + create_supervisor_agent, + create_team_deep_agent, + # Skills utilities + load_skill_from_file, + discover_skills, + create_skills_files_dict, + # HITL utilities + create_interrupt_config, + build_hitl_decision, +) + +from octobot_agents import storage +from octobot_agents.storage import ( + AbstractMemoryStorage, + JSONMemoryStorage, + create_memory_storage, + get_memory_tools, + execute_memory_tool, +) + +from octobot_agents import utils +from octobot_agents.utils import ( + extract_json_from_content, + extract_json_between_braces, + extract_json_from_markdown, + extract_json_from_xml_tags, + preprocess_json_content, +) + +from octobot_agents import team +from octobot_agents.team import ( + AbstractAgentsTeamChannel, + AbstractAgentsTeamChannelProducer, + AbstractAgentsTeamChannelConsumer, + AbstractSyncAgentsTeamChannelProducer, + AbstractLiveAgentsTeamChannelProducer, + TeamManagerMixin, + CriticAgentMixin, + JudgeAgentMixin, + # Deep Agents Team + AbstractDeepAgentsTeamChannel, + AbstractDeepAgentsTeamChannelConsumer, + AbstractDeepAgentsTeamChannelProducer, +) + +from octobot_agents import errors +from octobot_agents.errors import ( + AgentError, + TeamConfigurationError, + MissingManagerError, + MissingRequiredInputError, + AgentConfigurationError, + StorageError, + UnsupportedStorageTypeError, + DeepAgentError, + DeepAgentNotAvailableError, + SubagentError, + SubagentTimeoutError, + SupervisorError, + DebateError, + DebateConvergenceError, + MemoryPathError, + ToolExecutionError, +) + +from octobot_agents import enums +from octobot_agents.enums import ( + MemoryStorageType, + StepType, + JudgeDecisionType, + AgentRole, + SubagentMode, + ToolCallMode, + MemoryScope, + ExecutionStatus, +) + +from octobot_agents import models +from octobot_agents.models import ( + SubagentConfig, + MemoryEntry, + TodoItem, + DeepAgentResult, + TeamExecutionResult, + SupervisorState, +) + +__all__ = [ + "AbstractAgentChannel", + "AbstractAgentChannelProducer", + "AbstractAgentChannelConsumer", + "AbstractAIAgentChannel", + "AbstractAIAgentChannelProducer", + "AbstractAIAgentChannelConsumer", + "AbstractAgentsTeamChannel", + "AbstractAgentsTeamChannelProducer", + "AbstractAgentsTeamChannelConsumer", + "AbstractSyncAgentsTeamChannelProducer", + "AbstractLiveAgentsTeamChannelProducer", + "AbstractMemoryStorage", + "JSONMemoryStorage", + "MemoryAgentMixin", + "create_memory_storage", + "get_memory_tools", + "execute_memory_tool", + "CriticAgentMixin", + "JudgeAgentMixin", + "AgentError", + "TeamConfigurationError", + "MissingManagerError", + "MissingRequiredInputError", + "AgentConfigurationError", + "StorageError", + "UnsupportedStorageTypeError", + # Deep Agent errors + "DeepAgentError", + "DeepAgentNotAvailableError", + "SubagentError", + "SubagentTimeoutError", + "SupervisorError", + "DebateError", + "DebateConvergenceError", + "MemoryPathError", + "ToolExecutionError", + # Deep Agent (from agent/channels/deep_agent.py) + "DEEP_AGENTS_AVAILABLE", + "AbstractDeepAgentChannel", + "AbstractDeepAgentChannelConsumer", + "AbstractDeepAgentChannelProducer", + "create_memory_backend", + "get_agent_memory_path", + "build_dictionary_subagent", + "build_compiled_subagent", + "build_subagents_from_agents", + "build_subagents_from_producers", + "create_deep_agent_safe", + "create_supervisor_agent", + "create_team_deep_agent", + # Skills utilities + "load_skill_from_file", + "discover_skills", + "create_skills_files_dict", + # HITL utilities + "create_interrupt_config", + "build_hitl_decision", + # Utilities + "extract_json_from_content", + "extract_json_between_braces", + "extract_json_from_markdown", + "extract_json_from_xml_tags", + "preprocess_json_content", + # Deep Agents Team + "AbstractDeepAgentsTeamChannel", + "AbstractDeepAgentsTeamChannelConsumer", + "AbstractDeepAgentsTeamChannelProducer", + # Enums + "MemoryStorageType", + "StepType", + "JudgeDecisionType", + "AgentRole", + "SubagentMode", + "ToolCallMode", + "MemoryScope", + "ExecutionStatus", + # Deep Agent models + "SubagentConfig", + "MemoryEntry", + "TodoItem", + "DeepAgentResult", + "TeamExecutionResult", + "SupervisorState", +] diff --git a/packages/agents/octobot_agents/agent/__init__.py b/packages/agents/octobot_agents/agent/__init__.py new file mode 100644 index 0000000000..223364ef51 --- /dev/null +++ b/packages/agents/octobot_agents/agent/__init__.py @@ -0,0 +1,92 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.agent import channels +from octobot_agents.agent.channels import ( + AbstractAgentChannel, + AbstractAgentChannelConsumer, + AbstractAgentChannelProducer, + AbstractAIAgentChannel, + AbstractAIAgentChannelConsumer, + AbstractAIAgentChannelProducer, + # Deep Agent + DEEP_AGENTS_AVAILABLE, + AbstractDeepAgentChannel, + AbstractDeepAgentChannelConsumer, + AbstractDeepAgentChannelProducer, + create_memory_backend, + get_agent_memory_path, + build_dictionary_subagent, + build_compiled_subagent, + build_subagents_from_agents, + build_subagents_from_producers, + create_deep_agent_safe, + create_supervisor_agent, + create_team_deep_agent, + # Skills utilities + load_skill_from_file, + discover_skills, + create_skills_files_dict, + # HITL utilities + create_interrupt_config, + build_hitl_decision, +) + +from octobot_agents import storage +from octobot_agents.storage import ( + AbstractMemoryStorage, + JSONMemoryStorage, + create_memory_storage, +) + +from octobot_agents.agent import memory +from octobot_agents.agent.memory import ( + MemoryAgentMixin, +) + +__all__ = [ + "AbstractAgentChannel", + "AbstractAgentChannelConsumer", + "AbstractAgentChannelProducer", + "AbstractAIAgentChannel", + "AbstractAIAgentChannelConsumer", + "AbstractAIAgentChannelProducer", + "AbstractMemoryStorage", + "JSONMemoryStorage", + "MemoryAgentMixin", + "create_memory_storage", + # Deep Agent + "DEEP_AGENTS_AVAILABLE", + "AbstractDeepAgentChannel", + "AbstractDeepAgentChannelConsumer", + "AbstractDeepAgentChannelProducer", + "create_memory_backend", + "get_agent_memory_path", + "build_dictionary_subagent", + "build_compiled_subagent", + "build_subagents_from_agents", + "build_subagents_from_producers", + "create_deep_agent_safe", + "create_supervisor_agent", + "create_team_deep_agent", + # Skills utilities + "load_skill_from_file", + "discover_skills", + "create_skills_files_dict", + # HITL utilities + "create_interrupt_config", + "build_hitl_decision", +] diff --git a/packages/agents/octobot_agents/agent/channels/__init__.py b/packages/agents/octobot_agents/agent/channels/__init__.py new file mode 100644 index 0000000000..76bbcadc97 --- /dev/null +++ b/packages/agents/octobot_agents/agent/channels/__init__.py @@ -0,0 +1,80 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.agent.channels.agent import ( + AbstractAgentChannel, + AbstractAgentChannelConsumer, + AbstractAgentChannelProducer, +) + +from octobot_agents.agent.channels.ai_agent import ( + AbstractAIAgentChannel, + AbstractAIAgentChannelConsumer, + AbstractAIAgentChannelProducer, +) + +from octobot_agents.agent.channels.deep_agent import ( + DEEP_AGENTS_AVAILABLE, + AbstractDeepAgentChannel, + AbstractDeepAgentChannelConsumer, + AbstractDeepAgentChannelProducer, + create_memory_backend, + get_agent_memory_path, + build_dictionary_subagent, + build_compiled_subagent, + build_subagents_from_agents, + build_subagents_from_producers, + create_deep_agent_safe, + create_supervisor_agent, + create_team_deep_agent, + # Skills utilities + load_skill_from_file, + discover_skills, + create_skills_files_dict, + # HITL utilities + create_interrupt_config, + build_hitl_decision, +) + +__all__ = [ + "AbstractAgentChannel", + "AbstractAgentChannelConsumer", + "AbstractAgentChannelProducer", + "AbstractAIAgentChannel", + "AbstractAIAgentChannelConsumer", + "AbstractAIAgentChannelProducer", + # Deep Agent + "DEEP_AGENTS_AVAILABLE", + "AbstractDeepAgentChannel", + "AbstractDeepAgentChannelConsumer", + "AbstractDeepAgentChannelProducer", + "create_memory_backend", + "get_agent_memory_path", + "build_dictionary_subagent", + "build_compiled_subagent", + "build_subagents_from_agents", + "build_subagents_from_producers", + "create_deep_agent_safe", + "create_supervisor_agent", + "create_team_deep_agent", + # Skills utilities + "load_skill_from_file", + "discover_skills", + "create_skills_files_dict", + # HITL utilities + "create_interrupt_config", + "build_hitl_decision", +] diff --git a/packages/agents/octobot_agents/agent/channels/agent.py b/packages/agents/octobot_agents/agent/channels/agent.py new file mode 100644 index 0000000000..b8fab6a5d6 --- /dev/null +++ b/packages/agents/octobot_agents/agent/channels/agent.py @@ -0,0 +1,186 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import abc +import typing + +import async_channel.enums as channel_enums +import async_channel.constants as channel_constants +import async_channel.channels as channels +import async_channel.consumer as consumer +import async_channel.producer as producer + +import octobot_commons.logging as logging + +import octobot_agents.constants as constants + + +class AbstractAgentChannelConsumer(consumer.Consumer): + """ + Abstract consumer for agent channels. + + Consumers receive agent execution results pushed by producers. + """ + __metaclass__ = abc.ABCMeta + + +class AbstractAgentChannelProducer(producer.Producer): + """ + Abstract producer for agent channels. + + Simple base class following the service feed pattern. + Producers execute agent logic and push results to consumers. + """ + __metaclass__ = abc.ABCMeta + MAX_RETRIES: int = constants.AGENT_DEFAULT_MAX_RETRIES + + +class AbstractAgentChannel(channels.Channel): + """ + Abstract channel for agents with agent_name and agent_id filtering. + + Agent tentacles should inherit from this class and define their own channel. + Example: + class TechnicalAnalysisAIAgentChannel(AbstractAgentChannel): + OUTPUT_SCHEMA = TechnicalAnalysisOutput + """ + __metaclass__ = abc.ABCMeta + + PRODUCER_CLASS = AbstractAgentChannelProducer + CONSUMER_CLASS = AbstractAgentChannelConsumer + + VERSION = constants.AGENT_DEFAULT_VERSION + + OUTPUT_SCHEMA: typing.Optional[typing.Type] = None + + DEFAULT_PRIORITY_LEVEL = channel_enums.ChannelConsumerPriorityLevels.HIGH.value + + def __init__( + self, + team_name: typing.Optional[str] = None, + team_id: typing.Optional[str] = None, + ): + """ + Initialize the agent channel. + + Args: + team_name: Optional name of the team this channel belongs to. + team_id: Optional unique identifier for the team instance. + """ + super().__init__() + self.team_name = team_name + self.team_id = team_id + self.logger = logging.get_logger(self.__class__.__name__) + + @classmethod + def get_output_schema(cls) -> typing.Optional[typing.Type]: + """ + Get the Pydantic model class for this channel's output. + + Override OUTPUT_SCHEMA in subclasses to define the expected output format. + This schema is used by _call_llm() as the default response_schema. + + Returns: + The Pydantic BaseModel class, or None if not defined. + """ + return cls.OUTPUT_SCHEMA + + # pylint: disable=arguments-renamed + async def new_consumer( + self, + callback: typing.Callable = None, + consumer_instance: "AbstractAgentChannelConsumer" = None, + size: int = 0, + priority_level: int = DEFAULT_PRIORITY_LEVEL, + agent_name: str = channel_constants.CHANNEL_WILDCARD, + agent_id: str = channel_constants.CHANNEL_WILDCARD, + **kwargs, + ) -> "AbstractAgentChannelConsumer": + """ + Create a new consumer for this channel. + + Args: + callback: Method to call when consuming queue data. + consumer_instance: Existing consumer instance to use. + size: Queue size (0 = unlimited). + priority_level: Consumer priority level. + agent_name: Filter by agent name (wildcard = all agents). + agent_id: Filter by agent id (wildcard = all instances). + **kwargs: Additional arguments. + + Returns: + The created consumer instance. + """ + consumer_inst = ( + consumer_instance + if consumer_instance + else self.CONSUMER_CLASS(callback, size=size, priority_level=priority_level) + ) + await self._add_new_consumer_and_run( + consumer_inst, + agent_name=agent_name, + agent_id=agent_id, + **kwargs, + ) + await self._check_producers_state() + return consumer_inst + + def get_filtered_consumers( + self, + agent_name: str = channel_constants.CHANNEL_WILDCARD, + agent_id: str = channel_constants.CHANNEL_WILDCARD, + ) -> list: + """ + Get consumers matching the specified filters. + + Args: + agent_name: Filter by agent name. + agent_id: Filter by agent id. + + Returns: + List of matching consumer instances. + """ + return self.get_consumer_from_filters({ + constants.AGENT_NAME_KEY: agent_name, + constants.AGENT_ID_KEY: agent_id, + }) + + # pylint: disable=arguments-renamed + async def _add_new_consumer_and_run( + self, + consumer_inst: "AbstractAgentChannelConsumer", + agent_name: str = channel_constants.CHANNEL_WILDCARD, + agent_id: str = channel_constants.CHANNEL_WILDCARD, + **kwargs, + ) -> None: + """ + Add consumer to the channel and start it. + + Args: + consumer_inst: The consumer instance to add. + agent_name: Agent name filter for this consumer. + agent_id: Agent id filter for this consumer. + """ + self.add_new_consumer( + consumer_inst, + { + constants.AGENT_NAME_KEY: agent_name, + constants.AGENT_ID_KEY: agent_id, + }, + ) + await consumer_inst.run(with_task=not self.is_synchronized) + self.logger.debug( + f"Consumer started for agent_name={agent_name}, agent_id={agent_id}: {consumer_inst}" + ) diff --git a/packages/agents/octobot_agents/agent/channels/ai_agent.py b/packages/agents/octobot_agents/agent/channels/ai_agent.py new file mode 100644 index 0000000000..2c383fbe2d --- /dev/null +++ b/packages/agents/octobot_agents/agent/channels/ai_agent.py @@ -0,0 +1,459 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import abc +import contextlib +import json +import typing + +import octobot_commons.logging as logging + +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.constants as constants +import octobot_agents.storage as storage +import octobot_agents.enums as enums +import octobot_services.services as services +import octobot_services.enums as services_enums +import octobot_services.errors as services_errors + +class AbstractAIAgentChannel(agent_channels.AbstractAgentChannel): + """ + Channel for AI agents. + + Inherits from AbstractAgentChannel with no additional functionality. + """ + __metaclass__ = abc.ABCMeta + + +class AbstractAIAgentChannelConsumer(agent_channels.AbstractAgentChannelConsumer): + """ + Consumer for AI agent channels with input aggregation support. + + Can aggregate inputs from multiple producers before triggering the associated producer. + Useful for agents that need to wait for multiple upstream agents to complete. + """ + __metaclass__ = abc.ABCMeta + + def __init__( + self, + callback: typing.Optional[typing.Callable] = None, + size: int = 0, + priority_level: int = agent_channels.AbstractAgentChannel.DEFAULT_PRIORITY_LEVEL, + expected_inputs: int = 1, + ): + """ + Initialize the AI agent consumer. + + Args: + callback: Method to call when consuming queue data. + size: Queue size (0 = unlimited). + priority_level: Consumer priority level. + expected_inputs: Number of inputs to aggregate before triggering. + """ + super().__init__(callback, size=size, priority_level=priority_level) + self.expected_inputs = expected_inputs + self.received_inputs: typing.Dict[str, typing.Any] = {} + + def is_ready(self) -> bool: + """Check if all expected inputs have been received.""" + return len(self.received_inputs) >= self.expected_inputs + + def add_input(self, source_name: str, data: typing.Any) -> None: + """ + Add input data from a source. + + Args: + source_name: Name of the source agent. + data: The data received from the source. + """ + self.received_inputs[source_name] = data + + def get_aggregated_inputs(self) -> typing.Dict[str, typing.Any]: + """Get all received inputs.""" + return self.received_inputs.copy() + + def clear_inputs(self) -> None: + """Clear all received inputs.""" + self.received_inputs.clear() + + +class AbstractAIAgentChannelProducer(agent_channels.AbstractAgentChannelProducer, abc.ABC): + """ + Producer for AI agents with LLM calling capabilities and optional memory management. + + Follows the same pattern as AbstractServiceFeed inheriting from + AbstractServiceFeedChannelProducer. + + Provides common functionality for LLM calling, prompt management, + and data formatting. Retry logic is handled by the service layer. + Memory functionality is optional and can be enabled via ENABLE_MEMORY class variable + or enable_memory constructor parameter. + Subclasses should implement _get_default_prompt() and execute() methods. + """ + + AGENT_VERSION: str = "1.0.0" + DEFAULT_MODEL: typing.Optional[str] = None + DEFAULT_MAX_TOKENS: int = constants.AGENT_DEFAULT_MAX_TOKENS + DEFAULT_TEMPERATURE: float = constants.AGENT_DEFAULT_TEMPERATURE + MAX_RETRIES: int = constants.AGENT_DEFAULT_MAX_RETRIES + # Model policy for multi-model config: fast (analysts, debators) or reasoning (judge, final step). None = use self.model. + MODEL_POLICY: typing.Optional[services_enums.AIModelPolicy] = None + + # Memory configuration + ENABLE_MEMORY: bool = False + MEMORY_SEARCH_LIMIT: int = 5 + MEMORY_STORAGE_ENABLED: bool = True + MEMORY_AGENT_ID_KEY: str = constants.MEMORY_AGENT_ID_KEY + + AGENT_CHANNEL: typing.Optional[typing.Type[agent_channels.AbstractAgentChannel]] = None + AGENT_CONSUMER: typing.Optional[typing.Type[AbstractAIAgentChannelConsumer]] = None + + def __init__( + self, + channel: typing.Optional[agent_channels.AbstractAgentChannel], + ai_service: typing.Optional[services.AbstractAIService] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + enable_memory: typing.Optional[bool] = None, + ): + """ + Initialize the AI agent producer. + + Args: + channel: The channel this producer is registered to. + model: LLM model to use. Defaults to DEFAULT_MODEL. + max_tokens: Maximum tokens for response. Defaults to DEFAULT_MAX_TOKENS. + temperature: Temperature for LLM randomness. Defaults to DEFAULT_TEMPERATURE. + enable_memory: Override class-level ENABLE_MEMORY setting. + """ + super().__init__(channel) + self.name = self.__class__.__name__ + self.model = model or self.DEFAULT_MODEL + self.max_tokens = max_tokens or self.DEFAULT_MAX_TOKENS + self.temperature = temperature or self.DEFAULT_TEMPERATURE + self._custom_prompt: typing.Optional[str] = None + self.ai_service: services.AbstractAIService = None + self.logger = logging.get_logger(f"{self.__class__.__name__}") + + # Initialize memory storage if memory is enabled + memory_enabled = enable_memory if enable_memory is not None else self.ENABLE_MEMORY + self.memory_manager: storage.AbstractMemoryStorage = storage.create_memory_storage( + enums.MemoryStorageType.JSON, + agent_name=self.__class__.__name__, + agent_version=self.AGENT_VERSION, + enabled=memory_enabled, + search_limit=self.MEMORY_SEARCH_LIMIT, + storage_enabled=self.MEMORY_STORAGE_ENABLED, + agent_id_key=self.MEMORY_AGENT_ID_KEY, + ) + + def has_memory_enabled(self) -> bool: + """ + Check if memory is enabled for this agent. + + Returns: + True if memory is enabled, False otherwise. + """ + return self.memory_manager.is_enabled() + + @property + def prompt(self) -> str: + """Get the agent's prompt, allowing override via config.""" + return self._custom_prompt or self._get_default_prompt() + + @prompt.setter + def prompt(self, value: str) -> None: + """Allow custom prompt override.""" + self._custom_prompt = value + + @abc.abstractmethod + def _get_default_prompt(self) -> str: + """ + Return the default prompt for this agent type. + + Subclasses must implement this to provide their system prompt. + + Returns: + The default system prompt string. + """ + raise NotImplementedError("_get_default_prompt not implemented") + + @abc.abstractmethod + async def execute(self, input_data: typing.Any, ai_service: services.AbstractAIService) -> typing.Any: + """ + Execute the agent's primary function. + + Args: + input_data: The input data for the agent to process. + ai_service: The AI service instance (AbstractAIService). + + Returns: + The agent's output, type depends on the specific agent. + """ + raise NotImplementedError("execute not implemented") + + async def push( + self, + result: typing.Any, + agent_name: typing.Optional[str] = None, + agent_id: typing.Optional[str] = None, + ) -> None: + """ + Push a result to filtered consumers. + + Args: + result: The result data to push. + agent_name: Agent name for filtering (defaults to name). + agent_id: Agent id for filtering. + """ + if self.channel is None: + return + await self.perform( + result, + agent_name=agent_name or self.name, + agent_id=agent_id or "", + ) + + async def perform( + self, + result: typing.Any, + agent_name: str, + agent_id: str, + ) -> None: + """ + Send result to matching consumers. + + Args: + result: The result data to send. + agent_name: Agent name for consumer filtering. + agent_id: Agent id for consumer filtering. + """ + if self.channel is None: + return + for consumer_instance in self.channel.get_filtered_consumers( + agent_name=agent_name, + agent_id=agent_id, + ): + await consumer_instance.queue.put({ + "agent_name": agent_name, + "agent_id": agent_id, + "result": result, + }) + + @contextlib.contextmanager + def _memory_tool_executor(self): + """ + Context manager that provides a memory tool executor callback. + + Yields: + A callable function that executes memory tools with the signature: + (tool_name: str, arguments: dict) -> Any + """ + def executor(tool_name: str, arguments: dict) -> typing.Any: + return storage.execute_memory_tool(self.memory_manager, tool_name, arguments) + + yield executor + + async def _call_llm( + self, + messages: list, + llm_service: services.AbstractAIService, + json_output: bool = True, + response_schema: typing.Optional[typing.Any] = None, + input_data: typing.Optional[typing.Any] = None, + memory_query: typing.Optional[str] = None, + tools: typing.Optional[list] = None, + return_tool_calls: bool = False, + ) -> typing.Any: + """ + Common LLM calling method with error handling and optional memory. + + Automatically registers memory tools when memory is enabled. Memory retrieval is done + via LLM tools (get_memory_summaries, get_memory_by_id). + Custom tools can be provided and will be merged with memory tools if both are present. + Retry logic is handled by the service layer via the retry_llm_completion decorator. + + Args: + messages: List of message dicts with 'role' and 'content'. + llm_service: The LLM service instance. + json_output: Whether to parse response as JSON. + response_schema: Optional Pydantic model or JSON schema for structured output. + If None, uses the channel's OUTPUT_SCHEMA as default. + input_data: Optional input data for memory retrieval (kept for backward compatibility). + memory_query: Optional custom query for memory search (not used with tools). + tools: Optional list of custom tools to provide to the LLM. + + Returns: + Parsed JSON dict or raw string response. + """ + # Register memory tools if memory is enabled, and merge with custom tools + all_tools = [] + if self.memory_manager.is_enabled(): + all_tools.extend(storage.get_memory_tools(self.memory_manager, llm_service)) + if tools: + all_tools.extend(tools) + + # Use channel's output schema as default if not explicitly provided + effective_schema = response_schema + if effective_schema is None and self.AGENT_CHANNEL is not None: + effective_schema = self.AGENT_CHANNEL.get_output_schema() + + # Resolve model from policy if set (use class attribute MODEL_POLICY) + effective_model = self.model + if self.MODEL_POLICY is not None: + policy_model = llm_service.get_model_for_policy(self.MODEL_POLICY.value) + if policy_model: + effective_model = policy_model + + # Call LLM with automatic tool calling orchestration if tools are available + # Retry logic is handled by the service layer decorator + if all_tools: + # Use context manager to get executor and keep it open for the entire call + try: + with self._memory_tool_executor() as executor: + # Call LLM with automatic tool calling orchestration + return await llm_service.get_completion_with_tools( + messages=messages, + tool_executor=executor if not return_tool_calls else None, + model=effective_model, + max_tokens=self.max_tokens, + temperature=self.temperature, + json_output=json_output, + response_schema=effective_schema, + tools=all_tools, + return_tool_calls=return_tool_calls, + ) + except services_errors.InvalidRequestError as e: + # Check if error is due to tool support + error_message = str(e).lower() + if "does not support tools" in error_message or "does not support" in error_message and "tool" in error_message: + # Model doesn't support tools - fall back to regular completion + self.logger.warning( + f"Model {self.model} does not support tools. " + f"Falling back to regular completion without memory tools. " + f"Error: {e}" + ) + # Fall through to regular get_completion below + else: + # Different error - re-raise it + raise + except Exception as e: + # Check if it's a tool support error from the underlying API + error_message = str(e).lower() + if "does not support tools" in error_message or "does not support" in error_message and "tool" in error_message: + # Model doesn't support tools - fall back to regular completion + self.logger.warning( + f"Model {self.model} does not support tools. " + f"Falling back to regular completion without memory tools. " + f"Error: {e}" + ) + # Fall through to regular get_completion below + else: + # Different error - re-raise it + raise + + # No tools or fallback from tool error - use regular get_completion + response = await llm_service.get_completion( + messages=messages, + model=effective_model, + max_tokens=self.max_tokens, + temperature=self.temperature, + json_output=json_output, + response_schema=effective_schema, + tools=None, + ) + return llm_service.parse_completion_response( + response, + json_output=json_output + ) + + def format_data(self, data: typing.Any, default_message: str = "No data available.") -> str: + """ + Format data for inclusion in prompts. + + Args: + data: Data to format (dict, list, or other JSON-serializable type). + default_message: Message to return if data is empty/None. + + Returns: + JSON-formatted string or default message. + """ + if not data: + return default_message + return json.dumps(data, indent=2, default=str) + + async def _get_relevant_memories( + self, + query: str, + input_data: typing.Any, + limit: typing.Optional[int] = None, + ) -> typing.List[dict]: + """ + Retrieve relevant memories for the current context. + + Note: With tool-based approach, memories are retrieved via LLM tools. + This method is kept for backward compatibility but may not be used. + + Args: + query: Search query for finding relevant memories. + input_data: Current input data (may contain agent_id). + limit: Maximum number of memories to retrieve (defaults to MEMORY_SEARCH_LIMIT). + + Returns: + List of memory dictionaries with 'memory' and 'metadata' keys. + """ + return await self.memory_manager.search_memories(query, input_data, limit=limit) + + def _format_memories_for_prompt(self, memories: typing.List[dict]) -> str: + """ + Format memories for inclusion in prompts. + + Delegates to memory manager. + + Args: + memories: List of memory dictionaries. + + Returns: + Formatted string with memories, or empty string if none. + """ + return self.memory_manager.format_memories_for_prompt(memories) + + async def _store_execution_memory( + self, + input_data: typing.Any, + output: typing.Any, + user_message: typing.Optional[str] = None, + assistant_message: typing.Optional[str] = None, + metadata: typing.Optional[dict] = None, + ) -> None: + """ + Store memory from agent execution. + + Note: Memory storage is now handled by MemoryAgent, not automatically after LLM calls. + This method is kept for backward compatibility but should not be called automatically. + + Args: + input_data: The input data that was processed. + output: The agent's output/result. + user_message: Optional user message (auto-built if not provided). + assistant_message: Optional assistant message (auto-built if not provided). + metadata: Optional metadata to attach. + """ + # Memory storage is now handled by MemoryAgent + # This method is kept for manual memory storage if needed + await self.memory_manager.store_execution_memory( + input_data, output, user_message, assistant_message, metadata + ) diff --git a/packages/agents/octobot_agents/agent/channels/deep_agent.py b/packages/agents/octobot_agents/agent/channels/deep_agent.py new file mode 100644 index 0000000000..ee33cb5910 --- /dev/null +++ b/packages/agents/octobot_agents/agent/channels/deep_agent.py @@ -0,0 +1,871 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +""" +Deep Agent channel, consumer, and producer for LangChain Deep Agents integration. + +Features: +- SubAgentMiddleware for manager delegation +- TodoListMiddleware for task planning +- CompositeBackend for long-term memory +- Dangling tool call repair +- Streaming support +- Debug logging for agent operations + +See LangChain Deep Agents docs: +- https://docs.langchain.com/oss/python/deepagents/middleware +- https://docs.langchain.com/oss/python/deepagents/long-term-memory +- https://docs.langchain.com/oss/python/deepagents/harness +""" + +import abc +import typing +import logging +import uuid + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_agents.errors as errors +import octobot_agents.constants as constants +import octobot_services.services as services + +logger = logging.getLogger(__name__.split(".")[-1]) + +try: + from deepagents import create_deep_agent, CompiledSubAgent + from deepagents.backends import CompositeBackend, StateBackend, StoreBackend + from langchain.agents.middleware import TodoListMiddleware + from deepagents.middleware.subagents import SubAgentMiddleware + from langgraph.store.memory import InMemoryStore + from langgraph.checkpoint.memory import MemorySaver + from langgraph.types import Command + DEEP_AGENTS_AVAILABLE = True +except ImportError as e: + DEEP_AGENTS_AVAILABLE = False + logger.debug(f"deepagents not available - Deep Agent features disabled: {e}") + + +class AbstractDeepAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + __metaclass__ = abc.ABCMeta + + +class AbstractDeepAgentChannelConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + __metaclass__ = abc.ABCMeta + + def __init__( + self, + callback: typing.Optional[typing.Callable] = None, + size: int = 0, + priority_level: int = ai_agent_channels.AbstractAIAgentChannel.DEFAULT_PRIORITY_LEVEL, + expected_inputs: int = 1, + ): + super().__init__( + callback=callback, + size=size, + priority_level=priority_level, + expected_inputs=expected_inputs, + ) + self.subagent_results: typing.Dict[str, typing.Any] = {} + + def add_subagent_result(self, subagent_name: str, result: typing.Any) -> None: + self.subagent_results[subagent_name] = result + + def get_subagent_results(self) -> typing.Dict[str, typing.Any]: + return self.subagent_results.copy() + + def clear_subagent_results(self) -> None: + self.subagent_results.clear() + + +class AbstractDeepAgentChannelProducer(ai_agent_channels.AbstractAIAgentChannelProducer, abc.ABC): + """ + Producer for Deep Agents with supervisor pattern and subagent orchestration. + + Features: + - SubAgentMiddleware for task delegation + - TodoListMiddleware for planning + - CompositeBackend with /memories/ for persistent storage + - Dangling tool call repair + - Streaming support + - Debug logging + """ + + AGENT_CHANNEL: typing.Optional[typing.Type[AbstractDeepAgentChannel]] = None + AGENT_CONSUMER: typing.Optional[typing.Type[AbstractDeepAgentChannelConsumer]] = None + + MAX_ITERATIONS: int = 10 + ENABLE_WRITE_TODOS: bool = True + ENABLE_STREAMING: bool = False + + ENABLE_HITL: bool = False + HITL_INTERRUPT_TOOLS: dict[str, typing.Any] = {} + + SKILLS_DIRS: list[str] = [] + + def __init__( + self, + channel: typing.Optional[AbstractDeepAgentChannel], + ai_service: typing.Optional[services.AbstractAIService] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + enable_memory: typing.Optional[bool] = None, + store: typing.Any | None = None, + checkpointer: typing.Any | None = None, + skills: list[str] | None = None, + interrupt_on: dict[str, typing.Any] | None = None, + enable_streaming: bool | None = None, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + enable_memory=enable_memory, + ) + + self.ai_service = ai_service + + self._store = store + self._checkpointer = checkpointer + self._deep_agent = None + self._subagents: list[dict[str, typing.Any]] = [] + + self._interrupt_on = interrupt_on or self.HITL_INTERRUPT_TOOLS + self._skills = skills or self.SKILLS_DIRS + self._enable_streaming = enable_streaming if enable_streaming is not None else self.ENABLE_STREAMING + + self._current_thread_id: str | None = None + + def get_subagents(self) -> list[dict[str, typing.Any]]: + return [] + + def get_compiled_subagents(self) -> list[typing.Any]: + return [] + + def get_skills(self) -> list[str]: + return self._skills + + def get_agent_skills(self, agent_name: str) -> list[str] | None: + """ + Get skills for a specific agent/subagent. + Override to provide agent-specific skills. + + Args: + agent_name: Name of the agent/subagent + + Returns: + List of skill paths (e.g., ["./technical-analysis/"]) or None + """ + return None + + def get_agent_skills_files(self, agent_name: str) -> dict[str, str] | None: + """ + Get skill files for a specific agent/subagent. + Override to provide agent-specific skill files. + + Args: + agent_name: Name of the agent/subagent + + Returns: + Dict mapping virtual paths to file content or None + """ + skills_dir = self.get_skills_resources_dir() # pylint: disable=assignment-from-none + if not skills_dir: + return None + + # Try to find agent-specific skills directory + import os + agent_skills_dir = os.path.join(skills_dir, agent_name) + if os.path.isdir(agent_skills_dir): + return create_skills_files_dict(agent_skills_dir) + + return None + + def get_skills_resources_dir(self) -> str | None: + """ + Get the tentacle's resources/skills directory path. + Override this to provide a custom skills directory. + By default, returns None (no auto-discovery). + + Example implementation in tentacle: + import os + return os.path.join(os.path.dirname(__file__), "resources", "skills") + """ + return None + + def get_interrupt_config(self) -> dict[str, typing.Any]: + return self._interrupt_on + + def get_middleware(self) -> list[typing.Any]: + """ + Get middleware list for this producer. + + Override to add custom middleware. Default includes: + - TodoListMiddleware (if ENABLE_WRITE_TODOS) + - SubAgentMiddleware with subagents + """ + middleware = [] + + if not DEEP_AGENTS_AVAILABLE: + return middleware + + # Build middleware with deduplication and merging + middleware_dict = {} # Map middleware type to instance + + if self.ENABLE_WRITE_TODOS and TodoListMiddleware: + middleware_type_name = TodoListMiddleware.__name__ + if middleware_type_name not in middleware_dict: + middleware_dict[middleware_type_name] = TodoListMiddleware() + + subagents = self.get_subagents() + self.get_compiled_subagents() + if subagents and SubAgentMiddleware: + model = self.model + if self.ai_service: + model = self.ai_service.model or self.model + middleware_type_name = SubAgentMiddleware.__name__ + + if middleware_type_name in middleware_dict: + # Merge subagents: append new subagents to existing ones + existing = middleware_dict[middleware_type_name] + try: + existing_subagents = existing.subagents + if existing_subagents: + # Only add subagents that aren't already present + existing_names = {s.get('name') if isinstance(s, dict) else getattr(s, 'name', None) for s in existing_subagents} + new_subagents = [s for s in subagents if (s.get('name') if isinstance(s, dict) else getattr(s, 'name', None)) not in existing_names] + if new_subagents: + existing.subagents = existing_subagents + new_subagents + except AttributeError: + pass + else: + middleware_dict[middleware_type_name] = SubAgentMiddleware( + default_model=model, + default_tools=[], + subagents=subagents, + ) + + return list(middleware_dict.values()) + + def _create_memory_backend(self) -> typing.Callable: + """ + Create a CompositeBackend factory for long-term memory. + + Routes: + - /memories/* -> StoreBackend (persistent) + - else -> StateBackend (transient) + """ + def make_backend(runtime): + if not DEEP_AGENTS_AVAILABLE or not CompositeBackend: + return None + return CompositeBackend( + default=StateBackend(runtime), + routes={ + f"{constants.MEMORIES_PATH_PREFIX}": StoreBackend(runtime) + } + ) + return make_backend + + def _get_or_create_store(self) -> typing.Any: + if self._store is None and DEEP_AGENTS_AVAILABLE: + self._store = InMemoryStore() + return self._store + + def _get_or_create_checkpointer(self) -> typing.Any: + if self._checkpointer is None and DEEP_AGENTS_AVAILABLE: + self._checkpointer = MemorySaver() + return self._checkpointer + + def _build_deep_agent( + self, + additional_tools: list[typing.Callable] | None = None, + ) -> typing.Any: + if not DEEP_AGENTS_AVAILABLE: + raise errors.DeepAgentNotAvailableError("deep_agents package is required") + + logger.debug(f"[{self.name}] Building deep agent...") + + store = self._get_or_create_store() + + model = None + if self.ai_service: + logger.debug(f"[{self.name}] Initializing chat model from AI service") + model = self.ai_service.init_chat_model(model=self.model) + else: + model = self.model + + agent_kwargs: dict[str, typing.Any] = { + "model": model, + "system_prompt": self.prompt, + "tools": additional_tools or [], + "store": store, + "backend": self._create_memory_backend(), + } + + middleware = self.get_middleware() + if middleware: + agent_kwargs["middleware"] = middleware + logger.debug(f"[{self.name}] Using middleware: {[type(m).__name__ for m in middleware]}") + + # Auto-discover skills from tentacle's resources/skills directory + skills = self.get_skills() + skills_dir = self.get_skills_resources_dir() # pylint: disable=assignment-from-none + + if skills_dir: + discovered = discover_skills(skills_dir) + if discovered: + skills = (skills or []) + discovered + logger.debug(f"[{self.name}] Auto-discovered {len(discovered)} skills from {skills_dir}") + + if skills: + agent_kwargs["skills"] = skills + logger.debug(f"[{self.name}] Using skills: {skills}") + + interrupt_config = self.get_interrupt_config() + if interrupt_config: + checkpointer = self._get_or_create_checkpointer() + agent_kwargs["interrupt_on"] = interrupt_config + agent_kwargs["checkpointer"] = checkpointer + logger.debug(f"[{self.name}] HITL enabled for tools: {list(interrupt_config.keys())}") + + logger.debug(f"[{self.name}] Deep agent built successfully") + return create_deep_agent(**agent_kwargs) + + def get_deep_agent( + self, + additional_tools: list[typing.Callable] | None = None, + force_rebuild: bool = False, + ) -> typing.Any: + if self._deep_agent is None or force_rebuild: + self._deep_agent = self._build_deep_agent(additional_tools) + return self._deep_agent + + async def invoke_deep_agent( + self, + message: str, + additional_tools: list[typing.Callable] | None = None, + thread_id: str | None = None, + ) -> dict: + agent = self.get_deep_agent(additional_tools) + if agent is None: + return {"error": "Deep Agent not available"} + + if thread_id is None: + thread_id = str(uuid.uuid4()) + self._current_thread_id = thread_id + + config = {"configurable": {"thread_id": thread_id}} + + logger.debug(f"[{self.name}] Invoking deep agent with message: {message[:100]}...") + + try: + if self._enable_streaming: + return await self._invoke_with_streaming(agent, message, config) + else: + result = await agent.ainvoke( + {"messages": [{"role": "user", "content": message}]}, + config=config, + ) + logger.debug(f"[{self.name}] Deep agent invocation complete") + return result + except Exception as e: + logger.error(f"[{self.name}] Error invoking Deep Agent: {e}") + return {"error": str(e)} + + async def _invoke_with_streaming( + self, + agent: typing.Any, + message: str, + config: dict, + ) -> dict: + """Invoke agent with streaming, logging events as they occur.""" + + logger.debug(f"[{self.name}] Starting streaming invocation") + + async for event in agent.astream( + {"messages": [{"role": "user", "content": message}]}, + config=config, + stream_mode="updates", + ): + for node_name, node_output in event.items(): + if node_name == "agent": + messages = node_output.get("messages", []) + for msg in messages: + tool_calls = msg.get("tool_calls") + if tool_calls: + for tc in tool_calls: + tool_name = tc["name"] if isinstance(tc, dict) else tc.name + logger.debug(f"[{self.name}] 🔧 Calling tool: {tool_name}") + elif msg.get("content"): + content = msg.get("content", "") + content_preview = content[:100] if len(content) > 100 else content + logger.debug(f"[{self.name}] 💭 Agent thinking: {content_preview}...") + + elif node_name == "tools": + messages = node_output.get("messages", []) + for msg in messages: + msg_name = msg.get("name") + if msg_name: + logger.debug(f"[{self.name}] ✅ Tool result from: {msg_name}") + + + state = await agent.aget_state(config) + logger.debug(f"[{self.name}] Streaming complete") + return {"messages": state.values.get("messages", [])} + + def is_interrupted(self, result: dict) -> bool: + return constants.HITL_INTERRUPT_KEY in result + + def get_interrupt_info(self, result: dict) -> dict | None: + if not self.is_interrupted(result): + return None + + interrupts = result[constants.HITL_INTERRUPT_KEY] + if not interrupts: + return None + + return interrupts[0].get('value', interrupts[0]) + + async def resume_with_decisions( + self, + decisions: list[dict[str, typing.Any]], + thread_id: str | None = None, + ) -> dict: + if not DEEP_AGENTS_AVAILABLE: + return {"error": "Deep Agents not available"} + + agent = self.get_deep_agent() + if agent is None: + return {"error": "Deep Agent not available"} + + thread_id = thread_id or self._current_thread_id + if thread_id is None: + return {"error": "No thread_id for resume"} + + config = {"configurable": {"thread_id": thread_id}} + + logger.debug(f"[{self.name}] Resuming with {len(decisions)} decisions") + + try: + result = await agent.ainvoke( + Command(resume={"decisions": decisions}), + config=config, + ) + return result + except Exception as e: + logger.error(f"[{self.name}] Error resuming Deep Agent: {e}") + return {"error": str(e)} + + async def approve_all_interrupts(self, result: dict, thread_id: str | None = None) -> dict: + interrupt_info = self.get_interrupt_info(result) + if interrupt_info is None: + return result + + action_requests = interrupt_info.get("action_requests", []) + decisions = [{"type": constants.HITL_DECISION_APPROVE} for _ in action_requests] + + return await self.resume_with_decisions(decisions, thread_id) + + async def reject_all_interrupts(self, result: dict, thread_id: str | None = None) -> dict: + interrupt_info = self.get_interrupt_info(result) + if interrupt_info is None: + return result + + action_requests = interrupt_info.get("action_requests", []) + decisions = [{"type": constants.HITL_DECISION_REJECT} for _ in action_requests] + + return await self.resume_with_decisions(decisions, thread_id) + + +def create_memory_backend( + memories_path_prefix: str = constants.MEMORIES_PATH_PREFIX, +) -> typing.Any: + if not DEEP_AGENTS_AVAILABLE: + raise errors.DeepAgentNotAvailableError("deep_agents is required for memory backend") + + return InMemoryStore() + + +def get_agent_memory_path(agent_name: str, memory_type: str = "data") -> str: + return f"{constants.MEMORIES_PATH_PREFIX}{agent_name}/{memory_type}" + + +def build_dictionary_subagent( + name: str, + instructions: str, + description: str | None = None, + tools: list[typing.Callable] | None = None, + model: str | None = None, + model_provider: str | None = None, + handoff_back: bool = True, + interrupt_on: dict[str, typing.Any] | None = None, + middleware: list[typing.Any] | None = None, + skills: list[str] | None = None, + files: dict[str, str] | None = None, +) -> dict[str, typing.Any]: + """Build a dictionary-based subagent definition. + + Args: + name: Agent name + instructions: System prompt/instructions + description: Optional description for delegation + tools: Optional list of tools + model: Optional model override + model_provider: Optional model provider (e.g., 'ollama', 'openai', 'anthropic') + handoff_back: Whether agent can hand back to manager + interrupt_on: HITL interrupt configuration + middleware: Optional middleware list + skills: Optional list of skill paths (e.g., ["./technical-analysis/"]) + files: Optional dict of virtual files (e.g., {"/skills/ta/SKILL.md": content}) + """ + subagent: dict[str, typing.Any] = { + "name": name, + "system_prompt": instructions, + } + + if description: + subagent["description"] = description + else: + subagent["description"] = instructions[:200] + "..." if len(instructions) > 200 else instructions + + if tools: + subagent["tools"] = tools + + if model: + subagent["model"] = model + + if model_provider: + subagent["model_provider"] = model_provider + + if handoff_back: + subagent["handoff_back"] = True + + if interrupt_on: + subagent["interrupt_on"] = interrupt_on + + if middleware: + subagent["middleware"] = middleware + + if skills: + subagent["skills"] = skills + + if files: + subagent["files"] = files + + return subagent + + +def build_compiled_subagent( + name: str, + description: str, + runnable: typing.Any, +) -> typing.Any: + if not DEEP_AGENTS_AVAILABLE or CompiledSubAgent is None: + logger.error("Cannot create CompiledSubAgent - deep_agents not installed") + return None + + return CompiledSubAgent( + name=name, + description=description, + runnable=runnable, + ) + + +def build_subagents_from_agents( + agents: list[dict[str, typing.Any]], +) -> list[dict[str, typing.Any]]: + return [ + build_dictionary_subagent( + name=agent.get("name", agent.get(constants.AGENT_NAME_KEY, "unnamed")), + instructions=agent.get("instructions", agent.get("system_prompt", agent.get("prompt", ""))), + description=agent.get("description"), + tools=agent.get("tools"), + model=agent.get("model"), + model_provider=agent.get("model_provider"), + handoff_back=agent.get("handoff_back", True), + interrupt_on=agent.get("interrupt_on"), + middleware=agent.get("middleware"), + ) + for agent in agents + ] + + +def build_subagents_from_producers( + producers: list[ai_agent_channels.AbstractAIAgentChannelProducer], + include_descriptions: bool = True, +) -> list[dict[str, typing.Any]]: + subagents = [] + for producer in producers: + description = None + if include_descriptions: + description = producer.__class__.__doc__ or f"Agent: {producer.name}" + if len(description) > 200: + description = description[:200] + "..." + + subagents.append(build_dictionary_subagent( + name=producer.name, + instructions=producer.prompt, + description=description, + model=producer.model, + model_provider=producer.ai_service.ai_provider.value if producer.ai_service else None, + handoff_back=True, + )) + + return subagents + + +def create_deep_agent_safe( + model: str | None = None, + instructions: str = "", + tools: list[typing.Callable] | None = None, + subagents: list[dict[str, typing.Any]] | None = None, + store: typing.Any | None = None, + enable_todos: bool = True, + **kwargs, +) -> typing.Any: + if not DEEP_AGENTS_AVAILABLE: + logger.error("Cannot create Deep Agent - deep_agents not installed") + return None + + if store is None: + store = create_memory_backend() + + middleware = [] + if enable_todos and TodoListMiddleware: + middleware.append(TodoListMiddleware()) + if subagents and SubAgentMiddleware: + middleware.append(SubAgentMiddleware( + default_model=model, + default_tools=[], + subagents=subagents, + )) + + def make_backend(runtime): + return CompositeBackend( + default=StateBackend(runtime), + routes={f"{constants.MEMORIES_PATH_PREFIX}": StoreBackend(runtime)} + ) + + return create_deep_agent( + model=model, + system_prompt=instructions, + tools=tools or [], + store=store, + backend=make_backend, + middleware=middleware if middleware else None, + **kwargs, + ) + + +def create_supervisor_agent( + name: str, + instructions: str, + subagents: list[dict[str, typing.Any]], + model: str | None = None, + tools: list[typing.Callable] | None = None, + store: typing.Any | None = None, + **kwargs, +) -> typing.Any: + if not DEEP_AGENTS_AVAILABLE: + logger.error("Cannot create supervisor - deep_agents not installed") + return None + + supervisor_instructions = f""" +{instructions} + +As a supervisor, you coordinate the following workers: +{', '.join(s.get('name', 'unnamed') for s in subagents)} + +Use write_todos to plan your approach before delegating. +Delegate specific tasks to appropriate workers. +Synthesize their outputs into a coherent result. +""".strip() + + return create_deep_agent_safe( + model=model, + instructions=supervisor_instructions, + tools=tools or [], + subagents=subagents, + store=store, + **kwargs, + ) + + +def create_team_deep_agent( + team_name: str, + manager_instructions: str, + workers: list[dict[str, typing.Any]], + manager_tools: list[typing.Callable] | None = None, + model: str | None = None, + store: typing.Any | None = None, + enable_debate: bool = False, + critic_config: dict[str, typing.Any] | None = None, + **kwargs, +) -> typing.Any: + if not DEEP_AGENTS_AVAILABLE: + logger.error("Cannot create team - deep_agents not installed") + return None + + subagents = build_subagents_from_agents(workers) + + if enable_debate and critic_config: + critic_subagent = build_dictionary_subagent( + name=critic_config.get("name", "critic"), + instructions=critic_config.get("instructions", "Critique the analysis..."), + tools=critic_config.get("tools"), + model_provider=critic_config.get("model_provider"), + handoff_back=True, + ) + subagents.append(critic_subagent) + + team_instructions = f""" +You are the manager of the {team_name} team. + +{manager_instructions} + +Your team members: +{chr(10).join(f"- {w.get('name', 'unnamed')}: {w.get('instructions', '')[:100]}..." for w in workers)} + +Workflow: +1. Use write_todos to plan your approach +2. Delegate tasks to appropriate team members +3. Collect and synthesize their results +4. {"Run debate rounds with critic if needed" if enable_debate else "Provide final synthesized output"} +""".strip() + + return create_supervisor_agent( + name=f"{team_name}_manager", + instructions=team_instructions, + subagents=subagents, + model=model, + tools=manager_tools, + store=store, + **kwargs, + ) + + +def load_skill_from_file(skill_path: str) -> dict[str, typing.Any] | None: + try: + with open(skill_path, 'r', encoding='utf-8') as f: + content = f.read() + + if content.startswith('---'): + parts = content.split('---', 2) + if len(parts) >= 3: + import yaml + frontmatter = yaml.safe_load(parts[1]) + instructions = parts[2].strip() + + return { + "name": frontmatter.get("name", "unnamed-skill"), + "description": frontmatter.get("description", ""), + "instructions": instructions, + "path": skill_path, + } + + return { + "name": skill_path.split('/')[-2] if '/' in skill_path else "unnamed-skill", + "description": "", + "instructions": content, + "path": skill_path, + } + + except Exception as e: + logger.error(f"Error loading skill from {skill_path}: {e}") + return None + + +def discover_skills(skills_dir: str) -> list[str]: + import os + skill_paths = [] + + try: + if not os.path.isdir(skills_dir): + return [] + + for entry in os.listdir(skills_dir): + skill_manifest = os.path.join(skills_dir, entry, constants.SKILLS_MANIFEST_FILE) + if os.path.isfile(skill_manifest): + skill_paths.append(f"./{entry}/") + + except Exception as e: + logger.error(f"Error discovering skills in {skills_dir}: {e}") + + return skill_paths + + +def create_skills_files_dict(skills_dir: str) -> dict[str, str]: + import os + files = {} + + try: + if not os.path.isdir(skills_dir): + return {} + + for entry in os.listdir(skills_dir): + skill_folder = os.path.join(skills_dir, entry) + if not os.path.isdir(skill_folder): + continue + + for filename in os.listdir(skill_folder): + file_path = os.path.join(skill_folder, filename) + if os.path.isfile(file_path): + virtual_path = f"{constants.SKILLS_PATH_PREFIX}{entry}/{filename}" + with open(file_path, 'r', encoding='utf-8') as f: + files[virtual_path] = f.read() + + except Exception as e: + logger.error(f"Error creating skills files dict: {e}") + + return files + + +def create_interrupt_config( + high_risk_tools: list[str] | None = None, + medium_risk_tools: list[str] | None = None, + low_risk_tools: list[str] | None = None, +) -> dict[str, typing.Any]: + config = {} + + for tool_name in (high_risk_tools or []): + config[tool_name] = { + "allowed_decisions": [constants.HITL_DECISION_APPROVE, constants.HITL_DECISION_EDIT, constants.HITL_DECISION_REJECT] + } + + for tool_name in (medium_risk_tools or []): + config[tool_name] = { + "allowed_decisions": [constants.HITL_DECISION_APPROVE, constants.HITL_DECISION_REJECT] + } + + for tool_name in (low_risk_tools or []): + config[tool_name] = False + + return config + + +def build_hitl_decision( + decision_type: str, + edited_action: dict[str, typing.Any] | None = None, +) -> dict[str, typing.Any]: + if decision_type not in constants.HITL_ALLOWED_DECISIONS: + raise ValueError(f"Invalid decision type: {decision_type}. Must be one of {constants.HITL_ALLOWED_DECISIONS}") + + decision: dict[str, typing.Any] = {"type": decision_type} + + if decision_type == constants.HITL_DECISION_EDIT: + if edited_action is None: + raise ValueError("edited_action required for edit decision") + decision["edited_action"] = edited_action + + return decision diff --git a/packages/agents/octobot_agents/agent/memory/__init__.py b/packages/agents/octobot_agents/agent/memory/__init__.py new file mode 100644 index 0000000000..915aa3a1b9 --- /dev/null +++ b/packages/agents/octobot_agents/agent/memory/__init__.py @@ -0,0 +1,37 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.agent.memory import channels +from octobot_agents.agent.memory.channels import ( + MemoryAgentMixin, + MemoryAgentChannel, + MemoryAgentConsumer, + MemoryAgentProducer, + AIMemoryAgentChannel, + AIMemoryAgentConsumer, + AIMemoryAgentProducer, +) + + +__all__ = [ + "MemoryAgentMixin", + "MemoryAgentChannel", + "MemoryAgentConsumer", + "MemoryAgentProducer", + "AIMemoryAgentChannel", + "AIMemoryAgentConsumer", + "AIMemoryAgentProducer", +] diff --git a/packages/agents/octobot_agents/agent/memory/channels/__init__.py b/packages/agents/octobot_agents/agent/memory/channels/__init__.py new file mode 100644 index 0000000000..2d98ec5701 --- /dev/null +++ b/packages/agents/octobot_agents/agent/memory/channels/__init__.py @@ -0,0 +1,35 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.agent.memory.channels.memory_agent import ( + MemoryAgentMixin, + MemoryAgentChannel, + MemoryAgentConsumer, + MemoryAgentProducer, + AIMemoryAgentChannel, + AIMemoryAgentConsumer, + AIMemoryAgentProducer, +) + +__all__ = [ + "MemoryAgentMixin", + "MemoryAgentChannel", + "MemoryAgentConsumer", + "MemoryAgentProducer", + "AIMemoryAgentChannel", + "AIMemoryAgentConsumer", + "AIMemoryAgentProducer", +] diff --git a/packages/agents/octobot_agents/agent/memory/channels/memory_agent.py b/packages/agents/octobot_agents/agent/memory/channels/memory_agent.py new file mode 100644 index 0000000000..5e5ed8cc5b --- /dev/null +++ b/packages/agents/octobot_agents/agent/memory/channels/memory_agent.py @@ -0,0 +1,153 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing + +import octobot_commons.logging as logging + +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_agents.models as models +import octobot_services.services.abstract_ai_service as abstract_ai_service + + +class MemoryAgentMixin: + """ + Mixin that provides memory agent functionality. + + Memory agents are responsible for managing agent memories based on critic analysis. + """ + + async def execute( + self, + input_data: typing.Union[models.MemoryInput, typing.Dict[str, typing.Any]], + ai_service: abstract_ai_service.AbstractAIService + ) -> models.MemoryOperation: + """ + Execute memory operations based on critic analysis. + + Args: + input_data: Contains {"critic_analysis": CriticAnalysis, "agent_outputs": Dict, "execution_metadata": dict} + ai_service: The AI service instance (for AI memory agents) + + Returns: + MemoryOperation with list of operations performed + """ + raise NotImplementedError("execute must be implemented by subclasses") + + @staticmethod + def _get_agent_from_team( + team_producer: typing.Optional[typing.Any], + agent_name: str + ) -> typing.Optional[ai_agent_channels.AbstractAIAgentChannelProducer]: + """ + Get agent instance from team producer (manager or regular agent). + + Args: + team_producer: The team producer instance. + agent_name: Name of the agent to retrieve. + + Returns: + The agent instance if found, None otherwise. + """ + if not team_producer: + return None + manager = team_producer.get_manager() + if manager and manager.name == agent_name: + return manager + return team_producer.get_agent_by_name(agent_name) + + @staticmethod + def _collect_all_agent_names( + agent_outputs: typing.Dict[str, typing.Any], + team_producer: typing.Optional[typing.Any] + ) -> typing.Set[str]: + """ + Collect all agent names from outputs and team producer. + + Args: + agent_outputs: Dict of agent outputs. + team_producer: The team producer instance. + + Returns: + Set of all agent names. + """ + all_agent_names = set(agent_outputs.keys()) + if team_producer: + manager = team_producer.get_manager() + if manager: + try: + all_agent_names.add(manager.name) + except AttributeError: + pass + return all_agent_names + + +class MemoryAgentChannel(agent_channels.AbstractAgentChannel): + OUTPUT_SCHEMA = models.MemoryOperation + + +class MemoryAgentConsumer(agent_channels.AbstractAgentChannelConsumer): + pass + + +class MemoryAgentProducer(MemoryAgentMixin, agent_channels.AbstractAgentChannelProducer): + + AGENT_CHANNEL = MemoryAgentChannel + AGENT_CONSUMER = MemoryAgentConsumer + + def __init__( + self, + channel: typing.Optional[MemoryAgentChannel] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__(channel, **kwargs) + self.self_improving = self_improving + self.name = self.__class__.__name__ + self.logger = logging.get_logger(self.__class__.__name__) + + +class AIMemoryAgentChannel(MemoryAgentChannel, ai_agent_channels.AbstractAIAgentChannel): + pass + + +class AIMemoryAgentConsumer(MemoryAgentConsumer, ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class AIMemoryAgentProducer(MemoryAgentProducer, ai_agent_channels.AbstractAIAgentChannelProducer): + + AGENT_CHANNEL = AIMemoryAgentChannel + AGENT_CONSUMER = AIMemoryAgentConsumer + + def __init__( + self, + channel: typing.Optional[AIMemoryAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + self_improving=self_improving, + **kwargs + ) + self.name = self.__class__.__name__ diff --git a/packages/agents/octobot_agents/constants.py b/packages/agents/octobot_agents/constants.py new file mode 100644 index 0000000000..4d6086e768 --- /dev/null +++ b/packages/agents/octobot_agents/constants.py @@ -0,0 +1,114 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +AGENT_NAME_KEY = "agent_name" +AGENT_ID_KEY = "agent_id" +TEAM_NAME_KEY = "team_name" +TEAM_ID_KEY = "team_id" +RESULT_KEY = "result" +DEFAULT_AGENT_RESULT = "NO_OUTPUT_AVAILABLE" + +# Agent defaults +AGENT_DEFAULT_VERSION = "1.0.0" +AGENT_DEFAULT_MAX_TOKENS: int = 10000 +AGENT_DEFAULT_TEMPERATURE: float = 0.3 +AGENT_DEFAULT_MAX_RETRIES: int = 3 + +# Memory keys +MEMORY_USER_ID_KEY = "user_id" +MEMORY_AGENT_ID_KEY = "agent_id" + +# Memory operations +MEMORY_OPERATION_GENERATE = "generate" +MEMORY_OPERATION_MERGE = "merge" +MEMORY_OPERATION_UPDATE = "update" +MEMORY_OPERATION_REMOVE = "remove" +MEMORY_OPERATION_GROUP = "group" + +# Memory defaults +DEFAULT_CATEGORY = "general" +DEFAULT_IMPORTANCE_SCORE = 0.5 +DEFAULT_CONFIDENCE_SCORE = 0.5 +DEFAULT_MAX_MEMORIES = 100 + +# Memory length limits +MEMORY_TITLE_MAX_LENGTH = 100 +MEMORY_CONTEXT_MAX_LENGTH = 200 +MEMORY_CONTENT_MAX_LENGTH = 500 + +# Storage constants +MEMORY_FOLDER_NAME = "agents" +MEMORY_FILE_EXTENSION = ".json" + +# Analysis constants +DEFAULT_ANALYSIS_DIR = "analysis/" + +# Team modification constants +MODIFICATION_ADDITIONAL_INSTRUCTIONS = "additional_instructions" +MODIFICATION_CUSTOM_PROMPT = "custom_prompt" +MODIFICATION_EXECUTION_HINTS = "execution_hints" + +# Critic analysis types +ANALYSIS_TYPE_ISSUES = "issues" +ANALYSIS_TYPE_IMPROVEMENTS = "improvements" +ANALYSIS_TYPE_ERRORS = "errors" +ANALYSIS_TYPE_INCONSISTENCIES = "inconsistencies" +ANALYSIS_TYPE_OPTIMIZATIONS = "optimizations" + +# Manager tool names +TOOL_RUN_AGENT = "run_agent" +TOOL_RUN_DEBATE = "run_debate" +TOOL_FINISH = "finish" + +# Deep Agent memory paths - using /memories/ prefix for persistent storage +MEMORIES_PATH_PREFIX = "/memories/" +MEMORIES_AGENT_DATA = "data" +MEMORIES_AGENT_CONTEXT = "context" +MEMORIES_AGENT_HISTORY = "history" +MEMORIES_TEAM_SHARED = "shared" + +# Deep Agent supervisor defaults +SUPERVISOR_MAX_DELEGATION_DEPTH = 3 +SUPERVISOR_WORKER_TIMEOUT_SECONDS = 60 + +# Subagent configuration keys +SUBAGENT_NAME_KEY = "name" +SUBAGENT_INSTRUCTIONS_KEY = "instructions" +SUBAGENT_TOOLS_KEY = "tools" +SUBAGENT_MODEL_KEY = "model" +SUBAGENT_HANDOFF_BACK_KEY = "handoff_back" + +# Debate workflow constants +DEBATE_MAX_ROUNDS = 3 +DEBATE_CONVERGENCE_THRESHOLD = 0.8 +DEBATE_MIN_CONFIDENCE = 0.6 + +# Human-in-the-loop (HITL) constants +HITL_DECISION_APPROVE = "approve" +HITL_DECISION_EDIT = "edit" +HITL_DECISION_REJECT = "reject" +HITL_ALLOWED_DECISIONS = [HITL_DECISION_APPROVE, HITL_DECISION_EDIT, HITL_DECISION_REJECT] +HITL_DEFAULT_ALLOWED = [HITL_DECISION_APPROVE, HITL_DECISION_REJECT] +HITL_INTERRUPT_KEY = "__interrupt__" + +# Skills configuration +SKILLS_PATH_PREFIX = "/skills/" +SKILLS_MANIFEST_FILE = "SKILL.md" +SKILLS_DEFAULT_DIR = "./skills/" + +# CompiledSubAgent types +COMPILED_SUBAGENT_TYPE = "compiled" +DICTIONARY_SUBAGENT_TYPE = "dictionary" diff --git a/packages/agents/octobot_agents/enums.py b/packages/agents/octobot_agents/enums.py new file mode 100644 index 0000000000..cd19613edc --- /dev/null +++ b/packages/agents/octobot_agents/enums.py @@ -0,0 +1,64 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import enum + + +class MemoryStorageType(enum.Enum): + JSON = "json" + + +class StepType(enum.Enum): + AGENT = "agent" + DEBATE = "debate" + + +class JudgeDecisionType(enum.Enum): + CONTINUE = "continue" + EXIT = "exit" + + +class AgentRole(enum.Enum): + MANAGER = "manager" # Orchestrates other agents + WORKER = "worker" # Performs specialized tasks + CRITIC = "critic" # Critiques and validates + JUDGE = "judge" # Makes final decisions in debates + MEMORY = "memory" # Manages long-term memory + + +class SubagentMode(enum.Enum): + SEQUENTIAL = "sequential" # Execute one at a time + PARALLEL = "parallel" # Execute concurrently + DAG = "dag" # Execute following dependency graph + + +class ToolCallMode(enum.Enum): + SYNC = "sync" # Wait for result + ASYNC = "async" # Fire and forget + STREAMING = "streaming" # Stream results + + +class MemoryScope(enum.Enum): + AGENT = "agent" # Private to single agent + TEAM = "team" # Shared within team + GLOBAL = "global" # Shared across all agents + + +class ExecutionStatus(enum.Enum): + PENDING = "pending" + RUNNING = "running" + COMPLETED = "completed" + FAILED = "failed" + TIMEOUT = "timeout" diff --git a/packages/agents/octobot_agents/errors.py b/packages/agents/octobot_agents/errors.py new file mode 100644 index 0000000000..26b0c12377 --- /dev/null +++ b/packages/agents/octobot_agents/errors.py @@ -0,0 +1,78 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +class AgentError(Exception): + """Base exception for all octobot_agents errors.""" + + +class TeamConfigurationError(AgentError): + """Raised when a team is misconfigured.""" + + +class MissingManagerError(TeamConfigurationError): + """Raised when a team requires a manager but none is provided.""" + + +class MissingRequiredInputError(AgentError): + """Raised when required input data is missing.""" + + +class AgentConfigurationError(AgentError): + """Raised when an agent is misconfigured.""" + + +class StorageError(AgentError): + """Raised when there's an error with storage operations.""" + + +class UnsupportedStorageTypeError(StorageError): + """Raised when an unsupported storage type is requested.""" + + +class DeepAgentError(AgentError): + """Base exception for Deep Agent related errors.""" + + +class DeepAgentNotAvailableError(DeepAgentError): + """Raised when deep_agents package is not installed.""" + + +class SubagentError(DeepAgentError): + """Raised when there's an error with subagent execution.""" + + +class SubagentTimeoutError(SubagentError): + """Raised when a subagent execution times out.""" + + +class SupervisorError(DeepAgentError): + """Raised when the supervisor agent encounters an error.""" + + +class DebateError(AgentError): + """Raised when there's an error in the debate workflow.""" + + +class DebateConvergenceError(DebateError): + """Raised when debate fails to converge within max rounds.""" + + +class MemoryPathError(StorageError): + """Raised when there's an error with memory path operations.""" + + +class ToolExecutionError(AgentError): + """Raised when a tool execution fails.""" diff --git a/packages/agents/octobot_agents/models.py b/packages/agents/octobot_agents/models.py new file mode 100644 index 0000000000..00672ea45d --- /dev/null +++ b/packages/agents/octobot_agents/models.py @@ -0,0 +1,763 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing +from typing import Any, Dict, List, Optional, Union, TypedDict + +import pydantic +from pydantic import BaseModel, ConfigDict + +import octobot_agents.constants as constants +import octobot_agents.errors as errors +import octobot_agents.utils as utils + +if typing.TYPE_CHECKING: + from octobot_agents.team.channels.agents_team import AbstractAgentsTeamChannelProducer + + +class AgentBaseModel(BaseModel): + """ + Base Pydantic model for OctoBot agents with JSON schema strict mode control. + + Models can override __strict_json_schema__ to control strict mode: + - False (default): Disable strict mode (for models with Union types) + - True: Enable strict mode (for models without Union types) + """ + __strict_json_schema__: bool = False + + @staticmethod + def normalize_agent_error(error: Any) -> Optional[str]: + """ + Normalize common non-actionable error strings from LLM outputs. + + Returns a cleaned error string, or None if the error should be ignored. + """ + if error is None: + return None + error_value = str(error).strip() + if not error_value: + return None + lowered = error_value.lower() + ignorable_tokens = ( + "invalid json", + "json output is invalid", + "error parsing json", + "no error", + "no error found", + "no", + "none", + "null", + "error", + ) + if any(token in lowered for token in ignorable_tokens): + return None + return error_value + + @staticmethod + def recover_json_from_error(error: Any) -> Optional[Dict[str, Any]]: + """ + Try to extract JSON payloads from error strings produced by LLM parsers. + """ + if error is None: + return None + error_text = str(error) + marker = "Error parsing JSON from response" + if marker in error_text: + error_text = error_text.split(marker, 1)[-1].strip() + if not error_text: + return None + return utils.extract_json_from_content(error_text) + + @staticmethod + def normalize_tool_call_response( + response_data: Any, + finish_tool_name: Optional[str] = None, + ) -> tuple[Any, Optional[str]]: + """ + Normalize tool-call responses coming from LLMs. + + Returns a tuple of (normalized_response, error_message_if_any). + """ + if response_data is None: + return None, "LLM did not return any tool calls." + + # If we got an error dict, try to recover JSON from it + if isinstance(response_data, dict) and "error" in response_data and "tool_name" not in response_data: + error_msg = response_data.get("error", "Unknown error") + if finish_tool_name and (not str(error_msg).strip()): + return {"tool_name": finish_tool_name, "arguments": {}}, None + if finish_tool_name and ("<finish>" in error_msg or "</finish>" in error_msg): + return {"tool_name": finish_tool_name, "arguments": {}}, None + extracted = AgentBaseModel.recover_json_from_error(error_msg) + if extracted: + response_data = extracted + else: + return None, error_msg + + # If we got a raw string, try to extract JSON + if isinstance(response_data, str): + if finish_tool_name and not response_data.strip(): + return {"tool_name": finish_tool_name, "arguments": {}}, None + if finish_tool_name and ("<finish>" in response_data or "</finish>" in response_data): + return {"tool_name": finish_tool_name, "arguments": {}}, None + extracted = AgentBaseModel.recover_json_from_error(response_data) + if extracted: + response_data = extracted + + # Map finish-like payloads into a finish tool call + if isinstance(response_data, dict) and "tool_name" not in response_data and finish_tool_name: + if "team_name" in response_data or "current_results" in response_data: + return {"tool_name": finish_tool_name, "arguments": {}}, None + + return response_data, None + +class AgentInstruction(AgentBaseModel): + """Instruction to send to an agent via channel.modify()""" + model_config = ConfigDict(extra="forbid") + + modification_type: str # One of MODIFICATION_ADDITIONAL_INSTRUCTIONS, MODIFICATION_CUSTOM_PROMPT, etc. + value: Union[str, Dict[str, typing.Any]] # The instruction content (string for prompts, dict for hints) + + +class DebatePhaseConfig(AgentBaseModel): + """Configuration for a debate phase: debators take turns, judge decides continue or exit.""" + model_config = ConfigDict(extra="forbid") + + debator_agent_names: List[str] # Agent names that debate (e.g. Bull, Bear) + judge_agent_name: str # Agent name of the judge that decides continue/exit + max_rounds: int = 3 # Maximum debate rounds before forcing exit + + +class JudgeDecision(AgentBaseModel): + """Result of a judge agent execute(): continue or exit the debate, with reasoning and optional summary.""" + model_config = ConfigDict(extra="forbid") + + decision: str # JudgeDecisionType.CONTINUE.value or JudgeDecisionType.EXIT.value + reasoning: str + summary: Optional[str] = None # When decision is exit, concise synthesis; when continue, None + + +class ExecutionStep(AgentBaseModel): + """Single step in the execution plan""" + model_config = ConfigDict(extra="forbid") + + # Agent name may be omitted for debate steps (we'll fill a default). + agent_name: Optional[str] = None + # Allow instructions as either structured AgentInstruction objects or simple strings + instructions: Optional[Union[List[AgentInstruction], List[str]]] = None # Instructions to send before execution + wait_for: Optional[List[str]] = None # Agent names to wait for before executing + skip: bool = False # Skip this agent in this iteration + # Debate step: when step_type is StepType.DEBATE.value, use debate_config instead of single agent + step_type: Optional[str] = None # StepType.AGENT.value (default) or StepType.DEBATE.value + debate_config: Optional[DebatePhaseConfig] = None # Required when step_type == StepType.DEBATE.value + + @pydantic.model_validator(mode="after") + def validate_and_normalize(self) -> "ExecutionStep": + """Normalize and validate step after construction. + + - For debate steps, set default agent_name if missing. + - Convert plain string instructions into AgentInstruction objects. + - Enforce agent_name requirement for non-debate steps. + """ + # Fill default agent_name for debate steps + if self.step_type == "debate" and not self.agent_name: + if self.debate_config and getattr(self.debate_config, "judge_agent_name", None): + self.agent_name = f"debate_{self.debate_config.judge_agent_name}" + else: + self.agent_name = "debate_phase" + + # Require agent_name for agent steps + if self.step_type in (None, "agent") and not self.agent_name: + raise ValueError("agent_name is required for agent steps") + + # Normalize instructions: convert plain strings to AgentInstruction + if self.instructions: + normalized: List[AgentInstruction] = [] + for instr in self.instructions: + if isinstance(instr, str): + normalized.append( + AgentInstruction( + modification_type=constants.MODIFICATION_ADDITIONAL_INSTRUCTIONS, + value=instr, + ) + ) + else: + normalized.append(instr) + self.instructions = normalized + + return self + + +class ExecutionPlan(AgentBaseModel): + """Complete execution plan - returned by plan-driven AI managers like AIPlanTeamManagerAgent""" + model_config = ConfigDict(extra="forbid") + + steps: List[ExecutionStep] + loop: bool = False # Whether to loop execution + loop_condition: Optional[str] = None # Condition description for looping + max_iterations: Optional[int] = None # Maximum loop iterations + + @classmethod + def model_validate_with_agent_names( + cls, + data: Any, + allowed_agent_names: List[str], + ) -> "ExecutionPlan": + plan = cls.model_validate(data) + allowed = set(allowed_agent_names) + allowed_map = {name.lower(): name for name in allowed_agent_names} + for step in plan.steps: + try: + step_type = step.step_type + agent_name = step.agent_name + except Exception: + continue + if step_type in (None, "agent") and agent_name not in allowed: + try: + key = agent_name.lower() + except Exception: + raise ValueError(f"Invalid agent_name: {agent_name}") + normalized = allowed_map.get(key) + if normalized is None: + import difflib + matches = difflib.get_close_matches(key, allowed_map.keys(), n=1, cutoff=0.6) + if matches: + normalized = allowed_map[matches[0]] + if normalized is None: + raise ValueError(f"Invalid agent_name: {agent_name}") + step.agent_name = normalized + return plan + + @pydantic.model_validator(mode="after") + def normalize_loop_settings(self) -> "ExecutionPlan": + # Clamp pathological or invalid max_iterations to a sane upper bound + max_cap = 3 + if self.max_iterations is None: + return self + try: + max_iter = int(self.max_iterations) + except Exception: + self.max_iterations = 1 + return self + if max_iter < 1: + self.max_iterations = 1 + elif max_iter > max_cap: + self.max_iterations = max_cap + else: + self.max_iterations = max_iter + return self + + @classmethod + def model_validate_or_self(cls, data: Any) -> "ExecutionPlan": + """ + Validate dict to model, or return model if already validated. + + Args: + data: Either a dict to validate or an ExecutionPlan instance. + + Returns: + ExecutionPlan model instance. + """ + if isinstance(data, cls): + return data + return cls.model_validate(data) + + def to_dict(self) -> dict: + """ + Convert to dict. + + Returns: + Dict representation of the execution plan. + """ + try: + return self.model_dump() + except AttributeError: + # Fallback for Pydantic v1 + return self.dict() + + +class ManagerToolCall(AgentBaseModel): + """Tool call from LLM in tools-driven manager.""" + model_config = ConfigDict(extra="forbid") + + tool_name: str # Name of the tool to call (e.g., "run_agent", "run_debate", "finish") + arguments: Dict[str, Any] # Arguments for the tool call + + +class RunAgentArgs(AgentBaseModel): + """Arguments for run_agent tool.""" + model_config = ConfigDict(extra="forbid") + + # Allow agent_name to be optional here; manager may fill defaults or validate later. + agent_name: Optional[str] = None # Name of the agent to run + instructions: Optional[Union[List[AgentInstruction], List[str]]] = None # Instructions to send before execution + + @pydantic.model_validator(mode="before") + @classmethod + def normalize_instructions(cls, data: Any) -> Any: + if isinstance(data, dict) and "instructions" in data: + instructions = data["instructions"] + # If instructions is a string, wrap it in a list + if isinstance(instructions, str): + data["instructions"] = [instructions] + # If a single dict is provided, wrap it to normalize later + elif isinstance(instructions, dict): + # Ignore schema-like dicts accidentally passed as instructions + if "$ref" in instructions and "type" in instructions and \ + "modification_type" not in instructions and \ + "value" not in instructions and \ + "description" not in instructions: + data["instructions"] = [] + return data + data["instructions"] = [instructions] + # Normalize list entries that only provide a description + elif isinstance(instructions, list): + normalized = [] + for instr in instructions: + if isinstance(instr, dict) and "$ref" in instr and "type" in instr and \ + "modification_type" not in instr and "value" not in instr and "description" not in instr: + continue + if isinstance(instr, dict) and "description" in instr and \ + "modification_type" not in instr and "value" not in instr: + normalized.append({ + "modification_type": constants.MODIFICATION_ADDITIONAL_INSTRUCTIONS, + "value": instr["description"], + }) + elif isinstance(instr, dict) and "modification_type" not in instr and "value" not in instr: + normalized.append({ + "modification_type": constants.MODIFICATION_ADDITIONAL_INSTRUCTIONS, + "value": instr, + }) + else: + normalized.append(instr) + data["instructions"] = normalized + return data + + +class RunDebateArgs(AgentBaseModel): + """Arguments for run_debate tool.""" + model_config = ConfigDict(extra="forbid") + + debator_agent_names: List[str] # Agent names that debate + judge_agent_name: str # Agent name of the judge + max_rounds: int = 3 # Maximum debate rounds + + +class ManagerState(AgentBaseModel): + """State maintained during tools-driven manager execution.""" + model_config = ConfigDict(extra="forbid") + + completed_agents: List[str] # Names of agents that have been executed + results: Dict[str, Any] # Results from completed agents + initial_data: Dict[str, Any] # Original input data + tool_call_history: List[ManagerToolCall] # History of tool calls made + + +class ManagerResult(AgentBaseModel): + """Result returned by tools-driven manager after execution.""" + model_config = ConfigDict(extra="forbid") + + completed_agents: List[str] # Names of agents that were executed + results: Dict[str, Any] # Results from completed agents (agent_name -> result) + tool_calls_used: int # Number of tool calls made during execution + + +class AgentImprovement(AgentBaseModel): + """Improvements needed for a specific agent.""" + __strict_json_schema__ = True + + agent_name: str # Name of the agent + improvements: List[str] # Specific improvements for this agent + issues: List[str] # Agent-specific issues + errors: List[str] # Agent-specific errors + reasoning: str # Why this agent needs improvement + + @classmethod + def model_validate_or_self(cls, data: Any) -> "AgentImprovement": + """ + Validate dict to model, or return model if already validated. + + Args: + data: Either a dict to validate or an AgentImprovement instance. + + Returns: + AgentImprovement model instance. + """ + if isinstance(data, cls): + return data + return cls.model_validate(data) + + +class CriticAnalysis(AgentBaseModel): + """Analysis result from CriticAgent.""" + __strict_json_schema__ = True + + issues: List[str] # General problems found (team-level) + errors: List[str] # General errors encountered (team-level) + inconsistencies: List[str] # Inconsistencies detected (team-level) + optimizations: List[str] # General optimization opportunities (team-level) + summary: str # Overall analysis summary + agent_improvements: Dict[str, AgentImprovement] # Agent-specific improvements + # Key: agent_name, Value: AgentImprovement + # Only includes agents that need improvements + # If agent not in dict, no improvements needed for that agent + + @classmethod + def model_validate_or_self(cls, data: Any) -> "CriticAnalysis": + """ + Validate dict to model, or return model if already validated. + + Args: + data: Either a dict to validate or a CriticAnalysis instance. + + Returns: + CriticAnalysis model instance. + """ + if isinstance(data, cls): + return data + return cls.model_validate(data) + + def get_agent_improvements(self) -> Dict[str, AgentImprovement]: + """ + Get agent improvements. + + Returns: + Dict mapping agent names to AgentImprovement objects. + """ + return self.agent_improvements + + def get_summary(self) -> str: + """ + Get summary. + + Returns: + Summary string. + """ + return self.summary + + def get_issues(self) -> List[str]: + """ + Get issues. + + Returns: + List of issue strings. + """ + return self.issues + + +class MemoryOperation(AgentBaseModel): + """Result of a memory operation.""" + success: bool + operations: List[str] # ["generated", "merged", "updated", "removed", "grouped"] + memory_ids: List[str] # UUIDs of affected memories (across all agents) + agent_updates: Dict[str, List[str]] # Map of agent_name -> list of memory_ids updated for that agent + agents_processed: List[str] # List of agent names that were processed + agents_skipped: List[str] # List of agent names that were skipped (no improvements needed) + message: str # Description of what happened + + +class MemoryStorageModel(AgentBaseModel): + """ + Pydantic model for memory storage with enforced structure and length limits. + + Ensures memories contain concise, precise instructions/actions/advice. + """ + title: str = pydantic.Field( + ..., + min_length=1, + max_length=constants.MEMORY_TITLE_MAX_LENGTH, + description=f"Short, clear title summarizing the memory (max {constants.MEMORY_TITLE_MAX_LENGTH} chars)" + ) + context: str = pydantic.Field( + ..., + min_length=1, + max_length=constants.MEMORY_CONTEXT_MAX_LENGTH, + description=f"Context explaining what problem this addresses (max {constants.MEMORY_CONTEXT_MAX_LENGTH} chars)" + ) + content: str = pydantic.Field( + ..., + min_length=1, + max_length=constants.MEMORY_CONTENT_MAX_LENGTH, + description=f"Concise, precise instructions/actions/advice (max {constants.MEMORY_CONTENT_MAX_LENGTH} chars). Should be summarized if longer." + ) + category: str = pydantic.Field( + default=constants.DEFAULT_CATEGORY, + description="Memory category" + ) + tags: typing.List[str] = pydantic.Field( + default_factory=list, + description="Tags for categorization" + ) + importance_score: float = pydantic.Field( + default=constants.DEFAULT_IMPORTANCE_SCORE, + ge=0.0, + le=1.0, + description="Importance score (0.0-1.0)" + ) + confidence_score: float = pydantic.Field( + default=constants.DEFAULT_CONFIDENCE_SCORE, + ge=0.0, + le=1.0, + description="Confidence score (0.0-1.0)" + ) + + @pydantic.field_validator('title', 'context', 'content') + @classmethod + def validate_not_empty(cls, v: str) -> str: + """Ensure fields are not just whitespace.""" + if not v or not v.strip(): + raise errors.AgentError("Field cannot be empty or whitespace only") + return v.strip() + + @pydantic.field_validator('content') + @classmethod + def validate_content_format(cls, v: str) -> str: + """Ensure content is concise and actionable.""" + # Remove excessive whitespace + v = ' '.join(v.split()) + return v + + +class MemoryInstruction(AgentBaseModel): + """Instruction structure for a single memory.""" + title: str = pydantic.Field( + ..., + min_length=1, + max_length=constants.MEMORY_TITLE_MAX_LENGTH, + description=f"Short, clear title (max {constants.MEMORY_TITLE_MAX_LENGTH} chars)" + ) + structured_actions: typing.List[str] = pydantic.Field( + default_factory=list, + description="Short, direct command-like actions (imperative format)" + ) + guidance: typing.Optional[str] = pydantic.Field( + default=None, + max_length=100, + description="Optional very short guidance (max 100 chars, only if needed)" + ) + context: str = pydantic.Field( + ..., + min_length=1, + max_length=constants.MEMORY_CONTEXT_MAX_LENGTH, + description=f"Short context about what problem this addresses (max {constants.MEMORY_CONTEXT_MAX_LENGTH} chars)" + ) + + def build_content(self) -> str: + """ + Build content string as simple command list - no headers, just direct commands. + + Ensures content does not exceed MEMORY_CONTENT_MAX_LENGTH. + Format: Simple list of commands, one per line, no numbering or headers. + """ + content_parts = [] + + # Format as simple command list - no headers, remove numbering + for action in self.structured_actions: + # Remove numbering if present (e.g., "1. ", "2. "), make imperative + action_clean = action.lstrip("0123456789. ").strip() + if action_clean: + content_parts.append(action_clean) + + # Only add guidance if very short (one sentence max) + if self.guidance and len(self.guidance) < 100: + try: + guidance_clean = self.guidance.strip() + except AttributeError: + guidance_clean = "" + if guidance_clean: + content_parts.append(guidance_clean) + + content = "\n".join(content_parts) if content_parts else "Follow instructions" + + # Truncate if exceeds limit (shouldn't happen if LLM follows instructions, but safety check) + if len(content) > constants.MEMORY_CONTENT_MAX_LENGTH: + truncated = content[:constants.MEMORY_CONTENT_MAX_LENGTH] + # Try to truncate at line boundary (prefer) or sentence boundary + last_newline = truncated.rfind('\n') + last_period = truncated.rfind('.') + last_break = max(last_newline, last_period) + if last_break > constants.MEMORY_CONTENT_MAX_LENGTH * 0.7: + content = truncated[:last_break + 1].strip() + else: + content = truncated.strip() + + return content + + @classmethod + def model_validate_or_self(cls, data: typing.Any) -> "MemoryInstruction": + """Validate dict to model, or return model if already validated.""" + if isinstance(data, cls): + return data + return cls.model_validate(data) + + +class AgentMemoryInstruction(AgentBaseModel): + """LLM response structure for agent memory instructions.""" + __strict_json_schema__ = True + + agent_name: str + instructions: MemoryInstruction + + @classmethod + def model_validate_or_self(cls, data: typing.Any) -> "AgentMemoryInstruction": + """Validate dict to model, or return model if already validated.""" + if isinstance(data, cls): + return data + return cls.model_validate(data) + + +class AgentMemoryInstructionsList(AgentBaseModel): + """Wrapper for list of agent memory instructions.""" + __strict_json_schema__ = True + + instructions: typing.List[AgentMemoryInstruction] + + + +class ManagerInput(TypedDict, total=False): + """Input data structure for manager agent execute() method.""" + team_producer: "AbstractAgentsTeamChannelProducer" + initial_data: Dict[str, Any] + instructions: Optional[str] + + +class CriticInput(TypedDict, total=False): + """Input data structure for critic agent execute() method.""" + team_producer: "AbstractAgentsTeamChannelProducer" + execution_plan: "ExecutionPlan" + execution_results: Dict[str, Any] + agent_outputs: Dict[str, Any] + execution_metadata: Dict[str, Any] + + +class JudgeInput(TypedDict, total=False): + """Input data structure for judge agent execute() method (debate step).""" + debate_history: List[Dict[str, Any]] # List of {agent_name, message, round} + debator_agent_names: List[str] + current_round: int + max_rounds: int + _initial_state: Dict[str, Any] # Optional context from team initial_data + + +class MemoryInput(TypedDict, total=False): + """Input data structure for memory agent execute() method.""" + critic_analysis: "CriticAnalysis" + agent_outputs: Dict[str, Any] + execution_metadata: Dict[str, Any] + + +class SubagentConfig(AgentBaseModel): + """Configuration for a Deep Agent subagent.""" + model_config = ConfigDict(extra="forbid") + + name: str = pydantic.Field(..., description="Unique name for the subagent") + instructions: str = pydantic.Field(..., description="System instructions for the subagent") + tools: Optional[List[str]] = pydantic.Field(default=None, description="Tool names available to subagent") + model: Optional[str] = pydantic.Field(default=None, description="Model override") + handoff_back: bool = pydantic.Field(default=True, description="Whether to hand back to supervisor") + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary format for Deep Agents.""" + result: Dict[str, Any] = { + "name": self.name, + "instructions": self.instructions, + } + if self.tools: + result["tools"] = self.tools + if self.model: + result["model"] = self.model + if self.handoff_back: + result["handoff_back"] = True + return result + + +class MemoryEntry(AgentBaseModel): + """A single memory entry for Deep Agent persistent storage.""" + model_config = ConfigDict(extra="forbid") + + key: str = pydantic.Field(..., description="Memory key/path") + value: Any = pydantic.Field(..., description="Memory value") + timestamp: Optional[str] = pydantic.Field(default=None, description="ISO timestamp") + metadata: Optional[Dict[str, Any]] = pydantic.Field(default=None, description="Additional metadata") + + +class TodoItem(AgentBaseModel): + """A todo item for Deep Agent write_todos planning.""" + model_config = ConfigDict(extra="forbid") + + task: str = pydantic.Field(..., description="Task description") + status: str = pydantic.Field(default="pending", description="Task status: pending, in_progress, done") + assigned_to: Optional[str] = pydantic.Field(default=None, description="Subagent name assigned to task") + priority: int = pydantic.Field(default=1, description="Priority 1-5 (1=highest)") + depends_on: Optional[List[str]] = pydantic.Field(default=None, description="Task dependencies") + + +class DeepAgentResult(AgentBaseModel): + """Result from Deep Agent execution.""" + model_config = ConfigDict(extra="forbid") + + output: Any = pydantic.Field(..., description="Agent output/response") + iterations: int = pydantic.Field(default=0, description="Number of reasoning iterations") + subagents_called: List[str] = pydantic.Field(default_factory=list, description="Subagents that were invoked") + tools_called: List[str] = pydantic.Field(default_factory=list, description="Tools that were called") + memory_operations: int = pydantic.Field(default=0, description="Number of memory read/write operations") + + @classmethod + def from_agent_output(cls, output: Any, metadata: Optional[Dict[str, Any]] = None) -> "DeepAgentResult": + """Create result from raw agent output.""" + metadata = metadata or {} + return cls( + output=output, + iterations=metadata.get("iterations", 0), + subagents_called=metadata.get("subagents_called", []), + tools_called=metadata.get("tools_called", []), + memory_operations=metadata.get("memory_operations", 0), + ) + + +class TeamExecutionResult(AgentBaseModel): + """Result from a complete team execution using Deep Agents.""" + model_config = ConfigDict(extra="forbid") + + team_name: str = pydantic.Field(..., description="Name of the team") + final_output: Any = pydantic.Field(..., description="Final synthesized output") + worker_results: Dict[str, DeepAgentResult] = pydantic.Field( + default_factory=dict, + description="Results from each worker agent" + ) + debate_results: Optional[Dict[str, Any]] = pydantic.Field( + default=None, + description="Results from debate phase if enabled" + ) + total_iterations: int = pydantic.Field(default=0, description="Total iterations across all agents") + execution_time_ms: Optional[float] = pydantic.Field(default=None, description="Total execution time") + + def get_worker_output(self, worker_name: str) -> Optional[Any]: + """Get output from a specific worker.""" + if worker_name in self.worker_results: + return self.worker_results[worker_name].output + return None + + +class SupervisorState(AgentBaseModel): + """State maintained by supervisor during team orchestration.""" + model_config = ConfigDict(extra="forbid") + + pending_workers: List[str] = pydantic.Field(default_factory=list, description="Workers not yet called") + completed_workers: List[str] = pydantic.Field(default_factory=list, description="Workers that completed") + worker_outputs: Dict[str, Any] = pydantic.Field(default_factory=dict, description="Outputs from workers") + current_todos: List[TodoItem] = pydantic.Field(default_factory=list, description="Current todo list") + debate_round: int = pydantic.Field(default=0, description="Current debate round if in debate") + phase: str = pydantic.Field(default="planning", description="Current phase: planning, delegating, synthesizing, debating") diff --git a/packages/agents/octobot_agents/storage/__init__.py b/packages/agents/octobot_agents/storage/__init__.py new file mode 100644 index 0000000000..9f6d55b790 --- /dev/null +++ b/packages/agents/octobot_agents/storage/__init__.py @@ -0,0 +1,41 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.storage import memory, history +from octobot_agents.storage.memory import ( + AbstractMemoryStorage, + JSONMemoryStorage, + create_memory_storage, + get_memory_tools, + execute_memory_tool, +) +from octobot_agents.storage.history import ( + AbstractAnalysisStorage, + JSONAnalysisStorage, + create_analysis_storage, +) + +__all__ = [ + "AbstractMemoryStorage", + "JSONMemoryStorage", + "create_memory_storage", + "get_memory_tools", + "execute_memory_tool", + "AbstractAnalysisStorage", + "JSONAnalysisStorage", + "create_analysis_storage", +] + diff --git a/packages/agents/octobot_agents/storage/history/__init__.py b/packages/agents/octobot_agents/storage/history/__init__.py new file mode 100644 index 0000000000..6882fe0597 --- /dev/null +++ b/packages/agents/octobot_agents/storage/history/__init__.py @@ -0,0 +1,34 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.storage.history import abstract_analysis_storage +from octobot_agents.storage.history.abstract_analysis_storage import AbstractAnalysisStorage + +from octobot_agents.storage.history import json_analysis_storage +from octobot_agents.storage.history.json_analysis_storage import ( + JSONAnalysisStorage, +) + +from octobot_agents.storage.history import storage +from octobot_agents.storage.history.storage import ( + create_analysis_storage, +) + +__all__ = [ + "AbstractAnalysisStorage", + "JSONAnalysisStorage", + "create_analysis_storage", +] diff --git a/packages/agents/octobot_agents/storage/history/abstract_analysis_storage.py b/packages/agents/octobot_agents/storage/history/abstract_analysis_storage.py new file mode 100644 index 0000000000..78d28f9281 --- /dev/null +++ b/packages/agents/octobot_agents/storage/history/abstract_analysis_storage.py @@ -0,0 +1,65 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +import abc +import typing + + +class AbstractAnalysisStorage(abc.ABC): + """ + Abstract base class for team analysis storage. + + Handles persistence of agent analysis results for debugging and audit purposes. + Implementations manage directory structure, file format, and cleanup operations. + """ + + @abc.abstractmethod + def save_analysis( + self, + agent_name: str, + result: typing.Any, + team_name: str, + team_id: typing.Optional[str], + ) -> None: + """ + Save analysis results to persistent storage. + + Args: + agent_name: Name of the agent producing the analysis. + result: The analysis result to save (dict, str, or other serializable). + team_name: Name of the team. + team_id: ID of the team instance (optional). + """ + raise NotImplementedError("save_analysis must be implemented by subclasses") + + @abc.abstractmethod + def clear_transient_files(self) -> None: + """ + Clear analysis files from previous runs. + + Removes all analysis files to ensure clean state for next execution. + """ + raise NotImplementedError("clear_transient_files must be implemented by subclasses") + + @abc.abstractmethod + def get_analysis_path(self) -> str: + """ + Get the base directory path for analysis storage. + + Returns: + The directory path where analysis files are stored. + """ + raise NotImplementedError("get_analysis_path must be implemented by subclasses") diff --git a/packages/agents/octobot_agents/storage/history/json_analysis_storage.py b/packages/agents/octobot_agents/storage/history/json_analysis_storage.py new file mode 100644 index 0000000000..9189284934 --- /dev/null +++ b/packages/agents/octobot_agents/storage/history/json_analysis_storage.py @@ -0,0 +1,100 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +import json +import os +import typing + +import octobot_commons.logging as logging + +import octobot_agents.storage.history.abstract_analysis_storage as abstract_storage + + +class JSONAnalysisStorage(abstract_storage.AbstractAnalysisStorage): + """ + JSON file-based storage for team analysis results. + + Saves agent analysis results to individual JSON files in an analysis directory + for cross-agent access and debugging/audit purposes. + + Directory structure: + analysis/ + {agent_name}.json + """ + + def __init__( + self, + analysis_dir: str = "analysis", + ): + self.analysis_dir = analysis_dir + self.logger = logging.get_logger(self.__class__.__name__) + + def get_analysis_path(self) -> str: + return os.path.join(os.getcwd(), self.analysis_dir) + + def save_analysis( + self, + agent_name: str, + result: typing.Any, + team_name: str, + team_id: typing.Optional[str], + ) -> None: + try: + base_dir = self.get_analysis_path() + if not os.path.exists(base_dir): + os.makedirs(base_dir, exist_ok=True) + + file_path = os.path.join(base_dir, f"{agent_name}.json") + if isinstance(result, dict): + analysis_data = result + else: + try: + analysis_data = vars(result) if hasattr(result, "__dict__") else str(result) + except Exception: + analysis_data = str(result) + + output_data = { + "agent_name": agent_name, + "team_name": team_name, + "team_id": team_id, + "result": analysis_data, + } + + with open(file_path, "w", encoding="utf-8") as f: + json.dump(output_data, f, indent=2, default=str) + + self.logger.debug(f"Analysis saved for {agent_name} to {file_path}") + except Exception as e: + self.logger.warning(f"Failed to save analysis for {agent_name}: {e}") + + def clear_transient_files(self) -> None: + try: + base_dir = self.get_analysis_path() + cleared_count = 0 + for filename in os.listdir(base_dir): + if filename.endswith(".json"): + file_path = os.path.join(base_dir, filename) + try: + os.remove(file_path) + cleared_count += 1 + except Exception as e: + self.logger.warning(f"Failed to remove {file_path}: {e}") + + self.logger.debug(f"Cleared {cleared_count} analysis files from {base_dir}") + except FileNotFoundError: + self.logger.debug(f"Analysis directory {base_dir} does not exist; nothing to clear") + except Exception as e: + self.logger.warning(f"Failed to clear transient files: {e}") diff --git a/packages/agents/octobot_agents/storage/history/storage.py b/packages/agents/octobot_agents/storage/history/storage.py new file mode 100644 index 0000000000..4e40c01ab9 --- /dev/null +++ b/packages/agents/octobot_agents/storage/history/storage.py @@ -0,0 +1,33 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +import typing + +import octobot_agents.enums as enums +import octobot_agents.constants as constants +import octobot_agents.storage.history.json_analysis_storage as json_storage + +if typing.TYPE_CHECKING: + import octobot_agents.storage.history.abstract_analysis_storage as abstract_storage + +def create_analysis_storage( + storage_type: enums.MemoryStorageType = enums.MemoryStorageType.JSON, + analysis_dir: str = constants.DEFAULT_ANALYSIS_DIR, +) -> "abstract_storage.AbstractAnalysisStorage": + if storage_type == enums.MemoryStorageType.JSON: + return json_storage.JSONAnalysisStorage(analysis_dir=analysis_dir) + else: + raise ValueError(f"Unknown storage type: {storage_type}") diff --git a/packages/agents/octobot_agents/storage/memory/__init__.py b/packages/agents/octobot_agents/storage/memory/__init__.py new file mode 100644 index 0000000000..e4c8bc1491 --- /dev/null +++ b/packages/agents/octobot_agents/storage/memory/__init__.py @@ -0,0 +1,41 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.storage.memory import abstract_memory_storage +from octobot_agents.storage.memory.abstract_memory_storage import ( + AbstractMemoryStorage +) +from octobot_agents.storage.memory import json_memory_storage +from octobot_agents.storage.memory.json_memory_storage import ( + JSONMemoryStorage +) +from octobot_agents.storage.memory import factory +from octobot_agents.storage.memory.factory import ( + create_memory_storage, +) +from octobot_agents.storage.memory import tools +from octobot_agents.storage.memory.tools import ( + get_memory_tools, + execute_memory_tool, +) + +__all__ = [ + "AbstractMemoryStorage", + "JSONMemoryStorage", + "create_memory_storage", + "get_memory_tools", + "execute_memory_tool", +] diff --git a/packages/agents/octobot_agents/storage/memory/abstract_memory_storage.py b/packages/agents/octobot_agents/storage/memory/abstract_memory_storage.py new file mode 100644 index 0000000000..63b326cf15 --- /dev/null +++ b/packages/agents/octobot_agents/storage/memory/abstract_memory_storage.py @@ -0,0 +1,180 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import abc +import typing + + +class AbstractMemoryStorage(abc.ABC): + """ + Abstract base class for memory storage. + + Defines the interface that all memory storage implementations must follow. + Memory storage is responsible for storing, retrieving, and managing + agent memories. + """ + + @abc.abstractmethod + def is_enabled(self) -> bool: + """ + Check if memory is enabled and available. + + Returns: + True if memory is enabled, False otherwise. + """ + raise NotImplementedError("is_enabled must be implemented by subclasses") + + @abc.abstractmethod + def extract_agent_id(self, input_data: typing.Any) -> str: + """ + Extract agent_id from input_data. + + Args: + input_data: Input data that may contain agent_id. + + Returns: + The agent_id string, or empty string if not found. + """ + raise NotImplementedError("extract_agent_id must be implemented by subclasses") + + @abc.abstractmethod + async def search_memories( + self, + query: str, + input_data: typing.Any, + limit: typing.Optional[int] = None, + ) -> typing.List[dict]: + """ + Search for relevant memories. + + Args: + query: Search query. + input_data: Input data containing agent_id or other context. + limit: Maximum memories to retrieve (defaults to search_limit). + + Returns: + List of memory dictionaries with 'memory' and 'metadata' keys. + """ + raise NotImplementedError("search_memories must be implemented by subclasses") + + @abc.abstractmethod + async def store_memory( + self, + messages: typing.List[dict], + input_data: typing.Any, + output: typing.Any = None, + metadata: typing.Optional[dict] = None, + ) -> None: + """ + Store memories from agent execution. + + Args: + messages: Conversation messages (user + assistant). + input_data: Input data for context. + output: Optional agent output. + metadata: Optional metadata to attach. + """ + raise NotImplementedError("store_memory must be implemented by subclasses") + + @abc.abstractmethod + def format_memories_for_prompt(self, memories: typing.List[dict]) -> str: + """ + Format memories for inclusion in prompts. + + Args: + memories: List of memory dictionaries. + + Returns: + Formatted string with memories, or empty string if none. + """ + raise NotImplementedError("format_memories_for_prompt must be implemented by subclasses") + + @abc.abstractmethod + async def store_execution_memory( + self, + input_data: typing.Any, + output: typing.Any, + user_message: typing.Optional[str] = None, + assistant_message: typing.Optional[str] = None, + metadata: typing.Optional[dict] = None, + ) -> None: + """ + Convenience method to store memory from agent execution. + + Automatically builds messages from input_data and output if not provided. + + Args: + input_data: The input data that was processed. + output: The agent's output/result. + user_message: Optional user message (auto-built if not provided). + assistant_message: Optional assistant message (auto-built if not provided). + metadata: Optional metadata to attach. + """ + raise NotImplementedError("store_execution_memory must be implemented by subclasses") + + @abc.abstractmethod + def get_all_memories(self) -> typing.List[dict]: + """ + Get all memories (for summaries). + + Returns: + List of all memory dictionaries. + """ + raise NotImplementedError("get_all_memories must be implemented by subclasses") + + @abc.abstractmethod + def get_memory_by_id(self, memory_id: str) -> typing.Optional[dict]: + """ + Get a memory by its ID. + + Args: + memory_id: The ID of the memory to retrieve. + + Returns: + The memory dictionary if found, None otherwise. + """ + raise NotImplementedError("get_memory_by_id must be implemented by subclasses") + + @abc.abstractmethod + def increment_memory_use(self, memory_id: str) -> None: + """ + Increment use_count for a memory. + + Args: + memory_id: The ID of the memory to update. + """ + raise NotImplementedError("increment_memory_use must be implemented by subclasses") + + @property + @abc.abstractmethod + def agent_version(self) -> str: + """ + Get the agent version. + + Returns: + The agent version string. + """ + raise NotImplementedError("agent_version property must be implemented by subclasses") + + @property + @abc.abstractmethod + def max_memories(self) -> int: + """ + Get the maximum number of memories. + + Returns: + The maximum number of memories that can be stored. + """ + raise NotImplementedError("max_memories property must be implemented by subclasses") diff --git a/packages/agents/octobot_agents/storage/memory/factory.py b/packages/agents/octobot_agents/storage/memory/factory.py new file mode 100644 index 0000000000..da0a31712e --- /dev/null +++ b/packages/agents/octobot_agents/storage/memory/factory.py @@ -0,0 +1,63 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import octobot_agents.storage.memory.abstract_memory_storage as abstract_memory_storage +import octobot_agents.enums as enums +import octobot_agents.storage.memory.json_memory_storage as json_memory_storage +import octobot_agents.constants as constants +import octobot_agents.errors as errors + + +def create_memory_storage( + storage_type: enums.MemoryStorageType, + agent_name: str, + agent_version: str, + enabled: bool = True, + search_limit: int = 5, + storage_enabled: bool = True, + agent_id_key: str = "agent_id", + max_memories: int = constants.DEFAULT_MAX_MEMORIES, +) -> abstract_memory_storage.AbstractMemoryStorage: + """ + Factory function to create a memory storage instance based on storage type. + + Args: + storage_type: The type of storage to create (MemoryStorageType enum). + agent_name: Name of the agent using this memory storage. + agent_version: Version of the agent. + enabled: Whether memory is enabled. + search_limit: Maximum number of memories to retrieve. + storage_enabled: Whether to store new memories. + agent_id_key: Key in input_data for agent_id. + max_memories: Maximum number of memories to store. + + Returns: + An instance of AbstractMemoryStorage corresponding to the storage_type. + + Raises: + ValueError: If storage_type is not supported. + """ + if storage_type == enums.MemoryStorageType.JSON: + return json_memory_storage.JSONMemoryStorage( + agent_name=agent_name, + agent_version=agent_version, + enabled=enabled, + search_limit=search_limit, + storage_enabled=storage_enabled, + agent_id_key=agent_id_key, + max_memories=max_memories, + ) + else: + raise errors.UnsupportedStorageTypeError(f"Unsupported memory storage type: {storage_type}") diff --git a/packages/agents/octobot_agents/storage/memory/json_memory_storage.py b/packages/agents/octobot_agents/storage/memory/json_memory_storage.py new file mode 100644 index 0000000000..29882aca42 --- /dev/null +++ b/packages/agents/octobot_agents/storage/memory/json_memory_storage.py @@ -0,0 +1,521 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import json +import os +import sys +import typing +import uuid + +import pydantic +import octobot_commons.constants as commons_constants +import octobot_commons.logging as logging + +import octobot_agents.storage.memory.abstract_memory_storage as abstract_memory_storage +import octobot_agents.constants as constants +import octobot_agents.models as models + +# Platform-specific file locking +fcntl = None +msvcrt = None +try: + if sys.platform != 'win32': + import fcntl + HAS_FCNTL = True + else: + import msvcrt + HAS_FCNTL = False + HAS_FILE_LOCKING = True +except ImportError: + HAS_FILE_LOCKING = False + + +class JSONMemoryStorage(abstract_memory_storage.AbstractMemoryStorage): + """ + Memory storage for AI agents using JSON file-based storage. + + Each agent has its own JSON file at `user/data/agents/memories/<agent_name>.json`. + Memory is stored with structured fields: id, title, context, content, category, tags, + importance_score, confidence_score, and metadata (with use_count). + """ + + def __init__( + self, + agent_name: str, + agent_version: str, + enabled: bool = True, + search_limit: int = 5, + storage_enabled: bool = True, + agent_id_key: str = "agent_id", + max_memories: int = constants.DEFAULT_MAX_MEMORIES, + ): + self.agent_name = agent_name + self._agent_version = agent_version + self.enabled = enabled + self.search_limit = search_limit + self.storage_enabled = storage_enabled + self.agent_id_key = agent_id_key + self._max_memories = max_memories + self.logger = logging.get_logger(f"{self.__class__.__name__}[{agent_name}]") + + self._memories: typing.List[dict] = [] + self._memory_file_path: typing.Optional[str] = None + + if self.enabled: + self._memory_file_path = self._get_memory_file_path() + self._ensure_directory_exists() + self._load_memories() + self.logger.debug(f"Memory storage initialized for {agent_name} with {len(self._memories)} memories") + + def _get_memory_file_path(self) -> str: + memory_dir = os.path.join( + commons_constants.USER_FOLDER, + commons_constants.DATA_FOLDER, + constants.MEMORY_FOLDER_NAME, + "memories" + ) + # Sanitize agent_name for filename + safe_agent_name = self.agent_name.replace("/", "_").replace("\\", "_") + return os.path.join(memory_dir, f"{safe_agent_name}{constants.MEMORY_FILE_EXTENSION}") + + def _ensure_directory_exists(self) -> None: + if self._memory_file_path: + directory = os.path.dirname(self._memory_file_path) + os.makedirs(directory, exist_ok=True) + + def _load_memories(self) -> None: + if not self._memory_file_path or not os.path.exists(self._memory_file_path): + self._memories = [] + return + + try: + with open(self._memory_file_path, 'r', encoding='utf-8') as f: + data = json.load(f) + + # Validate agent_version + stored_version = data.get("agent_version") + if stored_version and stored_version != self.agent_version: + self.logger.warning( + f"Memory file version mismatch for {self.agent_name}: " + f"stored={stored_version}, current={self.agent_version}" + ) + + self._memories = data.get("memories", []) + self.logger.debug(f"Loaded {len(self._memories)} memories from {self._memory_file_path}") + except (json.JSONDecodeError, IOError) as e: + self.logger.warning(f"Error loading memories from {self._memory_file_path}: {e}") + self._memories = [] + + def _save_memories(self) -> None: + if not self._memory_file_path: + return + + try: + # Use atomic write: write to temp file, then rename + temp_path = f"{self._memory_file_path}.tmp" + + with open(temp_path, 'w', encoding='utf-8') as f: + # Acquire exclusive lock if available + if HAS_FILE_LOCKING: + try: + if HAS_FCNTL: + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + else: + # Windows + file_size = os.path.getsize(temp_path) if os.path.exists(temp_path) else 0 + msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, file_size) + except (IOError, OSError) as e: + self.logger.warning(f"Could not acquire file lock: {e}") + + data = { + "agent_version": self.agent_version, + "memories": self._memories, + } + json.dump(data, f, indent=2, ensure_ascii=False) + f.flush() + os.fsync(f.fileno()) + + # Atomic rename + os.replace(temp_path, self._memory_file_path) + self.logger.debug(f"Saved {len(self._memories)} memories to {self._memory_file_path}") + except (IOError, OSError) as e: + self.logger.warning(f"Error saving memories to {self._memory_file_path}: {e}") + + def is_enabled(self) -> bool: + return self.enabled + + def extract_agent_id(self, input_data: typing.Any) -> str: + if isinstance(input_data, dict): + return input_data.get(self.agent_id_key, "") + return "" + + async def search_memories( + self, + query: str, + input_data: typing.Any, + limit: typing.Optional[int] = None, + ) -> typing.List[dict]: + if not self.is_enabled(): + return [] + + try: + limit = limit or self.search_limit + + # TODO: Implement Embedding-Based Search for better semantic matching + # - Use sentence-transformers with 'all-MiniLM-L6-v2' model + # - Generate embeddings when storing memories + # - Calculate cosine similarity for search queries + # - See plan documentation for detailed implementation guide + + # For now, return all memories as summaries (LLM will filter via tools) + # Sort by importance_score and confidence_score (highest first) + sorted_memories = sorted( + self._memories, + key=lambda m: ( + m.get("importance_score", 0.5) * 0.6 + + m.get("confidence_score", 0.5) * 0.4 + ), + reverse=True + ) + + # Return summaries (limit applied by LLM tool) + results = [] + for mem in sorted_memories[:limit]: + results.append({ + "memory": mem.get("content", ""), + "metadata": { + "id": mem.get("id"), + "title": mem.get("title", ""), + "context": mem.get("context", ""), + "category": mem.get("category", constants.DEFAULT_CATEGORY), + "tags": mem.get("tags", []), + "importance_score": mem.get("importance_score", constants.DEFAULT_IMPORTANCE_SCORE), + "confidence_score": mem.get("confidence_score", constants.DEFAULT_CONFIDENCE_SCORE), + } + }) + + if results: + self.logger.debug(f"Retrieved {len(results)} memory summaries") + return results + except Exception as e: + self.logger.warning(f"Error searching memories: {e}") + return [] + + def _truncate_content( + self, + title: str, + context: str, + content: str, + title_max_length: typing.Optional[int] = None, + context_max_length: typing.Optional[int] = None, + content_max_length: typing.Optional[int] = None, + ) -> typing.Tuple[str, str, str]: + title_max = title_max_length or constants.MEMORY_TITLE_MAX_LENGTH + context_max = context_max_length or constants.MEMORY_CONTEXT_MAX_LENGTH + content_max = content_max_length or constants.MEMORY_CONTENT_MAX_LENGTH + + # Truncate title if needed + if len(title) > title_max: + truncated_title = title[:title_max] + last_space = truncated_title.rfind(' ') + if last_space > title_max * 0.7: + title = truncated_title[:last_space].strip() + else: + title = truncated_title.strip() + + # Truncate context if needed + if len(context) > context_max: + truncated_context = context[:context_max] + last_space = truncated_context.rfind(' ') + if last_space > context_max * 0.7: + context = truncated_context[:last_space].strip() + else: + context = truncated_context.strip() + + # Truncate content if needed + if len(content) > content_max: + truncated = content[:content_max] + # Try to truncate at sentence boundary + last_period = truncated.rfind('.') + last_newline = truncated.rfind('\n') + last_break = max(last_period, last_newline) + if last_break > content_max * 0.7: + content = truncated[:last_break + 1].strip() + else: + content = truncated.strip() + + return title, context, content + + async def store_memory( + self, + messages: typing.List[dict], + input_data: typing.Any, + output: typing.Any = None, + metadata: typing.Optional[dict] = None, + ) -> None: + if not self.is_enabled() or not self.storage_enabled: + return + + try: + agent_id = self.extract_agent_id(input_data) + + # Extract title and context from metadata if provided, otherwise generate from messages + user_message = None + assistant_message = None + for msg in messages: + if msg.get("role") == "user": + user_message = msg.get("content", "") + elif msg.get("role") == "assistant": + assistant_message = msg.get("content", "") + + # Use title from metadata if provided, otherwise generate from user message + if metadata and metadata.get("title"): + title = metadata.get("title") + else: + title = (user_message[:50] if user_message else "Memory") if user_message else "Memory" + + # Use context from metadata if provided, otherwise generate from agent_id + if metadata and metadata.get("context"): + context = metadata.get("context") + else: + context = f"Agent execution context" + if agent_id: + context += f" for agent_id: {agent_id}" + + # Build content from messages + # If metadata has title/context, it's likely instructional content - use user_message directly + # Otherwise, format as conversation + if metadata and (metadata.get("title") or metadata.get("context")): + # Instructional content - use user_message directly without "User: " prefix + content = user_message if user_message else "" + else: + # Regular conversation memory - format with prefixes + content_parts = [] + if user_message: + content_parts.append(f"User: {user_message}") + if assistant_message: + content_parts.append(f"Assistant: {assistant_message}") + if output is not None: + if isinstance(output, dict): + content_parts.append(f"Output: {json.dumps(output, indent=2, default=str)}") + else: + content_parts.append(f"Output: {str(output)}") + content = "\n".join(content_parts) + + # Truncate content if needed to fit within limits (fallback safety check) + # Content should already be concise from memory agent generation + title, context, content = self._truncate_content( + title=title, + context=context, + content=content + ) + + # Extract category and tags from metadata + category = metadata.get("category", constants.DEFAULT_CATEGORY) if metadata else constants.DEFAULT_CATEGORY + tags = metadata.get("tags", []) if metadata else [] + importance_score = metadata.get("importance_score", constants.DEFAULT_IMPORTANCE_SCORE) if metadata else constants.DEFAULT_IMPORTANCE_SCORE + + # Create and validate MemoryStorageModel + try: + memory_model = models.MemoryStorageModel( + title=title, + context=context, + content=content, + category=category, + tags=tags, + importance_score=importance_score, + confidence_score=constants.DEFAULT_CONFIDENCE_SCORE, + ) + except pydantic.ValidationError as e: + self.logger.error(f"Memory validation failed: {e}") + raise + + # Create memory dict from validated model + base_metadata = { + self.agent_id_key: agent_id, + "use_count": 0, + } + + extra_metadata = {} + if metadata: + for key, value in metadata.items(): + if key not in { + "category", + "tags", + "importance_score", + "confidence_score", + "title", + "context", + }: + extra_metadata[key] = value + memory = { + "id": uuid.uuid4().hex, + "title": memory_model.title, + "context": memory_model.context, + "content": memory_model.content, + "category": memory_model.category, + "tags": memory_model.tags, + "importance_score": memory_model.importance_score, + "confidence_score": memory_model.confidence_score, + "metadata": { + **base_metadata, + **extra_metadata, + }, + } + + self._memories.append(memory) + + # Prune if needed + if len(self._memories) > self.max_memories: + self._prune_memories() + + self._save_memories() + self.logger.debug("Stored memory") + except Exception as e: + self.logger.warning(f"Error storing memory: {e}") + + def format_memories_for_prompt(self, memories: typing.List[dict]) -> str: + if not memories: + return "" + + memory_lines = [] + for mem in memories: + memory_text = mem.get("memory", "") + metadata = mem.get("metadata", {}) + if memory_text: + title = metadata.get("title", "") + context = metadata.get("context", "") + tags = metadata.get("tags", []) + category = metadata.get("category", "") + importance = metadata.get("importance_score", 0.5) + confidence = metadata.get("confidence_score", 0.5) + use_count = metadata.get("use_count", 0) + + line = f"- {memory_text}" + if title: + line = f"## {title}\n{line}" + if context: + line += f"\n Context: {context}" + if category: + line += f"\n Category: {category}" + if tags: + line += f"\n Tags: {', '.join(tags)}" + line += f"\n Importance: {importance}, Confidence: {confidence}, Used: {use_count} times" + memory_lines.append(line) + + if memory_lines: + return "\n".join(memory_lines) + return "" + + async def store_execution_memory( + self, + input_data: typing.Any, + output: typing.Any, + user_message: typing.Optional[str] = None, + assistant_message: typing.Optional[str] = None, + metadata: typing.Optional[dict] = None, + ) -> None: + if not self.is_enabled() or not self.storage_enabled: + return + + # Build messages if not provided + messages = [] + if user_message: + messages.append({"role": "user", "content": user_message}) + elif isinstance(input_data, dict): + # Auto-build user message from input_data + user_content = json.dumps(input_data, indent=2, default=str)[:500] + messages.append({"role": "user", "content": user_content}) + + if assistant_message: + messages.append({"role": "assistant", "content": assistant_message}) + elif output is not None: + # Auto-build assistant message from output + if isinstance(output, dict): + assistant_content = json.dumps(output, indent=2, default=str)[:500] + else: + assistant_content = str(output)[:500] + messages.append({"role": "assistant", "content": assistant_content}) + + if messages: + await self.store_memory(messages, input_data, output, metadata) + + def _prune_memories(self) -> None: + if len(self._memories) <= self.max_memories: + return + + def priority_score(mem: dict) -> float: + importance = mem.get("importance_score", constants.DEFAULT_IMPORTANCE_SCORE) + confidence = mem.get("confidence_score", constants.DEFAULT_CONFIDENCE_SCORE) + use_count = mem.get("metadata", {}).get("use_count", 0) + return (importance * 0.4) + (confidence * 0.3) + (use_count / 100.0 * 0.3) + + sorted_memories = sorted(self._memories, key=priority_score) + + # Remove lowest priority memories, but never prune critical ones (importance >= 0.9) + to_remove = [] + for mem in sorted_memories: + if len(self._memories) - len(to_remove) <= self.max_memories: + break + if mem.get("importance_score", 0) < 0.9: + to_remove.append(mem) + + for mem in to_remove: + self._memories.remove(mem) + + if to_remove: + self.logger.info(f"Pruned {len(to_remove)} memories (kept {len(self._memories)})") + + def update_memory_importance(self, memory_id: str, score: float) -> None: + for mem in self._memories: + if mem.get("id") == memory_id: + mem["importance_score"] = max(0.0, min(1.0, score)) + self._save_memories() + return + self.logger.warning(f"Memory {memory_id} not found for importance update") + + def update_memory_confidence(self, memory_id: str, score: float) -> None: + for mem in self._memories: + if mem.get("id") == memory_id: + mem["confidence_score"] = max(0.0, min(1.0, score)) + self._save_memories() + return + self.logger.warning(f"Memory {memory_id} not found for confidence update") + + def increment_memory_use(self, memory_id: str) -> None: + for mem in self._memories: + if mem.get("id") == memory_id: + metadata = mem.setdefault("metadata", {}) + metadata["use_count"] = metadata.get("use_count", 0) + 1 + self._save_memories() + return + self.logger.warning(f"Memory {memory_id} not found for use count increment") + + def get_memory_by_id(self, memory_id: str) -> typing.Optional[dict]: + for mem in self._memories: + if mem.get("id") == memory_id: + return mem + return None + + def get_all_memories(self) -> typing.List[dict]: + return self._memories.copy() + + @property + def agent_version(self) -> str: + return self._agent_version + + @property + def max_memories(self) -> int: + return self._max_memories diff --git a/packages/agents/octobot_agents/storage/memory/tools.py b/packages/agents/octobot_agents/storage/memory/tools.py new file mode 100644 index 0000000000..dd22e73951 --- /dev/null +++ b/packages/agents/octobot_agents/storage/memory/tools.py @@ -0,0 +1,131 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing + +import octobot_services.services.abstract_ai_service as abstract_ai_service + +import octobot_agents.storage.memory.abstract_memory_storage as abstract_memory_storage + + +def get_memory_tools(memory_manager: abstract_memory_storage.AbstractMemoryStorage, ai_service: abstract_ai_service.AbstractAIService) -> typing.List[dict]: + if not memory_manager or not memory_manager.is_enabled(): + return [] + + return [ + ai_service.format_tool_definition( + name="get_memory_summaries", + description="Get a list of available memories with summaries (id, title, context, tags, category, importance, confidence). Use this to see what memories are available before fetching specific ones.", + parameters={ + "type": "object", + "properties": { + "category": { + "type": "string", + "description": "Optional filter by memory category", + }, + "min_importance": { + "type": "number", + "description": "Optional minimum importance score (0.0-1.0)", + }, + }, + }, + ), + ai_service.format_tool_definition( + name="get_memory_by_id", + description="Get the full content of a specific memory by its UUID. Use this after getting memory summaries to fetch detailed information.", + parameters={ + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The UUID of the memory to fetch", + }, + }, + "required": ["id"], + }, + ), + ] + + +def execute_memory_tool( + memory_manager: abstract_memory_storage.AbstractMemoryStorage, + tool_name: str, + arguments: dict, +) -> typing.Any: + if not memory_manager or not memory_manager.is_enabled(): + return {"error": "Memory is not enabled"} + + try: + if tool_name == "get_memory_summaries": + category = arguments.get("category") + min_importance = arguments.get("min_importance") + + all_memories = memory_manager.get_all_memories() + + # Filter by category if provided + if category: + all_memories = [m for m in all_memories if m.get("category") == category] + + # Filter by importance if provided + if min_importance is not None: + all_memories = [ + m for m in all_memories + if m.get("importance_score", 0.5) >= min_importance + ] + + # Return summaries + summaries = [] + for mem in all_memories: + summaries.append({ + "id": mem.get("id"), + "title": mem.get("title", ""), + "context": mem.get("context", ""), + "tags": mem.get("tags", []), + "category": mem.get("category", "general"), + "importance_score": mem.get("importance_score", 0.5), + "confidence_score": mem.get("confidence_score", 0.5), + }) + + return summaries + + elif tool_name == "get_memory_by_id": + memory_id = arguments.get("id") + if not memory_id: + return {"error": "Memory ID is required"} + + memory = memory_manager.get_memory_by_id(memory_id) + if not memory: + return {"error": f"Memory with ID {memory_id} not found"} + + # Increment use count + memory_manager.increment_memory_use(memory_id) + + return { + "id": memory.get("id"), + "title": memory.get("title", ""), + "context": memory.get("context", ""), + "content": memory.get("content", ""), + "category": memory.get("category", "general"), + "tags": memory.get("tags", []), + "importance_score": memory.get("importance_score", 0.5), + "confidence_score": memory.get("confidence_score", 0.5), + "metadata": memory.get("metadata", {}), + } + + else: + return {"error": f"Unknown tool: {tool_name}"} + + except Exception as e: + return {"error": str(e)} diff --git a/packages/agents/octobot_agents/team/__init__.py b/packages/agents/octobot_agents/team/__init__.py new file mode 100644 index 0000000000..dcd05961d3 --- /dev/null +++ b/packages/agents/octobot_agents/team/__init__.py @@ -0,0 +1,62 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team import channels +from octobot_agents.team.channels import ( + AbstractAgentsTeamChannel, + AbstractAgentsTeamChannelProducer, + AbstractAgentsTeamChannelConsumer, + AbstractSyncAgentsTeamChannelProducer, + AbstractLiveAgentsTeamChannelProducer, + AbstractDeepAgentsTeamChannel, + AbstractDeepAgentsTeamChannelConsumer, + AbstractDeepAgentsTeamChannelProducer, +) + +from octobot_agents.team import critic +from octobot_agents.team.critic import CriticAgentMixin + +from octobot_agents.team import judge +from octobot_agents.team.judge import JudgeAgentMixin + +from octobot_agents.team import manager +from octobot_agents.team.manager import ( + TeamManagerMixin, +) + +from octobot_agents.constants import ( + MODIFICATION_ADDITIONAL_INSTRUCTIONS, + MODIFICATION_CUSTOM_PROMPT, + MODIFICATION_EXECUTION_HINTS, +) + +__all__ = [ + "AbstractAgentsTeamChannel", + "AbstractAgentsTeamChannelProducer", + "AbstractAgentsTeamChannelConsumer", + "AbstractSyncAgentsTeamChannelProducer", + "AbstractLiveAgentsTeamChannelProducer", + # Deep Agents Team + "AbstractDeepAgentsTeamChannel", + "AbstractDeepAgentsTeamChannelConsumer", + "AbstractDeepAgentsTeamChannelProducer", + "TeamManagerMixin", + "MODIFICATION_ADDITIONAL_INSTRUCTIONS", + "MODIFICATION_CUSTOM_PROMPT", + "MODIFICATION_EXECUTION_HINTS", + "CriticAgentMixin", + "JudgeAgentMixin", +] diff --git a/packages/agents/octobot_agents/team/channels/__init__.py b/packages/agents/octobot_agents/team/channels/__init__.py new file mode 100644 index 0000000000..8fb0d4211c --- /dev/null +++ b/packages/agents/octobot_agents/team/channels/__init__.py @@ -0,0 +1,44 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.channels.agents_team import ( + AbstractAgentsTeamChannel, + AbstractAgentsTeamChannelConsumer, + AbstractAgentsTeamChannelProducer, +) + +from octobot_agents.team.channels.ai_agents_team import ( + AbstractSyncAgentsTeamChannelProducer, + AbstractLiveAgentsTeamChannelProducer, +) + +from octobot_agents.team.channels.deep_agents_team import ( + AbstractDeepAgentsTeamChannel, + AbstractDeepAgentsTeamChannelConsumer, + AbstractDeepAgentsTeamChannelProducer, +) + +__all__ = [ + "AbstractAgentsTeamChannel", + "AbstractAgentsTeamChannelConsumer", + "AbstractAgentsTeamChannelProducer", + "AbstractSyncAgentsTeamChannelProducer", + "AbstractLiveAgentsTeamChannelProducer", + # Deep Agents Team + "AbstractDeepAgentsTeamChannel", + "AbstractDeepAgentsTeamChannelConsumer", + "AbstractDeepAgentsTeamChannelProducer", +] diff --git a/packages/agents/octobot_agents/team/channels/agents_team.py b/packages/agents/octobot_agents/team/channels/agents_team.py new file mode 100644 index 0000000000..a8271613dc --- /dev/null +++ b/packages/agents/octobot_agents/team/channels/agents_team.py @@ -0,0 +1,664 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +""" +Abstract agents team channel classes for orchestrating teams of agents. + +Teams follow the same channel pattern as individual agents, enabling: +- Composable teams (teams can consume from other teams) +- DAG-based agent relationships +""" +import abc +import typing +import collections + +import octobot_commons.logging as logging + +import octobot_agents.constants as constants +import octobot_agents.agent.channels as agent_channels +import octobot_agents.team.manager as team_manager +import octobot_agents.team.critic as team_critic +import octobot_agents.team.judge as team_judge +import octobot_agents.agent.memory.channels as memory_channels +import octobot_agents.enums as enums +import octobot_agents.errors as errors +import octobot_agents.models as models +import octobot_services.services.abstract_ai_service as abstract_ai_service + +class AbstractAgentsTeamChannelConsumer(agent_channels.AbstractAgentChannelConsumer): + """ + Consumer for team outputs. + + Can be used to consume results from a team's final output channel. + """ + __metaclass__ = abc.ABCMeta + + +class AbstractAgentsTeamChannelProducer(agent_channels.AbstractAgentChannelProducer, abc.ABC): + """ + Base producer for agent teams with common DAG logic. + + This class provides: + - DAG computation from relations + - Entry/terminal agent identification + - Topological ordering for execution + + Subclasses implement different execution modes: + - AbstractSyncAgentsTeamChannelProducer: Direct one-shot execution + - AbstractLiveAgentsTeamChannelProducer: Channel-based long-running execution + + Relation semantics: + - relations = [(A, B), ...] where A and B are Channel types + - Means: A's producer output feeds into B's producer input + """ + + # Override in subclasses with dedicated channel and consumer classes + TEAM_CHANNEL: typing.Optional[typing.Type["AbstractAgentsTeamChannel"]] = None + TEAM_CONSUMER: typing.Optional[typing.Type[AbstractAgentsTeamChannelConsumer]] = None + TEAM_NAME: str = "AbstractAgentsTeam" + + # Class attributes for critic, memory, manager, and judge agent classes + # Teams can override these to specify which implementations to use + # If None, the feature is disabled + CriticAgentClass: typing.Optional[typing.Type[team_critic.CriticAgentProducer]] = None + MemoryAgentClass: typing.Optional[typing.Type[memory_channels.MemoryAgentProducer]] = None + ManagerAgentClass: typing.Optional[typing.Type[team_manager.ManagerAgentProducer]] = None + JudgeAgentClass: typing.Optional[typing.Type[team_judge.JudgeAgentProducer]] = None + + def __init__( + self, + channel: typing.Optional["AbstractAgentsTeamChannel"], + agents: typing.List[agent_channels.AbstractAIAgentChannelProducer], + relations: typing.List[typing.Tuple[typing.Type[agent_channels.AbstractAgentChannel], typing.Type[agent_channels.AbstractAgentChannel]]], + ai_service: abstract_ai_service.AbstractAIService, + team_name: typing.Optional[str] = None, + team_id: typing.Optional[str] = None, + manager: typing.Optional[team_manager.ManagerAgentProducer] = None, + self_improving: bool = False, + critic_agent: typing.Optional[team_critic.CriticAgentProducer] = None, + memory_agent: typing.Optional[memory_channels.MemoryAgentProducer] = None, + judge_agent: typing.Optional[team_judge.JudgeAgentProducer] = None, + ): + """ + Initialize the agent team producer. + + Args: + channel: The team's output channel (optional). + agents: List of agent producer instances. + relations: List of (SourceAgentChannel, TargetAgentChannel) edges. + e.g., [(SignalAIAgentChannel, RiskAIAgentChannel)] means + RiskAgent receives input from SignalAgent. + ai_service: The AI service for LLM calls. + team_name: Name of the team (defaults to TEAM_NAME). + team_id: Unique identifier for this team instance. + manager: Optional team manager agent. If None, uses ManagerAgentClass if defined. + Raises MissingManagerError if both are None. + self_improving: Whether to enable automatic critic and memory update after execution. + critic_agent: Optional critic agent. If None and self_improving=True, uses CriticAgentClass if defined. + memory_agent: Optional memory agent. If None and self_improving=True, uses MemoryAgentClass if defined. + judge_agent: Optional judge agent for debate phases. If None, uses JudgeAgentClass if defined. + """ + super().__init__(channel) + self.agents = agents + self.relations = relations + self.ai_service = ai_service + self.team_name = team_name or self.TEAM_NAME + self.team_id = team_id or "" + self.logger = logging.get_logger(f"{self.__class__.__name__}{f'[{self.team_id}]' if self.team_id else ''}") + + # Initialize manager - use class attribute if not provided + if manager is None: + if self.ManagerAgentClass is not None: + self.manager = self.ManagerAgentClass(channel=None) + else: + raise errors.MissingManagerError( + f"{self.__class__.__name__} requires a manager. " + f"Either set ManagerAgentClass class attribute or pass manager parameter." + ) + else: + self.manager = manager + + # Initialize self-improving mechanism + self.self_improving = self_improving + if self_improving: + if critic_agent is None: + if self.CriticAgentClass is not None: + self.critic_agent = self.CriticAgentClass(channel=None) + else: + self.critic_agent = None + else: + self.critic_agent = critic_agent + + if memory_agent is None: + if self.MemoryAgentClass is not None: + self.memory_agent = self.MemoryAgentClass(channel=None) + else: + self.memory_agent = None + else: + self.memory_agent = memory_agent + else: + self.critic_agent = critic_agent + self.memory_agent = memory_agent + + # Judge agent for debate phases (optional) + if judge_agent is None and self.JudgeAgentClass is not None: + # pylint: disable=not-callable + self.judge_agent = self.JudgeAgentClass() + if self.judge_agent.logger is None: + self.judge_agent.logger = self.logger + else: + self.judge_agent = judge_agent + + self.last_execution_plan: typing.Optional[models.ExecutionPlan] = None + self.last_execution_results: typing.Dict[str, typing.Any] = {} + self.last_debate_state: typing.Optional[typing.Dict[str, typing.Any]] = None # debate_history, judge_decisions for logging + + self._producer_by_channel: typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], agent_channels.AbstractAIAgentChannelProducer] = {} + self._producer_by_name: typing.Dict[str, agent_channels.AbstractAIAgentChannelProducer] = {} + for agent in self.agents: + if agent.AGENT_CHANNEL is not None: + self._producer_by_channel[agent.AGENT_CHANNEL] = agent + self._producer_by_name[agent.name] = agent + + def get_manager(self) -> typing.Optional[team_manager.ManagerAgentProducer]: + """ + Get the team manager. + + Returns: + The team manager agent, or None if not set. + """ + return self.manager + + def get_agent_by_name(self, name: str) -> typing.Optional[agent_channels.AbstractAIAgentChannelProducer]: + """ + Get an agent by name. + + Args: + name: The name of the agent to retrieve. + + Returns: + The agent producer if found, None otherwise. + """ + return self._producer_by_name.get(name) + + def _build_dag(self) -> typing.Tuple[ + typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], typing.List[typing.Type[agent_channels.AbstractAgentChannel]]], + typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], typing.List[typing.Type[agent_channels.AbstractAgentChannel]]] + ]: + """ + Build DAG edge mappings from relations. + + Returns: + Tuple of (incoming_edges, outgoing_edges) dicts. + - incoming_edges[B] = [A, ...] means B receives from A + - outgoing_edges[A] = [B, ...] means A sends to B + """ + incoming_edges: typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], typing.List[typing.Type[agent_channels.AbstractAgentChannel]]] = collections.defaultdict(list) + outgoing_edges: typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], typing.List[typing.Type[agent_channels.AbstractAgentChannel]]] = collections.defaultdict(list) + + for source_channel, target_channel in self.relations: + incoming_edges[target_channel].append(source_channel) + outgoing_edges[source_channel].append(target_channel) + + return incoming_edges, outgoing_edges + + def _get_entry_agents(self) -> typing.List[agent_channels.AbstractAIAgentChannelProducer]: + incoming_edges, _ = self._build_dag() + entry_agents = [] + + for agent in self.agents: + channel_type = agent.AGENT_CHANNEL + if channel_type is None: + continue + # Entry: no incoming edges + if channel_type not in incoming_edges or not incoming_edges[channel_type]: + entry_agents.append(agent) + + return entry_agents + + def _get_terminal_agents(self) -> typing.List[agent_channels.AbstractAIAgentChannelProducer]: + _, outgoing_edges = self._build_dag() + terminal_agents = [] + + for agent in self.agents: + channel_type = agent.AGENT_CHANNEL + if channel_type is None: + continue + # Terminal: no outgoing edges + if channel_type not in outgoing_edges or not outgoing_edges[channel_type]: + terminal_agents.append(agent) + + return terminal_agents + + def _get_execution_order(self) -> typing.List[agent_channels.AbstractAIAgentChannelProducer]: + incoming_edges, outgoing_edges = self._build_dag() + + # Count incoming edges for each node + in_degree: typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], int] = collections.defaultdict(int) + for agent in self.agents: + channel_type = agent.AGENT_CHANNEL + if channel_type is not None: + in_degree[channel_type] = len(incoming_edges.get(channel_type, [])) + + # Start with nodes that have no incoming edges + queue: typing.List[typing.Type[agent_channels.AbstractAgentChannel]] = [ + channel_type for channel_type, degree in in_degree.items() if degree == 0 + ] + + ordered_channels: typing.List[typing.Type[agent_channels.AbstractAgentChannel]] = [] + + while queue: + current = queue.pop(0) + ordered_channels.append(current) + + # Reduce in-degree for all successors + for successor in outgoing_edges.get(current, []): + in_degree[successor] -= 1 + if in_degree[successor] == 0: + queue.append(successor) + + # Convert channel types back to producers + return [self._producer_by_channel[ch] for ch in ordered_channels if ch in self._producer_by_channel] + + @staticmethod + def _get_debate_message(result: typing.Union[typing.Dict[str, typing.Any], typing.Any]) -> str: + if isinstance(result, dict): + return result.get("message", result.get("reasoning", result.get("content", str(result)))) + msg = getattr(result, "message", None) + if msg is not None: + return str(msg) + reasoning = getattr(result, "reasoning", None) + if reasoning is not None: + return str(reasoning) + return str(result) + + async def _run_debate( + self, + debate_config: "models.DebatePhaseConfig", + initial_data: typing.Dict[str, typing.Any], + results: typing.Dict[str, typing.Dict[str, typing.Any]], + completed_agents: typing.Set[str], + incoming_edges: typing.Dict[typing.Type[agent_channels.AbstractAgentChannel], typing.List[typing.Type[agent_channels.AbstractAgentChannel]]], + ) -> typing.Tuple[typing.Dict[str, typing.Dict[str, typing.Any]], typing.Set[str]]: + """ + Run a debate phase: debators take turns each round, then judge decides continue or exit. + + Updates results and completed_agents. Sets self.last_debate_state with debate_history + and judge_decisions for structured logging. + """ + if self.judge_agent is None: + self.logger.warning("Debate step requires a judge agent but none is configured; skipping debate.") + return results, completed_agents + + debate_history: typing.List[typing.Dict[str, typing.Any]] = [] + judge_decisions: typing.List[typing.Dict[str, typing.Any]] = [] + debator_names = list(debate_config.debator_agent_names) + max_rounds = debate_config.max_rounds + + for round_num in range(1, max_rounds + 1): + # Run each debator in order this round + for debator_name in debator_names: + agent = self._producer_by_name.get(debator_name) + if agent is None: + self.logger.warning(f"Debator {debator_name} not found in team; skipping.") + continue + # Build input: initial state + debate history so far + agent_input: typing.Dict[str, typing.Any] = { + "_debate_history": debate_history, + "_debate_round": round_num, + } + if isinstance(initial_data, dict): + agent_input["_initial_state"] = initial_data + # Predecessor outputs for DAG semantics + channel_type = agent.AGENT_CHANNEL + predecessors = [] + if channel_type is not None: + predecessors = incoming_edges.get(channel_type, []) + for pred_channel in predecessors: + pred_agent = self._producer_by_channel.get(pred_channel) + if pred_agent and pred_agent.name in results: + pred_result = results[pred_agent.name] + agent_input[pred_agent.name] = { + constants.AGENT_NAME_KEY: pred_agent.name, + constants.AGENT_ID_KEY: "", + constants.RESULT_KEY: pred_result.get(constants.RESULT_KEY), + } + if not agent_input.get("_initial_state") and not predecessors: + agent_input = initial_data if isinstance(initial_data, dict) else agent_input + + try: + result = await agent.execute(agent_input, self.ai_service) + except Exception as e: + self.logger.error(f"Debator {debator_name} execution failed: {e}") + raise + # Extract message text for debate history (agent-specific) + message = self._get_debate_message(result) + debate_history.append({ + "agent_name": debator_name, + "message": str(message), + "round": round_num, + }) + results[debator_name] = { + constants.AGENT_NAME_KEY: debator_name, + constants.AGENT_ID_KEY: "", + constants.RESULT_KEY: result, + } + completed_agents.add(debator_name) + + # Run judge + judge_input = { + "debate_history": debate_history, + "debator_agent_names": debator_names, + "current_round": round_num, + "max_rounds": max_rounds, + } + if isinstance(initial_data, dict): + judge_input["_initial_state"] = initial_data + try: + judge_out = await self.judge_agent.execute(judge_input, self.ai_service) + except Exception as e: + self.logger.error(f"Judge execution failed: {e}") + raise + if isinstance(judge_out, dict): + judge_dict = judge_out + else: + _dump = getattr(judge_out, "model_dump", None) or getattr(judge_out, "dict", None) + judge_dict = _dump() if _dump else {"decision": enums.JudgeDecisionType.EXIT.value, "reasoning": str(judge_out), "summary": None} + judge_decisions.append({ + "round": round_num, + "decision": judge_dict.get("decision", enums.JudgeDecisionType.EXIT.value), + "reasoning": judge_dict.get("reasoning", ""), + "summary": judge_dict.get("summary"), + }) + if self.logger: + self.logger.debug( + f"Debate round {round_num}: judge decision={judge_dict.get('decision', 'exit')} " + f"reasoning={judge_dict.get('reasoning', '')[:100]}..." + ) + if judge_dict.get("decision") == enums.JudgeDecisionType.EXIT.value or round_num >= max_rounds: + break + + self.last_debate_state = { + "debate_history": debate_history, + "judge_decisions": judge_decisions, + } + return results, completed_agents + + async def _execute_plan( + self, + execution_plan: models.ExecutionPlan, + initial_data: typing.Dict[str, typing.Any], + ) -> typing.Dict[str, typing.Any]: + incoming_edges, _ = self._build_dag() + terminal_agents = self._get_terminal_agents() + + # Store results by agent name + results: typing.Dict[str, typing.Dict[str, typing.Any]] = {} + completed_agents: typing.Set[str] = set() + + # Normalize debate steps based on judge availability and cap excessive debate steps + debate_steps = [step for step in execution_plan.steps if step.step_type == enums.StepType.DEBATE.value] + if self.judge_agent is None: + if debate_steps: + self.logger.debug( + f"Skipping {len(debate_steps)} debate step(s) - no judge agent configured in team" + ) + execution_plan.steps = [step for step in execution_plan.steps if step.step_type != enums.StepType.DEBATE.value] + else: + max_debate_steps = 3 + if len(debate_steps) > max_debate_steps: + kept = 0 + filtered_steps = [] + for step in execution_plan.steps: + if step.step_type == enums.StepType.DEBATE.value: + kept += 1 + if kept > max_debate_steps: + continue + filtered_steps.append(step) + execution_plan.steps = filtered_steps + self.logger.debug( + f"Capped debate steps to {max_debate_steps} (was {len(debate_steps)})" + ) + + iteration = 0 + max_iterations = execution_plan.max_iterations or 1 + + while iteration < max_iterations: + iteration += 1 + self.logger.debug(f"Executing plan iteration {iteration}/{max_iterations}") + + # Execute each step in the plan + for step in execution_plan.steps: + if step.skip: + self.logger.debug(f"Skipping agent: {step.agent_name}") + continue + + # Debate step: run debators and judge with rounds + step_type = step.step_type or enums.StepType.AGENT.value + debate_config = step.debate_config + if step_type == enums.StepType.DEBATE.value and debate_config is not None: + results, completed_agents = await self._run_debate( + debate_config, initial_data, results, completed_agents, incoming_edges + ) + continue + + agent = self._producer_by_name.get(step.agent_name) + if agent is None: + self.logger.warning(f"Agent {step.agent_name} not found in team") + continue + + # Wait for dependencies if specified + if step.wait_for: + for dep_name in step.wait_for: + if dep_name not in completed_agents: + self.logger.debug(f"Waiting for dependency: {dep_name}") + # In a real implementation, we might want to wait for actual completion + # For now, we assume dependencies are already completed + + # Send instructions if provided + if step.instructions: + instruction_dict: typing.Dict[str, typing.Any] = {} + for instruction in step.instructions: + if instruction.modification_type == constants.MODIFICATION_ADDITIONAL_INSTRUCTIONS: + instruction_dict[constants.MODIFICATION_ADDITIONAL_INSTRUCTIONS] = instruction.value + elif instruction.modification_type == constants.MODIFICATION_CUSTOM_PROMPT: + instruction_dict[constants.MODIFICATION_CUSTOM_PROMPT] = instruction.value + elif instruction.modification_type == constants.MODIFICATION_EXECUTION_HINTS: + instruction_dict[constants.MODIFICATION_EXECUTION_HINTS] = instruction.value + + if instruction_dict: + await self.manager.send_instruction_to_agent(agent, instruction_dict) + + # Gather inputs from predecessors + channel_type = agent.AGENT_CHANNEL + if channel_type is None: + continue + + predecessors = incoming_edges.get(channel_type, []) + + if not predecessors: + # Entry agent: use initial_data + agent_input = initial_data + else: + # Non-entry agent: gather predecessor outputs + agent_input = {} + for pred_channel in predecessors: + pred_agent = self._producer_by_channel.get(pred_channel) + if pred_agent and pred_agent.name in results: + pred_result = results[pred_agent.name] + agent_input[pred_agent.name] = { + constants.AGENT_NAME_KEY: pred_agent.name, + constants.AGENT_ID_KEY: "", + constants.RESULT_KEY: pred_result.get(constants.RESULT_KEY), + } + + # Store initial_data in a special key for agents that need it (like distribution agent) + # This allows agents to access initial state without breaking agents that expect only predecessor outputs + if isinstance(initial_data, dict): + agent_input["_initial_state"] = initial_data + + self.logger.debug(f"Executing agent: {agent.name}") + try: + result = await agent.execute(agent_input, self.ai_service) + results[agent.name] = { + constants.AGENT_NAME_KEY: agent.name, + constants.AGENT_ID_KEY: "", + constants.RESULT_KEY: result, + } + completed_agents.add(agent.name) + except Exception as e: + self.logger.error(f"Agent {agent.name} execution failed: {e}") + raise + + # Check loop condition + if not execution_plan.loop: + break + + # Evaluate loop condition (simplified - in real implementation, this would be more sophisticated) + if execution_plan.loop_condition: + self.logger.debug(f"Loop condition: {execution_plan.loop_condition}") + # For now, we'll break after one iteration if loop_condition is set + # In a real implementation, this would evaluate the condition + break + + # Collect terminal results and all agent outputs for critic + terminal_results: typing.Dict[str, typing.Any] = {} + all_agent_outputs: typing.Dict[str, typing.Any] = {} + for agent in self.agents: + if agent.name in results: + agent_result = results[agent.name].get(constants.RESULT_KEY) + all_agent_outputs[agent.name] = agent_result + if agent in terminal_agents: + terminal_results[agent.name] = agent_result + + # Store for self-improvement + self.last_execution_results = all_agent_outputs + + return terminal_results + + def _get_agent_outputs_from_execution(self) -> typing.Dict[str, typing.Any]: + outputs = {} + for agent in self.agents: + # Try to get output from execution results + if agent.name in self.last_execution_results: + result = self.last_execution_results[agent.name] + try: + # Try dict access + outputs[agent.name] = result.get("result", result) + except AttributeError: + # Not a dict, use directly + outputs[agent.name] = result + + # Include manager output if manager is an AI agent with memory enabled + manager = self.get_manager() + if manager is not None: + try: + if manager.has_memory_enabled(): + # Manager's output is the execution plan + if self.last_execution_plan is not None: + outputs[manager.name] = self.last_execution_plan + except AttributeError: + # Manager is not an AI agent (no has_memory_enabled method) + pass + + return outputs + + async def _self_improve_in_background(self, execution_results: typing.Dict[str, typing.Any]) -> None: + """ + Run critic and memory update in background without blocking. + + Args: + execution_results: Results from team execution. + """ + try: + # 1. Run critic agent + # Manager is already included in agent_outputs via _get_agent_outputs_from_execution() + critic_input = { + "team_producer": self, + "execution_plan": self.last_execution_plan, + "execution_results": execution_results, + "agent_outputs": self._get_agent_outputs_from_execution(), + "execution_metadata": { + "team_name": self.team_name, + "team_id": self.team_id, + }, + } + critic_analysis = await self.critic_agent.execute(critic_input, self.ai_service) + + # 2. Run memory agent with critic output (only for agents needing improvements) + memory_input = { + "critic_analysis": critic_analysis, # Contains agent_improvements dict + "agent_outputs": self._get_agent_outputs_from_execution(), + "execution_metadata": { + "execution_plan": self.last_execution_plan, + "team_name": self.team_name, + "team_producer": self, + }, + } + memory_operation = await self.memory_agent.execute(memory_input, self.ai_service) + + self.logger.debug( + f"Self-improvement completed: {memory_operation.message}. " + f"Processed {len(memory_operation.agents_processed)} agents, " + f"skipped {len(memory_operation.agents_skipped)} agents" + ) + except Exception as e: + self.logger.warning(f"Self-improvement failed (non-blocking): {e}") + + @abc.abstractmethod + async def run(self, initial_data: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: + """ + Execute the team pipeline. + + Args: + initial_data: Initial data to pass to entry agents. + + Returns: + Dict with results from terminal agents. + """ + raise NotImplementedError("run must be implemented by subclasses") + + async def push( + self, + result: typing.Any, + agent_name: typing.Optional[str] = None, + agent_id: typing.Optional[str] = None, + ) -> None: + """Push team result to the team's channel.""" + if self.channel is None: + return + + team_name = agent_name or self.team_name + for consumer_instance in self.channel.get_filtered_consumers( + agent_name=team_name, + agent_id=agent_id or self.team_id, + ): + await consumer_instance.queue.put({ + constants.AGENT_NAME_KEY: team_name, + constants.AGENT_ID_KEY: agent_id or self.team_id, + constants.RESULT_KEY: result, + }) + + +class AbstractAgentsTeamChannel(agent_channels.AbstractAgentChannel): + """ + Channel for team outputs. + + Allows teams to be composed - one team's output can feed another team. + """ + __metaclass__ = abc.ABCMeta + + PRODUCER_CLASS = AbstractAgentsTeamChannelProducer + CONSUMER_CLASS = AbstractAgentsTeamChannelConsumer diff --git a/packages/agents/octobot_agents/team/channels/ai_agents_team.py b/packages/agents/octobot_agents/team/channels/ai_agents_team.py new file mode 100644 index 0000000000..4ff4b3c5cf --- /dev/null +++ b/packages/agents/octobot_agents/team/channels/ai_agents_team.py @@ -0,0 +1,464 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import asyncio +import typing + +import octobot_agents.agent as agent +import octobot_agents.constants as constants +import octobot_agents.team.manager as team_manager +import octobot_agents.team.critic as team_critic +import octobot_agents.team.judge as team_judge +import octobot_agents.agent.memory.channels as memory_channels +import octobot_agents.team.channels.agents_team as agents_team +import octobot_agents.errors as errors +import octobot_agents.storage.history as storage_history +import octobot_agents.storage.history.abstract_analysis_storage as abstract_analysis_storage +import octobot_services.services.abstract_ai_service as abstract_ai_service +import octobot_agents.models as models + + +class AbstractSyncAgentsTeamChannelProducer(agents_team.AbstractAgentsTeamChannelProducer): + """ + Sync (one-shot) team producer for direct sequential execution. + + Executes agents in topological order without using channels or consumers. + Each agent's execute() is called directly with outputs from predecessors. + + Use this for: + - Simple sequential pipelines + - One-shot batch processing + - Testing and debugging + """ + + def __init__( + self, + channel: typing.Optional[agents_team.AbstractAgentsTeamChannel], + agents: typing.List[agent.AbstractAIAgentChannelProducer], + relations: typing.List[typing.Tuple[typing.Type[agent.AbstractAIAgentChannel], typing.Type[agent.AbstractAIAgentChannel]]], + ai_service: abstract_ai_service.AbstractAIService, + team_name: typing.Optional[str] = None, + team_id: typing.Optional[str] = None, + manager: typing.Optional[team_manager.ManagerAgentProducer] = None, + self_improving: bool = False, + critic_agent: typing.Optional[team_critic.CriticAgentProducer] = None, + memory_agent: typing.Optional[memory_channels.MemoryAgentProducer] = None, + judge_agent: typing.Optional[team_judge.JudgeAgentProducer] = None, + analysis_storage: typing.Optional[abstract_analysis_storage.AbstractAnalysisStorage] = None, + ): + """ + Initialize the sync AI team producer. + + Uses CriticAgentClass / JudgeAgentClass attributes if defined, otherwise disabled. + + Args: + channel: The team's output channel (optional). + agents: List of agent producer instances. + relations: List of (SourceAgentChannel, TargetAgentChannel) edges. + ai_service: The AI service for LLM calls. + team_name: Name of the team (defaults to TEAM_NAME). + team_id: Unique identifier for this team instance. + manager: Optional team manager agent. + self_improving: Whether to enable automatic improvement after execution. + critic_agent: Optional critic agent for analysis. + memory_agent: Optional memory agent for storing improvements. + judge_agent: Optional judge agent for debate phases. + analysis_storage: Optional analysis storage instance. If None, uses JSONAnalysisStorage. + """ + # Call parent init first - it handles critic/memory/judge agent instantiation via class attributes + super().__init__( + channel=channel, + agents=agents, + relations=relations, + ai_service=ai_service, + team_name=team_name, + team_id=team_id, + manager=manager, + self_improving=self_improving, + critic_agent=critic_agent, + memory_agent=memory_agent, + judge_agent=judge_agent, + ) + + # Initialize analysis storage + if analysis_storage is None: + self.analysis_storage = storage_history.create_analysis_storage() + else: + self.analysis_storage = analysis_storage + + async def run(self, initial_data: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: + """ + Execute the team pipeline synchronously using the manager. + + 1. Get ExecutionPlan from manager.execute() + 2. Execute the plan + 3. Return terminal agent results + 4. Trigger self-improvement in background if enabled + + Args: + initial_data: Initial data to pass to entry agents. + + Returns: + Dict with results from all terminal agents. + """ + # Build input_data for manager + manager_input = { + "team_producer": self, + "initial_data": initial_data, + "instructions": None, # Can be extended to accept instructions + } + + # Get execution plan or terminal results from manager + manager_result = await self.manager.execute(manager_input, self.ai_service) + + terminal_results: typing.Dict[str, typing.Any] + if isinstance(manager_result, models.ExecutionPlan): + # Plan-driven manager: execute the plan + self.last_execution_plan = manager_result + terminal_results = await self._execute_plan(manager_result, initial_data) + elif isinstance(manager_result, models.ManagerResult): + # Tools-driven manager: extract results from ManagerResult model + terminal_results = manager_result.results + self.last_execution_plan = None + else: + raise ValueError(f"Unexpected manager result type: {type(manager_result)}") + + self.last_execution_results = terminal_results + + self.logger.debug(f"Sync execution completed with {len(terminal_results)} results") + + # Push team result if we have a channel + if self.channel is not None: + await self.push(terminal_results) + + # Trigger self-improvement in background if enabled + if self.self_improving and self.critic_agent and self.memory_agent: + asyncio.create_task(self._self_improve_in_background(terminal_results)) + + return terminal_results + + def save_analysis( + self, + agent_name: str, + result: typing.Any, + ) -> None: + """ + Save analysis results to storage for debugging/audit purposes. + + Delegates to the analysis storage backend. Results are saved with metadata + for cross-agent access and debugging. + + Args: + agent_name: Name of the agent producing the analysis. + result: The analysis result to save (dict, str, or other serializable). + """ + try: + self.analysis_storage.save_analysis( + agent_name=agent_name, + result=result, + team_name=self.team_name, + team_id=self.team_id, + ) + except Exception as e: + self.logger.warning(f"Failed to save analysis for {agent_name}: {e}") + + def clear_transient_files(self) -> None: + """ + Clear analysis files from previous runs. + + Delegates to the analysis storage backend to ensure clean state + for the next execution. + """ + try: + self.analysis_storage.clear_transient_files() + except Exception as e: + self.logger.warning(f"Failed to clear transient files: {e}") + + + +class AbstractLiveAgentsTeamChannelProducer(agents_team.AbstractAgentsTeamChannelProducer): + """ + Live (long-running) team producer with full channel-based execution. + + Creates channels for each agent and wires consumers based on relations. + Agents communicate asynchronously through their channels. + + Use this for: + - Long-running pipelines with continuous updates + - Complex DAG workflows with parallel execution + - Reactive systems where agents respond to events + """ + + def __init__( + self, + channel: typing.Optional[agents_team.AbstractAgentsTeamChannel], + agents: typing.List[agent.AbstractAIAgentChannelProducer], + relations: typing.List[typing.Tuple[typing.Type[agent.AbstractAIAgentChannel], typing.Type[agent.AbstractAIAgentChannel]]], + ai_service: abstract_ai_service.AbstractAIService, + team_name: typing.Optional[str] = None, + team_id: typing.Optional[str] = None, + manager: typing.Optional[team_manager.ManagerAgentProducer] = None, + self_improving: bool = False, + critic_agent: typing.Optional[team_critic.CriticAgentProducer] = None, + memory_agent: typing.Optional[memory_channels.MemoryAgentProducer] = None, + judge_agent: typing.Optional[team_judge.JudgeAgentProducer] = None, + ): + """ + Initialize the live AI team producer. + + Uses CriticAgentClass / JudgeAgentClass attribute if defined, otherwise disabled. + """ + # Call parent init - it handles critic/memory/judge agent instantiation via class attributes + super().__init__( + channel=channel, + agents=agents, + relations=relations, + ai_service=ai_service, + team_name=team_name, + team_id=team_id, + manager=manager, + self_improving=self_improving, + critic_agent=critic_agent, + memory_agent=memory_agent, + judge_agent=judge_agent, + ) + + # Live-specific state + self._channels: typing.Dict[typing.Type[agent.AbstractAIAgentChannel], agent.AbstractAIAgentChannel] = {} + self._entry_agents: typing.List[agent.AbstractAIAgentChannelProducer] = [] + self._terminal_agents: typing.List[agent.AbstractAIAgentChannelProducer] = [] + self._terminal_results: typing.Dict[str, typing.Any] = {} + self._completion_event: typing.Optional[asyncio.Event] = None + + async def setup(self) -> None: + """ + Create channels for all agents and wire consumers based on relations. + + This method: + 1. Creates a channel instance for each agent using agent.AGENT_CHANNEL + 2. Identifies entry agents (no incoming edges in relations) + 3. Identifies terminal agents (no outgoing edges in relations) + 4. For each relation (A, B): registers B's consumer on A's channel + """ + incoming_edges, _ = self._build_dag() + + # Create channels and map producers + for agent_inst in self.agents: + if agent_inst.AGENT_CHANNEL is None: + raise errors.AgentConfigurationError(f"Agent {agent_inst.__class__.__name__} has no AGENT_CHANNEL defined") + + channel_type = agent_inst.AGENT_CHANNEL + # Pass team_name and team_id to channels + channel_instance = channel_type( + team_name=self.team_name, + team_id=self.team_id, + ) + self._channels[channel_type] = channel_instance + + # Set the channel on the producer + agent_inst.channel = channel_instance + agent_inst.ai_service = self.ai_service + + # Identify entry and terminal agents + self._entry_agents = self._get_entry_agents() + self._terminal_agents = self._get_terminal_agents() + + # Wire consumers based on relations + for source_channel_type, target_channel_type in self.relations: + source_channel = self._channels.get(source_channel_type) + target_producer = self._producer_by_channel.get(target_channel_type) + + if source_channel is None: + self.logger.warning(f"Source channel {source_channel_type.__name__} not found in team") + continue + if target_producer is None: + self.logger.warning(f"Target producer for {target_channel_type.__name__} not found in team") + continue + + # Calculate expected inputs for target + expected_inputs = len(incoming_edges[target_channel_type]) + + # Create consumer for the target that listens on source's channel + consumer_class = target_producer.AGENT_CONSUMER or agent.AbstractAIAgentChannelConsumer + consumer_instance = consumer_class( + callback=self._create_consumer_callback(target_producer, target_channel_type), + expected_inputs=expected_inputs, + ) + + # Register consumer on source channel + await source_channel.new_consumer( + consumer_instance=consumer_instance, + agent_name=self._producer_by_channel[source_channel_type].name, + ) + + # Wire terminal agent callbacks to collect results + for terminal_agent in self._terminal_agents: + terminal_channel = self._channels.get(terminal_agent.AGENT_CHANNEL) + if terminal_channel: + await terminal_channel.new_consumer( + callback=self._create_terminal_callback(terminal_agent), + agent_name=terminal_agent.name, + ) + + self.logger.debug( + f"Team setup complete: {len(self._entry_agents)} entry agents, " + f"{len(self._terminal_agents)} terminal agents, " + f"{len(self.relations)} relations" + ) + + def _create_consumer_callback( + self, + target_producer: agent.AbstractAIAgentChannelProducer, + target_channel_type: typing.Type[agent.AbstractAIAgentChannel], + ) -> typing.Callable: + """Create a callback that aggregates inputs and triggers the producer.""" + + # Track received inputs for this target (key: agent_name) + received_inputs: typing.Dict[str, typing.Dict[str, typing.Any]] = {} + incoming_edges, _ = self._build_dag() + expected_count = len(incoming_edges.get(target_channel_type, [])) + + async def callback(data: dict) -> None: + source_name = data.get(constants.AGENT_NAME_KEY, "unknown") + source_id = data.get(constants.AGENT_ID_KEY, "") + result = data.get(constants.RESULT_KEY) + + # Store with both name and id for full context + received_inputs[source_name] = { + constants.AGENT_NAME_KEY: source_name, + constants.AGENT_ID_KEY: source_id, + constants.RESULT_KEY: result, + } + + self.logger.debug( + f"Target {target_producer.name} received input from {source_name}[{source_id}] " + f"({len(received_inputs)}/{expected_count})" + ) + + # Trigger when all inputs received + if len(received_inputs) >= expected_count: + self.logger.debug(f"Triggering {target_producer.name} with {len(received_inputs)} inputs") + try: + # Pass the full input data including agent_id + result = await target_producer.execute(received_inputs.copy(), self.ai_service) + await target_producer.push(result) + except Exception as e: + self.logger.error(f"Agent {target_producer.name} execution failed: {e}") + raise + finally: + received_inputs.clear() + + return callback + + def _create_terminal_callback( + self, + terminal_agent: agent.AbstractAIAgentChannelProducer, + ) -> typing.Callable: + """Create a callback that collects terminal agent results.""" + + async def callback(data: dict) -> None: + result = data.get(constants.RESULT_KEY) + self._terminal_results[terminal_agent.name] = result + + self.logger.debug( + f"Terminal agent {terminal_agent.name} completed " + f"({len(self._terminal_results)}/{len(self._terminal_agents)})" + ) + + # Check if all terminal agents completed + if len(self._terminal_results) >= len(self._terminal_agents): + if self._completion_event: + self._completion_event.set() + + return callback + + async def run(self, initial_data: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: + """ + Execute the team pipeline with channels. + + 1. Setup channels and consumers (if not already done) + 2. Start entry agents with initial_data + 3. Wait for terminal agents to complete + 4. Produce team output to team's channel + + Args: + initial_data: Initial data to pass to entry agents. + + Returns: + Dict with results from all terminal agents. + """ + # Setup if not already done + if not self._channels: + await self.setup() + + # Clear previous results + self._terminal_results.clear() + self._completion_event = asyncio.Event() + + # Start entry agents + self.logger.debug(f"Starting {len(self._entry_agents)} entry agents") + + entry_tasks = [] + for entry_agent in self._entry_agents: + async def run_entry(agent: agent.AbstractAIAgentChannelProducer) -> None: # pylint: disable=redefined-outer-name + try: + result = await agent.execute(initial_data, self.ai_service) + await agent.push(result) + except Exception as e: + self.logger.error(f"Entry agent {agent.name} failed: {e}") + raise + + entry_tasks.append(asyncio.create_task(run_entry(entry_agent))) + + # Wait for all entry agents to complete + if entry_tasks: + await asyncio.gather(*entry_tasks) + + # Wait for terminal agents to complete (with timeout) + try: + await asyncio.wait_for(self._completion_event.wait(), timeout=300.0) + except asyncio.TimeoutError: + self.logger.error("Team execution timed out waiting for terminal agents") + raise + + self.logger.debug(f"Team execution completed with {len(self._terminal_results)} results") + + # Store execution results for self-improvement + self.last_execution_results = self._terminal_results.copy() + + # Push team result if we have a channel + if self.channel is not None: + await self.push(self._terminal_results) + + # Trigger self-improvement in background if enabled + if self.self_improving and self.critic_agent and self.memory_agent: + asyncio.create_task(self._self_improve_in_background(self._terminal_results)) + + return self._terminal_results + + async def stop(self) -> None: + """Stop all agents and cleanup channels.""" + for channel in self._channels.values(): + try: + await channel.stop() + except Exception as e: + self.logger.warning(f"Error stopping channel: {e}") + + self._channels.clear() + self._entry_agents.clear() + self._terminal_agents.clear() + self._terminal_results.clear() + + self.logger.debug("Team stopped") diff --git a/packages/agents/octobot_agents/team/channels/deep_agents_team.py b/packages/agents/octobot_agents/team/channels/deep_agents_team.py new file mode 100644 index 0000000000..cc8382ef49 --- /dev/null +++ b/packages/agents/octobot_agents/team/channels/deep_agents_team.py @@ -0,0 +1,618 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +""" +Deep Agents Team implementation using LangChain Deep Agents. + +Features: +- SubAgentMiddleware for task delegation to workers +- TodoListMiddleware for planning +- CompositeBackend for long-term memory (/memories/) +- Streaming support +- Debug logging for agent operations +""" + +import abc +import typing +import json +import logging +import uuid + +import octobot_agents.team.channels.agents_team as agents_team +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.agent.channels.deep_agent as deep_agent +import octobot_agents.constants as constants +import octobot_agents.errors as errors +import octobot_agents.storage.history as history +import octobot_services.services.abstract_ai_service as abstract_ai_service + +logger = logging.getLogger(__name__.split(".")[-1]) + +try: + from deepagents import create_deep_agent + from deepagents.backends import CompositeBackend, StateBackend, StoreBackend + from langgraph.store.memory import InMemoryStore + from langgraph.checkpoint.memory import MemorySaver + from langgraph.types import Command + DEEP_AGENTS_AVAILABLE = True +except ImportError as e: + DEEP_AGENTS_AVAILABLE = False + logger.debug(f"deepagents not available - Deep Agent features disabled: {e}") + + +class AbstractDeepAgentsTeamChannel(agents_team.AbstractAgentsTeamChannel): + __metaclass__ = abc.ABCMeta + + +class AbstractDeepAgentsTeamChannelConsumer(agents_team.AbstractAgentsTeamChannelConsumer): + __metaclass__ = abc.ABCMeta + + +class AbstractDeepAgentsTeamChannelProducer(agents_team.AbstractAgentsTeamChannelProducer, abc.ABC): + """ + Team producer using LangChain Deep Agents with supervisor pattern. + + Features: + - SubAgentMiddleware for worker delegation + - TodoListMiddleware for planning + - CompositeBackend with /memories/ for persistent storage + - Streaming support + - Debug logging + """ + + TEAM_CHANNEL: typing.Type[AbstractDeepAgentsTeamChannel] = AbstractDeepAgentsTeamChannel + TEAM_CONSUMER: typing.Type[AbstractDeepAgentsTeamChannelConsumer] = AbstractDeepAgentsTeamChannelConsumer + + MAX_ITERATIONS: int = 10 + ENABLE_DEBATE: bool = False + ENABLE_STREAMING: bool = False + + ENABLE_HITL: bool = False + HITL_INTERRUPT_TOOLS: dict[str, typing.Any] = {} + + SKILLS_DIRS: list[str] = [] + + def __init__( + self, + channel: typing.Optional[AbstractDeepAgentsTeamChannel] = None, + ai_service: typing.Optional[abstract_ai_service.AbstractAIService] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + team_name: typing.Optional[str] = None, + team_id: typing.Optional[str] = None, + store: typing.Any = None, + checkpointer: typing.Any = None, + skills: list[str] | None = None, + interrupt_on: dict[str, typing.Any] | None = None, + enable_streaming: bool | None = None, + analysis_storage: typing.Optional[typing.Any] = None, + ): + # pylint: disable=super-init-not-called,non-parent-init-called + agent_channels.AbstractAgentChannelProducer.__init__(self, channel) + self.ai_service = ai_service + self.model = model + self.max_tokens = max_tokens + self.temperature = temperature or constants.AGENT_DEFAULT_TEMPERATURE + self.team_name = team_name or self.__class__.__dict__.get('TEAM_NAME', self.__class__.__name__) + self.team_id = team_id + + self._store = store + self._checkpointer = checkpointer + self._deep_agent = None + self._workers: list[dict[str, typing.Any]] = [] + + self._interrupt_on = interrupt_on or self.HITL_INTERRUPT_TOOLS + self._skills = skills or self.SKILLS_DIRS + self._enable_streaming = enable_streaming if enable_streaming is not None else self.ENABLE_STREAMING + + self._current_thread_id: str | None = None + + self.logger = logging.getLogger(f"{self.__class__.__name__}") + + # Initialize analysis storage + if analysis_storage is None: + self.analysis_storage = history.create_analysis_storage() + else: + self.analysis_storage = analysis_storage + + if not deep_agent.DEEP_AGENTS_AVAILABLE: + self.logger.warning("deep_agents not available - team will not function") + + @abc.abstractmethod + def get_worker_definitions(self) -> list[dict[str, typing.Any]]: + raise NotImplementedError("Subclasses must implement get_worker_definitions()") + + @abc.abstractmethod + def get_manager_instructions(self) -> str: + raise NotImplementedError("Subclasses must implement get_manager_instructions()") + + def save_analysis( + self, + agent_name: str, + result: typing.Any, + ) -> None: + """ + Save analysis results to storage for debugging/audit purposes. + + Delegates to the analysis storage backend. Results are saved with metadata + for cross-agent access and debugging. + + Args: + agent_name: Name of the agent/worker producing the analysis. + result: The analysis result to save (dict, str, or other serializable). + """ + try: + self.analysis_storage.save_analysis( + agent_name=agent_name, + result=result, + team_name=self.team_name, + team_id=self.team_id, + ) + except Exception as e: + self.logger.warning(f"Failed to save analysis for {agent_name}: {e}") + + def clear_transient_files(self) -> None: + """ + Clear analysis files from previous runs. + + Delegates to the analysis storage backend to ensure clean state + for the next execution. + """ + try: + self.analysis_storage.clear_transient_files() + except Exception as e: + self.logger.warning(f"Failed to clear transient files: {e}") + + def get_manager_tools(self) -> list[typing.Callable] | None: + return None + + def get_critic_config(self) -> dict[str, typing.Any] | None: + if not self.ENABLE_DEBATE: + return None + return { + "name": "critic", + "instructions": "Critique the analysis, identify weaknesses, suggest improvements.", + } + + def get_interrupt_config(self) -> dict[str, typing.Any]: + return self._interrupt_on + + def get_skills(self) -> list[str]: + return self._skills + + def get_agent_skills(self, agent_name: str) -> list[str] | None: + """ + Get skills for a specific worker agent. + Override to provide agent-specific skills. + + Args: + agent_name: Name of the worker agent + + Returns: + List of skill paths (e.g., ["./technical-analysis/"]) or None + """ + return None + + def get_agent_skills_files(self, agent_name: str) -> dict[str, str] | None: + """ + Get skill files for a specific worker agent. + Override to provide agent-specific skill files. + + Args: + agent_name: Name of the worker agent + + Returns: + Dict mapping virtual paths to file content or None + """ + skills_dir = self.get_skills_resources_dir() # pylint: disable=assignment-from-none + if not skills_dir: + return None + + # Try to find agent-specific skills directory + import os + agent_skills_dir = os.path.join(skills_dir, agent_name) + if os.path.isdir(agent_skills_dir): + return deep_agent.create_skills_files_dict(agent_skills_dir) + + return None + + def get_skills_resources_dir(self) -> str | None: + """ + Get the tentacle's resources/skills directory path. + Override this to provide a custom skills directory. + By default, returns None (no auto-discovery). + + Example implementation in tentacle: + import os + return os.path.join(os.path.dirname(__file__), "resources", "skills") + """ + return None + + def _create_memory_backend(self) -> typing.Callable: + def make_backend(runtime): + if not deep_agent.DEEP_AGENTS_AVAILABLE or not CompositeBackend: + return None + return CompositeBackend( + default=StateBackend(runtime), + routes={ + f"{constants.MEMORIES_PATH_PREFIX}": StoreBackend(runtime) + } + ) + return make_backend + + def _get_or_create_store(self) -> typing.Any: + if self._store is None and deep_agent.DEEP_AGENTS_AVAILABLE: + self._store = InMemoryStore() + return self._store + + def _get_or_create_checkpointer(self) -> typing.Any: + if self._checkpointer is None and deep_agent.DEEP_AGENTS_AVAILABLE: + self._checkpointer = MemorySaver() + return self._checkpointer + + def _build_deep_agent(self) -> typing.Any: + if not deep_agent.DEEP_AGENTS_AVAILABLE: + raise errors.DeepAgentNotAvailableError("deep_agents package is required") + + self.logger.debug(f"[{self.team_name}] Building deep agent team...") + + workers = self.get_worker_definitions() + self._workers = workers + + # Build subagents with their individual skills + subagents = [] + for w in workers: + agent_name = w.get("name", "unnamed") + + # Get agent-specific skills + agent_skills = self.get_agent_skills(agent_name) # pylint: disable=assignment-from-none + agent_files = self.get_agent_skills_files(agent_name) # pylint: disable=assignment-from-none + + if agent_skills: + self.logger.debug(f"[{self.team_name}] Loading skills for {agent_name}: {agent_skills}") + if agent_files: + self.logger.debug(f"[{self.team_name}] Loading {len(agent_files)} skill files for {agent_name}") + + # Prefer using the default_model when available (it can be a BaseChatModel instance). + # If a worker overrides the model, build a concrete chat model instance so LangChain + # does not need to infer a provider from a raw model string. + subagent_model = w.get("model") + if subagent_model is None: + if self.ai_service is None: + subagent_model = self.model + else: + subagent_model = None + elif self.ai_service is not None and isinstance(subagent_model, str): + subagent_model = self.ai_service.init_chat_model(model=subagent_model) + + subagent = deep_agent.build_dictionary_subagent( + name=agent_name, + instructions=w.get("instructions", ""), + description=w.get("description"), + tools=w.get("tools"), + model=subagent_model, + model_provider=w.get("model_provider") or (self.ai_service.ai_provider.value if self.ai_service else None), + handoff_back=w.get("handoff_back", True), + interrupt_on=w.get("interrupt_on"), + skills=agent_skills, + files=agent_files, + ) + subagents.append(subagent) + + self.logger.debug(f"[{self.team_name}] Created {len(subagents)} worker subagents") + + critic_config = self.get_critic_config() + if self.ENABLE_DEBATE and critic_config: + critic_subagent = deep_agent.build_dictionary_subagent( + name=critic_config.get("name", "critic"), + instructions=critic_config.get("instructions", ""), + description="Critiques analyses and suggests improvements", + tools=critic_config.get("tools"), + model_provider=critic_config.get("model_provider") or (self.ai_service.ai_provider.value if self.ai_service else None), + handoff_back=True, + ) + subagents.append(critic_subagent) + self.logger.debug(f"[{self.team_name}] Added critic subagent for debate mode") + + manager_instructions = self.get_manager_instructions() + team_instructions = f""" +You are the manager of the {self.team_name} team. + +{manager_instructions} + +Your team members: +{chr(10).join(f"- {w.get('name', 'unnamed')}: {w.get('description', w.get('instructions', '')[:100])}..." for w in workers)} + +Workflow: +1. Use write_todos to plan your approach +2. Delegate tasks to appropriate team members +3. Collect and synthesize their results +4. {"Run debate rounds with critic if needed" if self.ENABLE_DEBATE else "Provide final synthesized output"} + +Save important insights to /memories/ for future reference. +""".strip() + + self.logger.debug(f"[{self.team_name}] Initializing chat model from AI service") + model = None + if self.ai_service is not None: + model = self.ai_service.init_chat_model(model=self.model) + + agent_kwargs: dict[str, typing.Any] = { + "model": model, + "system_prompt": team_instructions, + "tools": self.get_manager_tools() or [], + "store": self._get_or_create_store(), + "backend": self._create_memory_backend(), + "name": f"{self.team_name}_manager", + } + + # Pass subagents directly - create_deep_agent will wrap them in SubAgentMiddleware + if subagents: + agent_kwargs["subagents"] = subagents + self.logger.debug(f"[{self.team_name}] Passing {len(subagents)} subagents to create_deep_agent") + + # Auto-discover skills from tentacle's resources/skills directory for manager + skills = self.get_skills() + skills_dir = self.get_skills_resources_dir() # pylint: disable=assignment-from-none + + if skills_dir: + discovered = deep_agent.discover_skills(skills_dir) + if discovered: + skills = (skills or []) + discovered + self.logger.debug(f"[{self.team_name}] Auto-discovered {len(discovered)} skills from {skills_dir}") + + if skills: + agent_kwargs["skills"] = skills + self.logger.debug(f"[{self.team_name}] Using skills: {skills}") + + interrupt_config = self.get_interrupt_config() + if interrupt_config: + checkpointer = self._get_or_create_checkpointer() + agent_kwargs["interrupt_on"] = interrupt_config + agent_kwargs["checkpointer"] = checkpointer + self.logger.debug(f"[{self.team_name}] HITL enabled for tools: {list(interrupt_config.keys())}") + + self.logger.debug(f"[{self.team_name}] Deep agent team built successfully") + return create_deep_agent(**agent_kwargs) + + def get_deep_agent(self, force_rebuild: bool = False) -> typing.Any: + if self._deep_agent is None or force_rebuild: + self._deep_agent = self._build_deep_agent() + return self._deep_agent + + async def run( + self, + initial_data: typing.Dict[str, typing.Any], + thread_id: str | None = None, + skills_files: dict[str, str] | None = None, + ) -> typing.Dict[str, typing.Any]: + if not deep_agent.DEEP_AGENTS_AVAILABLE: + return {"error": "Deep Agents not available"} + + agent = self.get_deep_agent() + if agent is None: + return {"error": "Failed to create Deep Agent"} + + message = self._build_input_message(initial_data) + + if thread_id is None: + thread_id = str(uuid.uuid4()) + self._current_thread_id = thread_id + + config = {"configurable": {"thread_id": thread_id}} + + invoke_input: dict[str, typing.Any] = { + "messages": [{"role": "user", "content": message}] + } + + if skills_files: + invoke_input["files"] = skills_files + + self.logger.debug(f"[{self.team_name}] Running team with input: {message[:100]}...") + + try: + if self._enable_streaming: + result = await self._run_with_streaming(agent, invoke_input, config) + else: + result = await agent.ainvoke(invoke_input, config=config) + + if self.is_interrupted(result): + return result + + parsed_result = self._parse_result(result) + + if self.channel is not None: + await self.push(parsed_result) + + self.logger.debug(f"[{self.team_name}] Team run complete") + return parsed_result + + except Exception as e: + self.logger.error(f"[{self.team_name}] Error running Deep Agent team: {e}") + return {"error": str(e)} + + async def _run_with_streaming( + self, + agent: typing.Any, + invoke_input: dict, + config: dict, + ) -> dict: + + self.logger.debug(f"[{self.team_name}] Starting streaming run") + + async for event in agent.astream( + invoke_input, + config=config, + stream_mode="updates", + ): + for node_name, node_output in event.items(): + if node_name == "agent": + messages = node_output.get("messages", []) + for msg in messages: + # Handle both dict-like messages and LangChain message objects + tool_calls = msg.get("tool_calls") if isinstance(msg, dict) else getattr(msg, "tool_calls", None) + if tool_calls: + for tc in tool_calls: + tool_name = tc["name"] if isinstance(tc, dict) else getattr(tc, "name", "unknown") + self.logger.debug(f"[{self.team_name}] 🔧 Calling tool: {tool_name}") + else: + content = msg.get("content", "") if isinstance(msg, dict) else getattr(msg, "content", "") + if content: + content_preview = content[:100] if len(content) > 100 else content + self.logger.debug(f"[{self.team_name}] 💭 Agent thinking: {content_preview}...") + + elif node_name == "tools": + messages = node_output.get("messages", []) + for msg in messages: + msg_name = msg.get("name") if isinstance(msg, dict) else getattr(msg, "name", None) + if msg_name: + self.logger.debug(f"[{self.team_name}] ✅ Tool result from: {msg_name}") + + result = event # pylint: disable=unused-variable + + state = await agent.aget_state(config) + self.logger.debug(f"[{self.team_name}] Streaming complete") + return {"messages": state.values.get("messages", [])} + + def is_interrupted(self, result: dict) -> bool: + return constants.HITL_INTERRUPT_KEY in result + + def get_interrupt_info(self, result: dict) -> dict | None: + if not self.is_interrupted(result): + return None + + interrupts = result[constants.HITL_INTERRUPT_KEY] + if not interrupts: + return None + + interrupt_obj = interrupts[0] + # Handle both dict and object types + if isinstance(interrupt_obj, dict): + return interrupt_obj.get('value', interrupt_obj) + else: + return getattr(interrupt_obj, 'value', interrupt_obj) + + async def resume_with_decisions( + self, + decisions: list[dict[str, typing.Any]], + thread_id: str | None = None, + ) -> dict: + if not deep_agent.DEEP_AGENTS_AVAILABLE: + return {"error": "Deep Agents not available"} + + agent = self.get_deep_agent() + if agent is None: + return {"error": "Deep Agent not available"} + + thread_id = thread_id or self._current_thread_id + if thread_id is None: + return {"error": "No thread_id for resume"} + + config = {"configurable": {"thread_id": thread_id}} + + self.logger.debug(f"[{self.team_name}] Resuming with {len(decisions)} decisions") + + try: + result = await agent.ainvoke( + Command(resume={"decisions": decisions}), + config=config, + ) + + if self.is_interrupted(result): + return result + + parsed_result = self._parse_result(result) + + if self.channel is not None: + await self.push(parsed_result) + + return parsed_result + + except Exception as e: + self.logger.error(f"[{self.team_name}] Error resuming Deep Agent team: {e}") + return {"error": str(e)} + + async def approve_all_interrupts(self, result: dict, thread_id: str | None = None) -> dict: + interrupt_info = self.get_interrupt_info(result) + if interrupt_info is None: + return result + + action_requests = interrupt_info.get("action_requests", []) + decisions = [{"type": constants.HITL_DECISION_APPROVE} for _ in action_requests] + + return await self.resume_with_decisions(decisions, thread_id) + + async def reject_all_interrupts(self, result: dict, thread_id: str | None = None) -> dict: + interrupt_info = self.get_interrupt_info(result) + if interrupt_info is None: + return result + + action_requests = interrupt_info.get("action_requests", []) + decisions = [{"type": constants.HITL_DECISION_REJECT} for _ in action_requests] + + return await self.resume_with_decisions(decisions, thread_id) + + def _build_input_message(self, initial_data: typing.Dict[str, typing.Any]) -> str: + data_str = json.dumps(initial_data, indent=2, default=str) + return f""" +Process the following data with your team: + +{data_str} + +Coordinate with your workers and provide a final synthesized result. +""".strip() + + def _parse_result(self, result: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: + try: + messages = result.get("messages", []) + if not messages: + return {"error": "No response from agent"} + + last_message = messages[-1] + # Handle both dict and LangChain message objects + if isinstance(last_message, dict): + content = last_message.get("content", "") + else: + content = getattr(last_message, "content", str(last_message)) + + try: + json_start = content.find("{") + json_end = content.rfind("}") + 1 + if json_start >= 0 and json_end > json_start: + json_str = content[json_start:json_end] + return json.loads(json_str) + except json.JSONDecodeError: + pass + + return {"result": content} + + except Exception as e: + self.logger.error(f"Error parsing result: {e}") + return {"error": str(e)} + + async def push(self, result: typing.Any) -> None: + if self.channel is None: + return + + for consumer in self.channel.get_consumers(): + await consumer.queue.put({ + "team_name": self.team_name, + "team_id": self.team_id or "", + "result": result, + }) + + def get_memory_path(self, memory_type: str = "data") -> str: + return f"{constants.MEMORIES_PATH_PREFIX}{self.team_name}/{memory_type}" diff --git a/packages/agents/octobot_agents/team/critic/__init__.py b/packages/agents/octobot_agents/team/critic/__init__.py new file mode 100644 index 0000000000..4086f85aca --- /dev/null +++ b/packages/agents/octobot_agents/team/critic/__init__.py @@ -0,0 +1,36 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.critic.channels.critic_agent import ( + CriticAgentMixin, + CriticAgentChannel, + CriticAgentConsumer, + CriticAgentProducer, + AICriticAgentChannel, + AICriticAgentConsumer, + AICriticAgentProducer, +) + + +__all__ = [ + "CriticAgentMixin", + "CriticAgentChannel", + "CriticAgentConsumer", + "CriticAgentProducer", + "AICriticAgentChannel", + "AICriticAgentConsumer", + "AICriticAgentProducer", +] diff --git a/packages/agents/octobot_agents/team/critic/channels/__init__.py b/packages/agents/octobot_agents/team/critic/channels/__init__.py new file mode 100644 index 0000000000..63476a6dc7 --- /dev/null +++ b/packages/agents/octobot_agents/team/critic/channels/__init__.py @@ -0,0 +1,35 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.critic.channels.critic_agent import ( + CriticAgentMixin, + CriticAgentChannel, + CriticAgentConsumer, + CriticAgentProducer, + AICriticAgentChannel, + AICriticAgentConsumer, + AICriticAgentProducer, +) + +__all__ = [ + "CriticAgentMixin", + "CriticAgentChannel", + "CriticAgentConsumer", + "CriticAgentProducer", + "AICriticAgentChannel", + "AICriticAgentConsumer", + "AICriticAgentProducer", +] diff --git a/packages/agents/octobot_agents/team/critic/channels/critic_agent.py b/packages/agents/octobot_agents/team/critic/channels/critic_agent.py new file mode 100644 index 0000000000..5d69b7cd0a --- /dev/null +++ b/packages/agents/octobot_agents/team/critic/channels/critic_agent.py @@ -0,0 +1,106 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing + +import octobot_commons.logging as logging + +import octobot_agents.models as models +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_services.services.abstract_ai_service as abstract_ai_service + + +class CriticAgentMixin: + """ + Mixin that provides critic agent functionality. + + Critic agents analyze team execution to find issues, improvements, errors, and inconsistencies. + """ + + async def execute( + self, + input_data: typing.Union[models.CriticInput, typing.Dict[str, typing.Any]], + ai_service: abstract_ai_service.AbstractAIService + ) -> models.CriticAnalysis: + """ + Execute critic analysis of team execution. + + Args: + input_data: Contains {"team_producer": team_producer, "execution_plan": ExecutionPlan, "execution_results": Dict, "agent_outputs": Dict, "execution_metadata": dict} + ai_service: The AI service instance (for AI critic agents) + + Returns: + CriticAnalysis with issues, improvements, errors, inconsistencies, and agent_improvements + """ + raise NotImplementedError("execute must be implemented by subclasses") + + +class CriticAgentChannel(agent_channels.AbstractAgentChannel): + OUTPUT_SCHEMA = models.CriticAnalysis + + +class CriticAgentConsumer(agent_channels.AbstractAgentChannelConsumer): + pass + + +class CriticAgentProducer(CriticAgentMixin, agent_channels.AbstractAgentChannelProducer): + + AGENT_CHANNEL = CriticAgentChannel + AGENT_CONSUMER = CriticAgentConsumer + + def __init__( + self, + channel: typing.Optional[CriticAgentChannel] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__(channel, **kwargs) + self.self_improving = self_improving + self.name = self.__class__.__name__ + self.logger = logging.get_logger(self.__class__.__name__) + + +class AICriticAgentChannel(CriticAgentChannel, ai_agent_channels.AbstractAIAgentChannel): + pass + + +class AICriticAgentConsumer(CriticAgentConsumer, ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class AICriticAgentProducer(CriticAgentProducer, ai_agent_channels.AbstractAIAgentChannelProducer): + + AGENT_CHANNEL = AICriticAgentChannel + AGENT_CONSUMER = AICriticAgentConsumer + + def __init__( + self, + channel: typing.Optional[AICriticAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + self_improving=self_improving, + **kwargs + ) + self.name = self.__class__.__name__ diff --git a/packages/agents/octobot_agents/team/judge/__init__.py b/packages/agents/octobot_agents/team/judge/__init__.py new file mode 100644 index 0000000000..e969276924 --- /dev/null +++ b/packages/agents/octobot_agents/team/judge/__init__.py @@ -0,0 +1,35 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.judge.channels.judge_agent import ( + JudgeAgentMixin, + JudgeAgentChannel, + JudgeAgentConsumer, + JudgeAgentProducer, + AIJudgeAgentChannel, + AIJudgeAgentConsumer, + AIJudgeAgentProducer, +) + +__all__ = [ + "JudgeAgentMixin", + "JudgeAgentChannel", + "JudgeAgentConsumer", + "JudgeAgentProducer", + "AIJudgeAgentChannel", + "AIJudgeAgentConsumer", + "AIJudgeAgentProducer", +] diff --git a/packages/agents/octobot_agents/team/judge/channels/__init__.py b/packages/agents/octobot_agents/team/judge/channels/__init__.py new file mode 100644 index 0000000000..e969276924 --- /dev/null +++ b/packages/agents/octobot_agents/team/judge/channels/__init__.py @@ -0,0 +1,35 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.judge.channels.judge_agent import ( + JudgeAgentMixin, + JudgeAgentChannel, + JudgeAgentConsumer, + JudgeAgentProducer, + AIJudgeAgentChannel, + AIJudgeAgentConsumer, + AIJudgeAgentProducer, +) + +__all__ = [ + "JudgeAgentMixin", + "JudgeAgentChannel", + "JudgeAgentConsumer", + "JudgeAgentProducer", + "AIJudgeAgentChannel", + "AIJudgeAgentConsumer", + "AIJudgeAgentProducer", +] diff --git a/packages/agents/octobot_agents/team/judge/channels/judge_agent.py b/packages/agents/octobot_agents/team/judge/channels/judge_agent.py new file mode 100644 index 0000000000..493f868555 --- /dev/null +++ b/packages/agents/octobot_agents/team/judge/channels/judge_agent.py @@ -0,0 +1,110 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +""" +Abstract judge agent interface and base channel classes for debate phases. + +Judge agents decide whether a debate should continue or exit and optionally +provide a synthesis summary when exiting. +""" +import typing + +import octobot_commons.logging as logging + +import octobot_agents.models as models +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_services.services.abstract_ai_service as abstract_ai_service + + +class JudgeAgentMixin: + """ + Mixin that provides judge agent functionality. + + Judge agents are used in debate phases: they receive debate history + (messages from debator agents) and decide whether to continue the debate + or exit with an optional synthesis summary. + """ + + async def execute( + self, + input_data: typing.Union[typing.Dict[str, typing.Any], models.JudgeInput], + ai_service: abstract_ai_service.AbstractAIService, + ) -> models.JudgeDecision: + """ + Execute judge decision on debate state. + + Args: + input_data: Contains debate_history (list of {agent_name, message, round}), + debator_agent_names, current_round, max_rounds, and optional + _initial_state for context. + ai_service: The AI service instance (for AI judge agents). + + Returns: + JudgeDecision with decision ("continue" or "exit"), reasoning, and optional summary. + """ + raise NotImplementedError("execute must be implemented by subclasses") + + +class JudgeAgentChannel(agent_channels.AbstractAgentChannel): + OUTPUT_SCHEMA = models.JudgeDecision + + +class JudgeAgentConsumer(agent_channels.AbstractAgentChannelConsumer): + pass + + +class JudgeAgentProducer(JudgeAgentMixin, agent_channels.AbstractAgentChannelProducer): + AGENT_CHANNEL = JudgeAgentChannel + AGENT_CONSUMER = JudgeAgentConsumer + + def __init__( + self, + channel: typing.Optional[JudgeAgentChannel] = None, + **kwargs, + ): + super().__init__(channel, **kwargs) + self.name = self.__class__.__name__ + self.logger = logging.get_logger(self.__class__.__name__) + + +class AIJudgeAgentChannel(JudgeAgentChannel, ai_agent_channels.AbstractAIAgentChannel): + pass + + +class AIJudgeAgentConsumer(JudgeAgentConsumer, ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class AIJudgeAgentProducer(JudgeAgentProducer, ai_agent_channels.AbstractAIAgentChannelProducer): + AGENT_CHANNEL = AIJudgeAgentChannel + AGENT_CONSUMER = AIJudgeAgentConsumer + + def __init__( + self, + channel: typing.Optional[AIJudgeAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs + ) + self.name = self.__class__.__name__ diff --git a/packages/agents/octobot_agents/team/manager/__init__.py b/packages/agents/octobot_agents/team/manager/__init__.py new file mode 100644 index 0000000000..663994c7ae --- /dev/null +++ b/packages/agents/octobot_agents/team/manager/__init__.py @@ -0,0 +1,47 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.manager.channels.manager_agent import ( + TeamManagerMixin, + ManagerAgentChannel, + ManagerAgentConsumer, + ManagerAgentProducer, + AIManagerAgentChannel, + AIManagerAgentConsumer, + AIManagerAgentProducer, + AIPlanManagerAgentChannel, + AIPlanManagerAgentConsumer, + AIPlanManagerAgentProducer, + AIToolsManagerAgentChannel, + AIToolsManagerAgentConsumer, + AIToolsManagerAgentProducer, +) + +__all__ = [ + "TeamManagerMixin", + "ManagerAgentChannel", + "ManagerAgentConsumer", + "ManagerAgentProducer", + "AIManagerAgentChannel", + "AIManagerAgentConsumer", + "AIManagerAgentProducer", + "AIPlanManagerAgentChannel", + "AIPlanManagerAgentConsumer", + "AIPlanManagerAgentProducer", + "AIToolsManagerAgentChannel", + "AIToolsManagerAgentConsumer", + "AIToolsManagerAgentProducer", +] diff --git a/packages/agents/octobot_agents/team/manager/channels/__init__.py b/packages/agents/octobot_agents/team/manager/channels/__init__.py new file mode 100644 index 0000000000..663994c7ae --- /dev/null +++ b/packages/agents/octobot_agents/team/manager/channels/__init__.py @@ -0,0 +1,47 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.team.manager.channels.manager_agent import ( + TeamManagerMixin, + ManagerAgentChannel, + ManagerAgentConsumer, + ManagerAgentProducer, + AIManagerAgentChannel, + AIManagerAgentConsumer, + AIManagerAgentProducer, + AIPlanManagerAgentChannel, + AIPlanManagerAgentConsumer, + AIPlanManagerAgentProducer, + AIToolsManagerAgentChannel, + AIToolsManagerAgentConsumer, + AIToolsManagerAgentProducer, +) + +__all__ = [ + "TeamManagerMixin", + "ManagerAgentChannel", + "ManagerAgentConsumer", + "ManagerAgentProducer", + "AIManagerAgentChannel", + "AIManagerAgentConsumer", + "AIManagerAgentProducer", + "AIPlanManagerAgentChannel", + "AIPlanManagerAgentConsumer", + "AIPlanManagerAgentProducer", + "AIToolsManagerAgentChannel", + "AIToolsManagerAgentConsumer", + "AIToolsManagerAgentProducer", +] diff --git a/packages/agents/octobot_agents/team/manager/channels/manager_agent.py b/packages/agents/octobot_agents/team/manager/channels/manager_agent.py new file mode 100644 index 0000000000..36a19c9c33 --- /dev/null +++ b/packages/agents/octobot_agents/team/manager/channels/manager_agent.py @@ -0,0 +1,432 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import typing + +import octobot_commons.logging as logging + +import octobot_services.enums as services_enums + +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_agents.models as models +import octobot_agents.constants as constants +import octobot_agents.utils.retry as retry_utils + +class TeamManagerMixin: + """ + Mixin that provides team manager functionality. + + Both managers are agents and follow the agent pattern with channels. + """ + + async def execute( + self, + input_data: typing.Union[models.ManagerInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any + ) -> typing.Union[models.ExecutionPlan, models.ManagerResult]: + """ + Execute the manager's logic and return an execution plan or terminal results. + + Args: + input_data: Contains {"team_producer": team_producer, "initial_data": initial_data, "instructions": instructions} + ai_service: The AI service instance (for AI managers) + + Returns: + ExecutionPlan with steps for team execution (plan-driven) or models.ManagerResult with terminal results (tools-driven) + """ + raise NotImplementedError("execute must be implemented by subclasses") + + async def send_instruction_to_agent( + self, + agent: ai_agent_channels.AbstractAIAgentChannelProducer, + instruction: typing.Dict[str, typing.Any], + ) -> None: + """ + Send instruction to an agent via channel.modify(). + + Args: + agent: The agent producer to send instructions to + instruction: Dict with modification constants as keys (e.g., {MODIFICATION_ADDITIONAL_INSTRUCTIONS: "..."}) + """ + if agent.channel is None: + self.logger.debug(f"Agent {agent.name} has no channel, skipping instructions") + return + + await agent.channel.modify(**instruction) + + +class ManagerAgentChannel(agent_channels.AbstractAgentChannel): + OUTPUT_SCHEMA = models.ExecutionPlan + + +class ManagerAgentConsumer(agent_channels.AbstractAgentChannelConsumer): + pass + + +class ManagerAgentProducer(TeamManagerMixin, agent_channels.AbstractAgentChannelProducer): + AGENT_CHANNEL = ManagerAgentChannel + AGENT_CONSUMER = ManagerAgentConsumer + + def __init__( + self, + channel: typing.Optional[ManagerAgentChannel] = None, + **kwargs, + ): + super().__init__(channel, **kwargs) + self.name = self.__class__.__name__ + self.logger = logging.get_logger(self.__class__.__name__) + + +class AIManagerAgentChannel(ManagerAgentChannel, ai_agent_channels.AbstractAIAgentChannel): + pass + + +class AIManagerAgentConsumer(ManagerAgentConsumer, ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class AIManagerAgentProducer(ManagerAgentProducer, ai_agent_channels.AbstractAIAgentChannelProducer): + + AGENT_CHANNEL = AIManagerAgentChannel + AGENT_CONSUMER = AIManagerAgentConsumer + MODEL_POLICY = services_enums.AIModelPolicy.REASONING + + def __init__( + self, + channel: typing.Optional[AIManagerAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs + ) + self.name = self.__class__.__name__ + + +class AIPlanManagerAgentChannel(AIManagerAgentChannel): + pass + + +class AIPlanManagerAgentConsumer(AIManagerAgentConsumer): + pass + + +class AIPlanManagerAgentProducer(AIManagerAgentProducer): + + AGENT_CHANNEL = AIPlanManagerAgentChannel + AGENT_CONSUMER = AIPlanManagerAgentConsumer + + def __init__( + self, + channel: typing.Optional[AIPlanManagerAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs + ) + +class AIToolsManagerAgentChannel(AIManagerAgentChannel): + pass + + +class AIToolsManagerAgentConsumer(AIManagerAgentConsumer): + pass + + +class AIToolsManagerAgentProducer(AIManagerAgentProducer): + + AGENT_CHANNEL = AIToolsManagerAgentChannel + AGENT_CONSUMER = AIToolsManagerAgentConsumer + + def __init__( + self, + channel: typing.Optional[AIToolsManagerAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + max_tool_calls: typing.Optional[int] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs + ) + self.max_tool_calls = max_tool_calls or 50 + + async def execute( + self, + input_data: typing.Union[models.ManagerInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any + ) -> models.ManagerResult: + """ + Execute tools-driven management with internal tool loop. + + Returns models.ManagerResult with terminal results instead of ExecutionPlan. + """ + team_producer = input_data.get("team_producer") + initial_data = input_data.get("initial_data", {}) + instructions = input_data.get("instructions") + + if team_producer is None: + raise ValueError("team_producer is required in input_data") + + # Initialize state + state = models.ManagerState( + completed_agents=[], + results={}, + initial_data=initial_data, + tool_call_history=[] + ) + + tool_call_count = 0 + + while tool_call_count < self.max_tool_calls: + # Build context for LLM + context = self._build_tools_context(team_producer, state, instructions) + + # Get tool call from LLM + tool_call = await self._get_tool_call(context, ai_service) + + if tool_call.tool_name == constants.TOOL_FINISH: + # Finish tool called - return current results + break + + # Execute the tool + await self._execute_tool(tool_call, team_producer, state, ai_service) + + tool_call_count += 1 + state.tool_call_history.append(tool_call) + + return models.ManagerResult( + completed_agents=state.completed_agents, + results=state.results, + tool_calls_used=tool_call_count, + ) + + def _build_tools_context( + self, + team_producer: typing.Any, + state: models.ManagerState, + instructions: typing.Optional[str] + ) -> typing.Dict[str, typing.Any]: + """Build context dict for LLM tool call.""" + agents_info = [] + for agent in team_producer.agents: + agents_info.append({ + "name": agent.name, + "channel": agent.AGENT_CHANNEL.__name__ if agent.AGENT_CHANNEL else None, + }) + + return { + "team_name": team_producer.team_name, + "agents": agents_info, + "completed_agents": state.completed_agents, + "current_results": state.results, + "initial_data": state.initial_data, + "instructions": instructions, + "tool_call_history": [call.model_dump() for call in state.tool_call_history], + } + + async def _get_tool_call( + self, + context: typing.Dict[str, typing.Any], + ai_service: typing.Any + ) -> models.ManagerToolCall: + system_prompt = self._get_tools_prompt() + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": f"Context: {self.format_data(context)}"}, + ] + + tools = [ + ai_service.format_tool_definition( + name=constants.TOOL_RUN_AGENT, + description="Run a specific agent and get its result", + parameters=models.RunAgentArgs.model_json_schema(), + ), + ai_service.format_tool_definition( + name=constants.TOOL_RUN_DEBATE, + description="Run a debate between agents with a judge", + parameters=models.RunDebateArgs.model_json_schema(), + ), + ai_service.format_tool_definition( + name=constants.TOOL_FINISH, + description="Finish execution and return current results", + parameters={}, + ), + ] + + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + tools=tools, + return_tool_calls=True, + ) + + response_data, error_msg = models.AgentBaseModel.normalize_tool_call_response( + response_data, + finish_tool_name=constants.TOOL_FINISH, + ) + if error_msg: + raise ValueError(f"LLM failed to return valid tool calls: {error_msg}") + return models.ManagerToolCall.model_validate(response_data) + + @retry_utils.retry_async(lambda self, agent, *args, **kwargs: agent.MAX_RETRIES) + async def _execute_agent_with_retry( + self, + agent: ai_agent_channels.AbstractAIAgentChannelProducer, + agent_input: typing.Dict[str, typing.Any], + ai_service: typing.Any, + ) -> typing.Any: + return await agent.execute(agent_input, ai_service) + + def _get_tools_prompt(self) -> str: + """Get the tools system prompt.""" + return """You are a tools-driven team manager responsible for coordinating AI agents to complete tasks. + +Your goal is to analyze the available agents and current context, then use the available tools to execute the appropriate agents in sequence to achieve the team's objective. + +Available tools: +- run_agent: Execute a single agent by name to get its specialized output +- run_debate: Run a debate between multiple agents with a judge to resolve complex decisions +- finish: Complete execution when you have gathered sufficient results + +Important: +- Always run at least one agent before calling finish. +- Do NOT respond with plain text. You MUST respond with a tool call. +- If unsure, call finish with empty arguments. + +Examples (tool calls only, no prose): +- run_agent {\"agent_name\": \"SignalAIAgentProducer\"} +- run_debate {\"debator_agent_names\": [\"BullResearchAIAgentProducer\", \"BearResearchAIAgentProducer\"], \"judge_agent_name\": \"RiskJudgeAIAgentProducer\", \"max_rounds\": 3} +- finish {} +""" + + async def _execute_tool( + self, + tool_call: models.ManagerToolCall, + team_producer: typing.Any, + state: models.ManagerState, + ai_service: typing.Any + ) -> None: + """Execute a tool and update state.""" + if tool_call.tool_name == constants.TOOL_RUN_AGENT: + await self._tool_run_agent(tool_call.arguments, team_producer, state, ai_service) + elif tool_call.tool_name == constants.TOOL_RUN_DEBATE: + await self._tool_run_debate(tool_call.arguments, team_producer, state, ai_service) + else: + self.logger.warning(f"Unknown tool: {tool_call.tool_name}") + + async def _tool_run_agent( + self, + args: typing.Dict[str, typing.Any], + team_producer: typing.Any, + state: models.ManagerState, + ai_service: typing.Any + ) -> None: + """Run a single agent with proper input structure for team execution.""" + run_args = models.RunAgentArgs.model_validate(args) + agent = team_producer._producer_by_name.get(run_args.agent_name) # pylint: disable=protected-access + + if agent is None: + self.logger.warning(f"Agent {run_args.agent_name} not found") + return + + # Build agent input following team channel structure + # For entry agents: pass initial_data directly + # For non-entry agents: pass dict with predecessor results keyed by agent name + + # Check if agent is an entry agent (has no predecessors in the team) + incoming_edges, _ = team_producer._build_dag() # pylint: disable=protected-access + agent_channel_type = agent.AGENT_CHANNEL + predecessors = incoming_edges.get(agent_channel_type, []) + + if not predecessors: + # Entry agent: receives initial_data directly + agent_input = state.initial_data.copy() + if run_args.instructions: + agent_input["instructions"] = run_args.instructions + else: + # Non-entry agent: receives predecessor results in channel format + agent_input = {} + + # Add each predecessor's result in the expected format + for pred_channel_type in predecessors: + # Find the predecessor agent by channel type + pred_agent = team_producer._producer_by_channel.get(pred_channel_type) # pylint: disable=protected-access + if pred_agent and pred_agent.name in state.results: + pred_result_entry = state.results[pred_agent.name] + agent_input[pred_agent.name] = { + constants.AGENT_NAME_KEY: pred_agent.name, + constants.RESULT_KEY: pred_result_entry.get("result"), + } + + # Also preserve initial_state for agents that need it + agent_input["_initial_state"] = state.initial_data.copy() + + if run_args.instructions: + agent_input["instructions"] = run_args.instructions + + result = await self._execute_agent_with_retry(agent, agent_input, ai_service) + state.completed_agents.append(run_args.agent_name) + state.results[run_args.agent_name] = { + "agent_name": run_args.agent_name, + "result": result, + } + + async def _tool_run_debate( + self, + args: typing.Dict[str, typing.Any], + team_producer: typing.Any, + state: models.ManagerState, + ai_service: typing.Any + ) -> None: + """Run a debate.""" + debate_args = models.RunDebateArgs.model_validate(args) + + # Use team's debate method + debate_results, completed = await team_producer._run_debate( # pylint: disable=protected-access + debate_config={ + "debator_agent_names": debate_args.debator_agent_names, + "judge_agent_name": debate_args.judge_agent_name, + "max_rounds": debate_args.max_rounds, + }, + initial_data=state.initial_data, + results=state.results, + completed_agents=set(state.completed_agents), + incoming_edges={}, # Simplified + ) + + # Update state + state.completed_agents.extend(completed - set(state.completed_agents)) + state.results.update(debate_results) diff --git a/packages/agents/octobot_agents/utils/__init__.py b/packages/agents/octobot_agents/utils/__init__.py new file mode 100644 index 0000000000..40395a00ab --- /dev/null +++ b/packages/agents/octobot_agents/utils/__init__.py @@ -0,0 +1,31 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. + +from octobot_agents.utils.extractor import ( + extract_json_from_content, + extract_json_between_braces, + extract_json_from_markdown, + extract_json_from_xml_tags, + preprocess_json_content, +) + +__all__ = [ + "extract_json_from_content", + "extract_json_between_braces", + "extract_json_from_markdown", + "extract_json_from_xml_tags", + "preprocess_json_content", +] diff --git a/packages/agents/octobot_agents/utils/extractor.py b/packages/agents/octobot_agents/utils/extractor.py new file mode 100644 index 0000000000..05624534b0 --- /dev/null +++ b/packages/agents/octobot_agents/utils/extractor.py @@ -0,0 +1,155 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import json +import re +import typing + + +def preprocess_json_content(content: str) -> str: + if not content: + return "" + cleaned = content.strip() + cleaned = _strip_wrapping_quotes(cleaned) + if "\\n" in cleaned and "\n" not in cleaned: + try: + cleaned = bytes(cleaned, "utf-8").decode("unicode_escape") + except Exception: + pass + fenced = _extract_fenced_content(cleaned) + return fenced if fenced is not None else cleaned + + +def extract_json_from_content(content: str) -> typing.Optional[typing.Dict[str, typing.Any]]: + if not content: + return None + + cleaned = preprocess_json_content(content) + parsed = _try_load(cleaned) + if parsed is not None: + return parsed + + for candidate in ( + extract_json_between_braces(cleaned), + extract_json_from_markdown(content), + extract_json_from_xml_tags(content), + ): + if candidate is not None: + return candidate + + return None + + +def extract_json_between_braces(content: str) -> typing.Optional[typing.Dict[str, typing.Any]]: + if not content: + return None + start = 0 + while True: + json_str = _find_first_json_object(content, start_index=start) + if not json_str: + return None + parsed = _try_load(json_str) + if parsed is not None: + return parsed + next_pos = content.find("{", start + 1) + if next_pos == -1: + return None + start = next_pos + + +def extract_json_from_markdown(content: str) -> typing.Optional[typing.Dict[str, typing.Any]]: + matches = re.findall(r"```(?:json)?\s*(.*?)\s*```", content, re.DOTALL) + for match in matches: + parsed = _try_load(match.strip()) + if parsed is not None: + return parsed + return None + + +def extract_json_from_xml_tags(content: str) -> typing.Optional[typing.Dict[str, typing.Any]]: + matches = re.findall(r"<[^>]+>(.*?)</[^>]+>", content, re.DOTALL) + for match in matches: + match_str = match.strip() + parsed = _try_load(match_str) + if parsed is not None: + return parsed + parsed = extract_json_between_braces(match_str) + if parsed is not None: + return parsed + return None + + +def _try_load(content: str) -> typing.Optional[typing.Dict[str, typing.Any]]: + try: + return json.loads(content) + except Exception: + return None + + +def _find_first_json_object(content: str, start_index: int = 0) -> typing.Optional[str]: + if not content: + return None + start = content.find("{", start_index) + if start < 0: + return None + depth = 0 + in_string = False + escape = False + for idx in range(start, len(content)): + ch = content[idx] + if in_string: + if escape: + escape = False + continue + if ch == "\\": + escape = True + continue + if ch == "\"": + in_string = False + continue + if ch == "\"": + in_string = True + continue + if ch == "{": + depth += 1 + continue + if ch == "}": + depth -= 1 + if depth == 0: + return content[start:idx + 1] + return None + + +def _extract_fenced_content(content: str) -> typing.Optional[str]: + match = re.search(r"```(?:json)?\s*(.*?)\s*```", content, re.DOTALL) + if match: + return match.group(1).strip() + if "```" in content: + start = content.find("```") + end = content.rfind("```") + if start != -1 and end != -1 and end > start + 3: + inner = content[start + 3:end] + if inner.startswith("json"): + inner = inner[4:] + return inner.strip() + return None + + +def _strip_wrapping_quotes(content: str) -> str: + if (content.startswith("'") and content.endswith("'")) or ( + content.startswith('"') and content.endswith('"') + ): + return content[1:-1].strip() + return content diff --git a/packages/agents/octobot_agents/utils/retry.py b/packages/agents/octobot_agents/utils/retry.py new file mode 100644 index 0000000000..5df55dc966 --- /dev/null +++ b/packages/agents/octobot_agents/utils/retry.py @@ -0,0 +1,39 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import functools +import typing + + +def retry_async(get_retries: typing.Callable[..., int]): + def decorator(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + retries = max(0, int(get_retries(*args, **kwargs))) + attempt = 0 + while True: + try: + return await func(*args, **kwargs) + except Exception as e: + if attempt >= retries: + raise + attempt += 1 + self_ref = args[0] if args else None + if self_ref is not None and hasattr(self_ref, "logger"): + self_ref.logger.warning( + f"{func.__name__} failed. Retrying ({attempt}/{retries}). Error: {e}" + ) + return wrapper + return decorator diff --git a/packages/agents/standard.rc b/packages/agents/standard.rc new file mode 100644 index 0000000000..75d50d647a --- /dev/null +++ b/packages/agents/standard.rc @@ -0,0 +1,515 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,tests + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=4 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +# suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=C0114, # doc + C0115, # doc + C0116, # doc + E0611, # false positive + broad-except, + broad-exception-raised, + abstract-method, + arguments-differ, + unused-argument, + reimported, + logging-format-interpolation, + fixme, + attribute-defined-outside-init, + useless-super-delegation, + pointless-string-statement, + raise-missing-from, + logging-fstring-interpolation, + f-string-without-interpolation, + c-extension-no-member, # cythonized imports no member + C, R # only errors TODO remove + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[LOGGING] + +# Format style used to check logging format string. `old` means using % +# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning on implicit string concatenation in sequences defined over +# several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/packages/agents/tests/__init__.py b/packages/agents/tests/__init__.py new file mode 100644 index 0000000000..af52643543 --- /dev/null +++ b/packages/agents/tests/__init__.py @@ -0,0 +1,15 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. diff --git a/packages/agents/tests/test_agent.py b/packages/agents/tests/test_agent.py new file mode 100644 index 0000000000..54d6e808ca --- /dev/null +++ b/packages/agents/tests/test_agent.py @@ -0,0 +1,70 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import pytest + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_agents.agent.channels.agent as agent_channels +import octobot_agents.constants as agent_constants + + +class TestAgentChannel(agent_channels.AbstractAgentChannel): + """Test channel for testing.""" + pass + + +class TestAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """Test agent producer for testing.""" + + AGENT_CHANNEL = TestAgentChannel + + def _get_default_prompt(self) -> str: + return "You are a test agent." + + async def execute(self, input_data, ai_service): + return {"result": "test"} + + +def test_agent_name_is_class_name(): + """Test that agent.name is set to the class name.""" + channel = TestAgentChannel() + agent = TestAIAgentProducer(channel) + + assert agent.name == "TestAIAgentProducer" + assert agent.name == agent.__class__.__name__ + + +def test_agent_default_values(): + """Test that agent uses default values from constants.""" + channel = TestAgentChannel() + agent = TestAIAgentProducer(channel) + + assert agent.max_tokens == agent_constants.AGENT_DEFAULT_MAX_TOKENS + assert agent.temperature == agent_constants.AGENT_DEFAULT_TEMPERATURE + assert agent.MAX_RETRIES == agent_constants.AGENT_DEFAULT_MAX_RETRIES + + +def test_agent_custom_values(): + """Test that agent can override default values.""" + channel = TestAgentChannel() + agent = TestAIAgentProducer( + channel, + max_tokens=5000, + temperature=0.7, + ) + + assert agent.max_tokens == 5000 + assert agent.temperature == 0.7 + assert agent.name == "TestAIAgentProducer" diff --git a/packages/agents/tests/util/test_extractor.py b/packages/agents/tests/util/test_extractor.py new file mode 100644 index 0000000000..5696a0b1a4 --- /dev/null +++ b/packages/agents/tests/util/test_extractor.py @@ -0,0 +1,81 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>. +import pytest + +import octobot_agents.utils.extractor as agent_extractor + + +def test_preprocess_json_content_strips_fences(): + content = """```json + {"key": "value"} + ```""" + assert agent_extractor.preprocess_json_content(content) == '{"key": "value"}' + + +def test_extract_json_from_content_direct_json(): + content = '{"key": "value", "num": 1}' + assert agent_extractor.extract_json_from_content(content) == {"key": "value", "num": 1} + + +def test_extract_json_from_content_markdown_json(): + content = """```json + {"key": "value"} + ```""" + assert agent_extractor.extract_json_from_content(content) == {"key": "value"} + + +def test_extract_json_between_braces(): + content = "prefix {\"key\": \"value\"} suffix" + assert agent_extractor.extract_json_between_braces(content) == {"key": "value"} + +def test_extract_json_between_braces_with_braces_in_text(): + content = "prefix {not json} {\"key\": \"value\"} suffix" + assert agent_extractor.extract_json_between_braces(content) == {"key": "value"} + + +def test_extract_json_from_markdown(): + content = """```json + {"key": "value"} + ```""" + assert agent_extractor.extract_json_from_markdown(content) == {"key": "value"} + +def test_extract_json_from_content_prefixed_markdown(): + content = """Error parsing JSON from response ```json + {"key": "value"} + ```""" + assert agent_extractor.extract_json_from_content(content) == {"key": "value"} + +def test_extract_json_from_content_single_quoted_payload(): + content = "'Error parsing JSON from response ```json\\n{\"key\": \"value\"}\\n```'" + assert agent_extractor.extract_json_from_content(content) == {"key": "value"} + +def test_extract_json_from_content_fenced_with_suffix(): + content = """Error parsing JSON from response +```json +{"key": "value"} +``` +--- +Extra text after fence.""" + assert agent_extractor.extract_json_from_content(content) == {"key": "value"} + + +def test_extract_json_from_xml_tags(): + content = "<final_answer>{\"key\": \"value\"}</final_answer>" + assert agent_extractor.extract_json_from_xml_tags(content) == {"key": "value"} + + +def test_extract_json_from_content_invalid(): + assert agent_extractor.extract_json_from_content("not json") is None diff --git a/packages/async_channel/.coveragerc b/packages/async_channel/.coveragerc new file mode 100644 index 0000000000..4daebdc3c9 --- /dev/null +++ b/packages/async_channel/.coveragerc @@ -0,0 +1,4 @@ +[run] +omit = + tests/* + setup.py diff --git a/packages/async_channel/.gitignore b/packages/async_channel/.gitignore new file mode 100644 index 0000000000..30ba67ed5a --- /dev/null +++ b/packages/async_channel/.gitignore @@ -0,0 +1,115 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# ide +.idea + +# cython +*.html +*.c +cython_debug/ + +# doc +docs/build diff --git a/packages/async_channel/.readthedocs.yml b/packages/async_channel/.readthedocs.yml new file mode 100644 index 0000000000..a6624d32b5 --- /dev/null +++ b/packages/async_channel/.readthedocs.yml @@ -0,0 +1,15 @@ +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: all + +# Optionally set the version of Python and requirements required to build your docs +python: + version: 3.8 + install: + - requirements: requirements.txt + - requirements: dev_requirements.txt diff --git a/packages/async_channel/BUILD b/packages/async_channel/BUILD new file mode 100644 index 0000000000..acb8ba67b1 --- /dev/null +++ b/packages/async_channel/BUILD @@ -0,0 +1,7 @@ +python_sources(name="async_channel", sources=["async_channel/**/*.py"]) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[":async_channel", "//:dev_reqs"], +) \ No newline at end of file diff --git a/packages/async_channel/CHANGELOG.md b/packages/async_channel/CHANGELOG.md new file mode 100644 index 0000000000..fe8ba45d4a --- /dev/null +++ b/packages/async_channel/CHANGELOG.md @@ -0,0 +1,414 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [2.2.2] - 2026-01-03 +### Added +- Detailed typing to all classes + +## [2.2.1] - 2023-09-03 +### Added +- channel_name to creator + +## [2.2.0] - 2023-05-02 +### Updated +- Supported python versions +### Removed +- Cython + +## [2.1.0] - 2022-12-23 +### Updated +- [Cython] update to 0.29.32 + +## [2.0.14] - 2022-12-22 +### Added +- [Channel] get_prioritized_consumers + +## [2.0.13] - 2022-01-08 +### Added +- [SupervisedConsumer] add ability to join current perform task + +### Updated +- bump requirements + +## [2.0.12] - 2021-07-19 +### Updated +- bump requirements + +## [2.0.11] - 2021-07-19 +### Updated +- bump requirements + +## [2.0.10] - 2021-05-05 +### Updated +- bump requirements + +## [2.0.9] - 2021-03-03 +### Added +- Python 3.9 support + +## [2.0.8] - 2021-02-25 +### Updated +- cython requirement + +## [2.0.7] - 2020-12-07 +### Updated +- revert import statements changes + +## [2.0.6] - 2020-11-07 +### Updated +- revert import statements changes + +## [2.0.5] - 2020-11-07 +### Updated +- import statements + +## [2.0.4] - 2020-11-07 +### Fixed +- async_channel.util python file + +## [2.0.3] - 2020-10-23 +### Updated +- Python 3.8 support + +## [2.0.2] - 2020-10-13 +### Fixed +- Cython headers export + +## [2.0.1] - 2020-10-01 +### Added +- Logging dynamic implementation +- Project name to 'Async-Channel' + +## [2.0.0] - 2020-10-01 +### Update +- Project name to 'channel' +- python and cython imports behaviour + +### Removed +- OctoBot-Commons requirement + +## [1.4.11] - 2020-09-01 +### Update +- Requirements + +## [1.4.10] - 2020-08-15 +### Update +- Requirements + +## [1.4.9] - 2020-06-19 +### Update +- Requirements + +## [1.4.8] - 2020-05-27 +### Update +- Cython version + +## [1.4.7] - 2020-05-17 +### Fixed +- [Producer] pause is running was not set + +## [1.4.6] - 2020-05-16 +### Updated +- Requirements + +## [1.4.5] - 2020-05-13 +### Changed +- [Channel] Default priority value to HIGH + +## [1.4.4] - 2020-05-13 +### Added +- [Channel] Producer pause and resume check with consumer priority levels + +## [1.4.2] - 2020-05-11 +### Added +- [CI] Azure pipeline + +### Removed +- [CI] macOs build on travis +- [CI] Appveyor builds + +## [1.4.1] - 2020-05-09 +### Added +- [ChannelInstances] Channel id support + +## [1.4.0] - 2020-05-01 +### Added +- Synchronous Channel +- Synchronous Consumer +- Synchronous Producer + +## [1.3.25] - 2020-04-27 +### Added +- [Channel] consumer filtering by list + +## [1.3.24] - 2020-04-17 +### Added +- [Producer] pause and resume default implementation + +## [1.3.23] - 2020-04-07 +### Fixed +- Wildcard imports + +## [1.3.22] - 2020-03-26 +### Added +- Documentation basis with sphinx +- Pylint check on CI +- Black check on CI + +### Fixed +- Documentation issues +- Pylint issues +- Black issues + +## [1.3.21] - 2020-03-05 +### Changed +- Exception logger from Commons + +### Updated +- Commons version to >= 1.3.0 + +## [1.3.20] - 2020-02-10 +### Added +- flush method to channels +- ```__str__``` representation for consumers + +## [1.3.19] - 2020-01-02 +### Changed +- create_channel_instance now returns the created channel + +### Fixed +- fix set_chan channel name default value inference + +## [1.3.18] - 2019-12-24 +### Changed +- Channels __ methods to _ methods (syntax update) + +## [1.3.17] - 2019-12-21 +### Updated +- Commons version to >= 1.2.0 + +### Added +- Makefile + +## [1.3.16] - 2019-12-14 +### Updated +- Commons version to >= 1.1.50 + +### Fixed +- test_set_chan + +## [1.3.15] - 2019-11-07 +### Updated +- Cython version to 0.29.14 + +## [1.3.14] - 2019-10-29 +### Added +- OSX support + +## [1.3.13] - 2019-10-09 +### Added +- PyPi manylinux deployment + +## [1.3.12] - 2019-10-08 +### Fixed +- Install with setup + +## [1.3.11] - 2019-10-07 +### Added +- CancelledError catching in consume task + +## [1.3.10] - 2019-10-05 +### Added +- Producer is_running attribute + +## [1.3.9] - 2019-10-03 +### Added +- Check if the new producer is already registered before channel registration + +## [1.3.8] - 2019-10-02 +### Fixed +- kwargs argument cython compatibility + +## [1.3.7] - 2019-09-25 +### Changed +- Cython compilation directives (optimization purposes) + +## [1.3.6] - 2019-09-22 +### Fixed +- Fix internal consumer callback + +## [1.3.5] - 2019-09-21 +### Fixed +- Travis channel '__check_producers_state()' method crash when compiled + +## [1.3.4] - 2019-09-09 +### Fixed +- Producer 'wait_for_processing' declaration + +## [1.3.3] - 2019-09-08 +### Changed +- Channel 'get_consumer_from_filters' manage wildcard filters + +## Related issue +- #9 [Channel] Implement consumer filter + +## [1.3.2] - 2019-09-07 +### Changed +- Channel 'get_consumer_from_filters' method compilation from cython to python + +## [1.3.1] - 2019-09-07 +### Added +- Producer supervised consumer wait method 'wait_for_processing' + +### Changed +- Consumer tests + +## [1.3.0] - 2019-09-07 +### Added +- Supervised Consumer that notify the consumption end + +### Fixed +- Consumer tests + +## [1.2.0] - 2019-09-04 +### Added +- Channel add_new_consumer method to add a new consumer with filters +- Channel get_consumer_from_filters to get a list of consumers that match with filters + +### Changed +- Channel new_consumer method can handle a consumer filters dict +- Channel __add_new_consumer_and_run to use consumer filters and not consumer name + +## [1.1.14] - 2019-08-29 +### Added +- Tests + +## [1.1.13] - 2019-08-29 +### Fixed +- Internal consumer implementation + +## [1.1.12] - 2019-08-29 +### Fixed +- Internal consumer consume method + +## [1.1.11] - 2019-08-28 +### Added +- Internal consumer : the callback is defined into the consumer class and is not a constructor param anymore + +## [1.1.10] - 2019-08-27 +### Added +- Consumer instance param in channel new_consumer to handle a new consumer with an already created instance + +## [1.1.9] - 2019-08-26 +### Fixed +- Queue to async + +## [1.1.8] - 2019-08-16 +### Changed +- Replaced Channels class by orphan public methods + +### Removed +- Channels class + +## [1.1.7] - 2019-08-14 +### Added +- Setup install requirements + +## [1.1.6] - 2019-08-14 +### Changed +- ChannelInstances class to commons singleton class implementation + +## [1.1.5] - 2019-08-13 +### Fixed +- Changed Producer attributes to public +- Changed Consumer attributes to public + +## [1.1.4] - 2019-08-13 +### Fixed +- Channel is_paused attribute to public + +## [1.1.3] - 2019-08-13 +### Added +- Producer pause and resume methods +- Channel producers pause/resume management + +### Related issue +- [Producer] Implement channel pause and resume #8 + +## [1.1.2] - 2019-08-12 +### Fixed +- Channel init_consumer_if_necessary object key type + +## [1.1.1] - 2019-08-12 +### Fixed +- Channel init_consumer_if_necessary iterable type + +## [1.1.0] - 2019-08-11 +### Added +- Channel global tests + +### Changed +- Migrate Consumer start, run and stop methods to async + +### Fixed +- Consumer attributes queue and filter_size to public +- Channel start and stop methods +- Channels methods Cython compliance + +## [1.0.12] - 2019-08-09 +### Changed +- PyDoc fixes + +## [1.0.11] - 2019-08-09 +### Added +- Channel new consumer methods + +## [1.0.10] - 2019-08-08 +### Added +- Channel internal_producer + +## [1.0.9] - 2019-08-07 +### Changed +- Channel creation utility refactored in two different methods + +## [1.0.8] - 2019-08-06 +### Added +- Channel creation utility + +## [1.0.7] - 2019-08-04 +### Added +- Channel 'modify' method that calls all producers modify method +- Channel 'register_producer' method to register all its producers. + +## [1.0.6] - 2019-08-03 +### Added +- constants.py file + +### Modified +- Import way with __init__.py files + +### Removed +- Unused evaluator package + +## [1.0.5] - 2019-08-03 +### Added +- Producer 'modify' method + +## [1.0.4] - 2019-06-10 +### Fixed +- ExchangeChannel deprecated imports + +## [1.0.3] - 2019-06-10 +### Removed +- [OctoBot-Trading] migrate exchange channels to OctoBot-Trading + +## [1.0.2] - 2019-06-09 +### Fixed +- [OctoBot-Trading] Exchange get_name() method deprecated + +## [1.0.1] - 2019-05-27 +### Changed +- Migrate to cython with pure python diff --git a/packages/async_channel/LICENSE b/packages/async_channel/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/async_channel/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/async_channel/MANIFEST.in b/packages/async_channel/MANIFEST.in new file mode 100644 index 0000000000..4a3e3ff758 --- /dev/null +++ b/packages/async_channel/MANIFEST.in @@ -0,0 +1,8 @@ +recursive-include async_channel *.pxd + +include README.md +include LICENSE +include CHANGELOG.md +include requirements.txt + +global-exclude *.c diff --git a/packages/async_channel/README.md b/packages/async_channel/README.md new file mode 100644 index 0000000000..7e7f350af2 --- /dev/null +++ b/packages/async_channel/README.md @@ -0,0 +1,52 @@ +# Async-Channel +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/523d43c62f1d4de08395752367f5fddc)](https://www.codacy.com/gh/Drakkar-Software/Async-Channel/dashboard?utm_source=github.com&utm_medium=referral&utm_content=Drakkar-Software/Async-Channel&utm_campaign=Badge_Grade) +[![Github-Action-CI](https://github.com/Drakkar-Software/Async-Channel/workflows/Async-Channel-Default-CI/badge.svg)](https://github.com/Drakkar-Software/Async-Channel/actions) +[![Build Status](https://cloud.drone.io/api/badges/Drakkar-Software/Async-Channel/status.svg)](https://cloud.drone.io/Drakkar-Software/Async-Channel) +[![Coverage Status](https://coveralls.io/repos/github/Drakkar-Software/OctoBot-Channels/badge.svg?branch=master)](https://coveralls.io/github/Drakkar-Software/OctoBot-Channels?branch=master) +[![Doc Status](https://readthedocs.org/projects/octobot-channels/badge/?version=stable)](https://octobot-channels.readthedocs.io/en/stable/?badge=stable) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +Python multi-task communication library. Used by [OctoBot](https://github.com/Drakkar-Software/OctoBot) project. + +## Installation +With python3 : `pip install async-channel` + +## Usage +Example +```python +import async_channel.consumer as consumer +import async_channel.producer as producer +import async_channel.channels as channels +import async_channel.util as util + +class AwesomeProducer(producer.Producer): + pass + +class AwesomeConsumer(consumer.Consumer): + pass + +class AwesomeChannel(channels.Channel): + PRODUCER_CLASS = AwesomeProducer + CONSUMER_CLASS = AwesomeConsumer + +async def callback(data): + print("Consumer called !") + print("Received : " + data) + +# Creates the channel +await util.create_channel_instance(AwesomeChannel, channels.Channels) + +# Add a new consumer to the channel +await channels.Channels.get_chan("Awesome").new_consumer(callback) + +# Creates a producer that send data to the consumer through the channel +producer = AwesomeProducer(channels.Channels.get_chan("Awesome")) +await producer.run() +await producer.send("test") + +# Stops the channel with all its producers and consumers +# await channels.Channels.get_chan("Awesome").stop() +``` + +# Developer documentation +On [readthedocs.io](https://octobot-channels.readthedocs.io/en/latest/) diff --git a/packages/async_channel/async_channel/__init__.py b/packages/async_channel/async_channel/__init__.py new file mode 100644 index 0000000000..d03244e339 --- /dev/null +++ b/packages/async_channel/async_channel/__init__.py @@ -0,0 +1,52 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel project +""" + +from async_channel import constants +from async_channel.constants import ( + CHANNEL_WILDCARD, + DEFAULT_QUEUE_SIZE, +) + +from async_channel import enums +from async_channel.enums import ChannelConsumerPriorityLevels + +from async_channel import producer +from async_channel.producer import Producer + +from async_channel import consumer +from async_channel.consumer import ( + Consumer, + InternalConsumer, + SupervisedConsumer, +) + +PROJECT_NAME = "async-channel" +VERSION = "2.2.2" # major.minor.revision + +__all__ = [ + "CHANNEL_WILDCARD", + "DEFAULT_QUEUE_SIZE", + "ChannelConsumerPriorityLevels", + "Producer", + "Consumer", + "InternalConsumer", + "SupervisedConsumer", + "PROJECT_NAME", + "VERSION", +] diff --git a/packages/async_channel/async_channel/channels/__init__.py b/packages/async_channel/async_channel/channels/__init__.py new file mode 100644 index 0000000000..1253d77ee6 --- /dev/null +++ b/packages/async_channel/async_channel/channels/__init__.py @@ -0,0 +1,48 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel implementation and usage +""" +from async_channel.channels import channel_instances +from async_channel.channels import channel + +from async_channel.channels.channel_instances import ( + ChannelInstances, + set_chan_at_id, + get_channels, + del_channel_container, + get_chan_at_id, + del_chan_at_id, +) +from async_channel.channels.channel import ( + Channel, + set_chan, + del_chan, + get_chan, +) + +__all__ = [ + "ChannelInstances", + "set_chan_at_id", + "get_channels", + "del_channel_container", + "get_chan_at_id", + "del_chan_at_id", + "Channel", + "set_chan", + "del_chan", + "get_chan", +] diff --git a/packages/async_channel/async_channel/channels/channel.py b/packages/async_channel/async_channel/channels/channel.py new file mode 100644 index 0000000000..f71f521049 --- /dev/null +++ b/packages/async_channel/async_channel/channels/channel.py @@ -0,0 +1,402 @@ +# pylint: disable=too-many-positional-arguments +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Defines the channel core class : Channel +""" +import typing + +import async_channel.util.logging_util as logging +import async_channel.enums +import async_channel.channels.channel_instances as channel_instances + +if typing.TYPE_CHECKING: + import async_channel.producer + + +# pylint: disable=undefined-variable, not-callable +class Channel: + """ + A Channel is the object to connect a producer / producers class(es) to a consumer / consumers class(es) + It contains a registered consumers dict to notify every consumer when a producer 'send' something. + It contains a registered producers list to allow producer modification through 'modify'. + To access channels a 'Channels' singleton is created to manage instances. + """ + + # Channel producer class + PRODUCER_CLASS: typing.Optional[typing.Type["async_channel.producer.Producer"]] = ( + None + ) + + # Channel consumer class + CONSUMER_CLASS: typing.Optional[typing.Type["async_channel.consumer.Consumer"]] = ( + None + ) + + # Consumer instance in consumer filters + INSTANCE_KEY = "consumer_instance" + + # Channel default consumer priority level + DEFAULT_PRIORITY_LEVEL = ( + async_channel.enums.ChannelConsumerPriorityLevels.HIGH.value + ) + + def __init__(self): + self.logger = logging.get_logger(self.__class__.__name__) + + # Channel unique id + self.chan_id: typing.Optional[str] = None + + # Channel subscribed producers list + self.producers: list["async_channel.producer.Producer"] = [] + + # Channel subscribed consumers list: list dicts of dicts containing: + # - At least a consumer instance under the INSTANCE_KEY key + # - Possibly other filters under other keys and values + self.consumers: list[dict[str, typing.Any]] = [] + + # Used to perform global send from non-producer context + self.internal_producer: typing.Optional["async_channel.producer.Producer"] = ( + None + ) + + # Used to save producers state (paused or not) + self.is_paused: bool = True + + # Used to synchronize producers and consumer + self.is_synchronized: bool = False + + @classmethod + def get_name(cls) -> str: + """ + Default implementation is to return the name of the class without the 'Channel' substring + :returns the channel name + """ + return cls.__name__.replace("Channel", "") + + # pylint: disable=too-many-arguments + async def new_consumer( + self, + callback: object = None, + consumer_filters: typing.Optional[dict] = None, + internal_consumer: typing.Optional["async_channel.consumer.Consumer"] = None, + size: int = 0, + priority_level: int = DEFAULT_PRIORITY_LEVEL, + ) -> "async_channel.consumer.Consumer": + """ + Create an appropriate consumer instance for this async_channel and add it to the consumer list + Should end by calling '_check_producers_state' + :param callback: method that should be called when consuming the queue + :param consumer_filters: the consumer filters + :param size: queue size, default 0 + :param priority_level: used by Producers the lowest level has the highest priority + :param internal_consumer: internal consumer instance to use if specified + :return: consumer instance created + """ + consumer = ( + internal_consumer + if internal_consumer + else self.CONSUMER_CLASS(callback, size=size, priority_level=priority_level) # type: ignore + ) + await self._add_new_consumer_and_run(consumer, consumer_filters) + await self._check_producers_state() + return consumer + + # pylint: disable=unused-argument + async def _add_new_consumer_and_run( + self, + consumer: "async_channel.consumer.Consumer", + consumer_filters: typing.Optional[dict], + **kwargs, + ) -> None: + """ + Should be called by 'new_consumer' to add the consumer to self.consumers and call 'consumer.run()' + :param consumer: the consumer to add + :param kwargs: additional params for consumer list + :return: None + """ + if consumer_filters is None: + consumer_filters = {} + + self.add_new_consumer(consumer, consumer_filters) + await consumer.run(with_task=not self.is_synchronized) + + def add_new_consumer( + self, consumer: "async_channel.consumer.Consumer", consumer_filters: dict + ) -> None: + """ + Add a new consumer to consumer list with filters + :param consumer: the consumer to add + :param consumer_filters: the consumer selection filters (used by 'get_consumer_from_filters') + :return: None + """ + consumer_filters[self.INSTANCE_KEY] = consumer + self.consumers.append(consumer_filters) + + def get_consumer_from_filters( + self, consumer_filters: dict + ) -> list["async_channel.consumer.Consumer"]: + """ + Returns the instance filtered consumers list + WARNING: + >>> get_consumer_from_filters({"A": 1}) + Can return a consumer described by {"A": True} because in python 1 == True + :param consumer_filters: The consumer filters dict + :return: the filtered consumer list + """ + return self._filter_consumers(consumer_filters) + + def get_consumers(self) -> list["async_channel.consumer.Consumer"]: + """ + Returns all consumers instance + Can be overwritten according to the class needs + :return: the subscribed consumers list + """ + return [consumer[self.INSTANCE_KEY] for consumer in self.consumers] + + def get_prioritized_consumers( + self, priority_level: int + ) -> list["async_channel.consumer.Consumer"]: + """ + Returns all consumers instance + Can be overwritten according to the class needs + :return: the subscribed consumers list + """ + return [ + consumer[self.INSTANCE_KEY] + for consumer in self.consumers + if consumer[self.INSTANCE_KEY].priority_level <= priority_level + ] + + def _filter_consumers( + self, consumer_filters: dict + ) -> list["async_channel.consumer.Consumer"]: + """ + Returns the consumers that match the selection + Returns all consumer instances if consumer_filter is empty + :param consumer_filters: listed consumer filters + :return: the list of the filtered consumers + """ + return [ + consumer[self.INSTANCE_KEY] + for consumer in self.consumers + if _check_filters(consumer, consumer_filters) + ] + + async def remove_consumer( + self, consumer: "async_channel.consumer.Consumer" + ) -> None: + """ + Should be overwritten according to the class needs + Should end by calling '_check_producers_state' and then 'consumer.stop' + :param consumer: consumer instance to remove from consumers list + """ + for consumer_candidate in self.consumers: + if consumer == consumer_candidate[self.INSTANCE_KEY]: + self.consumers.remove(consumer_candidate) + await self._check_producers_state() + await consumer.stop() + + async def _check_producers_state(self) -> None: + """ + Checks if producers should be paused or resumed after a consumer addition or removal + """ + if self._should_pause_producers(): + self.is_paused = True + for producer in self.get_producers(): + await producer.pause() + return + if self._should_resume_producers(): + self.is_paused = False + for producer in self.get_producers(): + await producer.resume() + + def _should_pause_producers(self) -> bool: + """ + Check if channel producers should be paused + :return: True if channel producers should be paused + """ + if self.is_paused: + return False + if not self.get_consumers(): + return True + for consumer in self.get_consumers(): + if ( + consumer.priority_level + < async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value + ): + return False + return True + + def _should_resume_producers(self) -> bool: + """ + Check if channel producers should be resumed + :return: True if channel producers should be resumed + """ + if not self.is_paused: + return False + if not self.get_consumers(): + return False + for consumer in self.get_consumers(): + if ( + consumer.priority_level + < async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value + ): + return True + return False + + async def register_producer( + self, producer: "async_channel.producer.Producer" + ) -> None: + """ + Add the producer to producers list + Can be overwritten to perform additional action when registering + Should end by calling 'pause' if self.is_paused + :param Producer producer: created channel producer to register + """ + if producer not in self.producers: + self.producers.append(producer) + + if self.is_paused: + await producer.pause() + + def unregister_producer(self, producer: "async_channel.producer.Producer") -> None: + """ + Remove the producer from producers list + Can be overwritten to perform additional action when registering + :param Producer producer: created channel producer to unregister + """ + if producer in self.producers: + self.producers.remove(producer) + + def get_producers(self) -> typing.Iterable["async_channel.producer.Producer"]: + """ + Should be overwritten according to the class needs + :return: async_channel producers iterable + """ + return self.producers + + async def start(self) -> None: + """ + Call each registered consumers start method + """ + for consumer in self.get_consumers(): + await consumer.start() + + async def stop(self) -> None: + """ + Call each registered consumers and producers stop method + """ + for consumer in self.get_consumers(): + await consumer.stop() + + for producer in self.get_producers(): + await producer.stop() + + if self.internal_producer is not None: + await self.internal_producer.stop() + + def flush(self) -> None: + """ + Flush the channel object before stopping + """ + if self.internal_producer is not None: + self.internal_producer.channel = None + for producer in self.get_producers(): + producer.channel = None + + async def run(self) -> None: + """ + Call each registered consumers run method + """ + for consumer in self.get_consumers(): + await consumer.run(with_task=not self.is_synchronized) + + async def modify(self, **kwargs) -> None: + """ + Call each registered producers modify method + """ + for producer in self.get_producers(): + await producer.modify(**kwargs) + + def get_internal_producer(self, **kwargs) -> "async_channel.producer.Producer": + """ + Returns internal producer if exists else creates it + :param kwargs: arguments for internal producer __init__ + :return: internal producer instance + """ + if not self.internal_producer: + try: + self.internal_producer = self.PRODUCER_CLASS(self, **kwargs) # type: ignore + except TypeError: + self.logger.exception("PRODUCER_CLASS not defined") + raise + return self.internal_producer + + +def set_chan(chan: Channel, name: str) -> Channel: + """ + Set a new Channel instance in the channels list according to channel name + :param chan: new Channel instance + :param name: name of the channel + :return: the channel instance if succeed else raise a ValueError + """ + chan_name = name if name else chan.get_name() + if chan_name not in channel_instances.ChannelInstances.instance().channels: + channel_instances.ChannelInstances.instance().channels[chan_name] = chan + return chan + raise ValueError(f"Channel {chan_name} already exists.") + + +def del_chan(name: str) -> None: + """ + Delete a Channel instance from the channels list according to channel name + :param name: name of the channel to delete + """ + if name in channel_instances.ChannelInstances.instance().channels: + channel_instances.ChannelInstances.instance().channels.pop(name, None) + + +def get_chan(chan_name: str) -> Channel: + """ + Return the channel instance from channel name + :param chan_name: the channel name + :return: the Channel instance + """ + return channel_instances.ChannelInstances.instance().channels[chan_name] + + +def _check_filters(consumer_filters: dict, expected_filters: dict) -> bool: + """ + Checks if the consumer match the specified filters + Returns True if expected_filters is empty + :param consumer_filters: consumer filters + :param expected_filters: selected filters + :return: True if the consumer match the selection, else False + """ + try: + for key, value in expected_filters.items(): + if value == async_channel.CHANNEL_WILDCARD: + continue + if isinstance(consumer_filters[key], list): + if set(consumer_filters[key]) & {value, async_channel.CHANNEL_WILDCARD}: + continue + return False + if consumer_filters[key] not in [value, async_channel.CHANNEL_WILDCARD]: + return False + return True + except KeyError: + return False diff --git a/packages/async_channel/async_channel/channels/channel_instances.py b/packages/async_channel/async_channel/channels/channel_instances.py new file mode 100644 index 0000000000..6bbd90feca --- /dev/null +++ b/packages/async_channel/async_channel/channels/channel_instances.py @@ -0,0 +1,123 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +This module defines created Channels interaction methods +""" +import typing +import async_channel.util.logging_util as logging + +if typing.TYPE_CHECKING: + import async_channel.channels.channel + + +class ChannelInstances: + """ + Singleton that contains Channel instances + Singleton implementation from https://stackoverflow.com/questions/51245056/singleton-is-not-working-in-cython + """ + + _instances = {} + + @classmethod + def instance(cls, *args, **kwargs) -> "ChannelInstances": + """ + Create the instance if not already created + Return the class instance + :param args: the constructor arguments + :param kwargs: the constructor optional arguments + :return: the class only instance + """ + if cls not in cls._instances: + cls._instances[cls] = cls(*args, **kwargs) + return cls._instances[cls] + + def __init__(self): + self.channels: dict[ + str, dict[str, "async_channel.channels.channel.Channel"] + ] = {} + + +def set_chan_at_id( + chan: "async_channel.channels.channel.Channel", name: str +) -> "async_channel.channels.channel.Channel": + """ + Add a new async_channel to the channels instances dictionary at chan.id + :param chan: the channel instance + :param name: the channel name + """ + chan_name = chan.get_name() if name else name + + try: + chan_instance = ChannelInstances.instance().channels[chan.chan_id] + except KeyError: + ChannelInstances.instance().channels[chan.chan_id] = {} + chan_instance = ChannelInstances.instance().channels[chan.chan_id] + + if chan_name not in chan_instance: + chan_instance[chan_name] = chan + return chan + raise ValueError(f"Channel {chan_name} already exists.") + + +def get_channels(chan_id: str) -> dict[str, "async_channel.channels.channel.Channel"]: + """ + Get async_channel instances by async_channel id + :param chan_id: the channel id + :return: the channel instances at async_channel id + """ + try: + return ChannelInstances.instance().channels[chan_id] + except KeyError as exception: + raise KeyError(f"Channels not found with chan_id: {chan_id}") from exception + + +def del_channel_container(chan_id: str) -> None: + """ + Delete all async_channel id instances + :param chan_id: the channel id + """ + ChannelInstances.instance().channels.pop(chan_id, None) + + +def get_chan_at_id( + chan_name: str, chan_id: str +) -> "async_channel.channels.channel.Channel": + """ + Get the channel instance that matches the name and the id + :param chan_name: the channel name + :param chan_id: the channel id + :return: the channel instance if any + """ + try: + return ChannelInstances.instance().channels[chan_id][chan_name] + except KeyError as exception: + raise KeyError( + f"Channel {chan_name} not found with chan_id: {chan_id}" + ) from exception + + +def del_chan_at_id(chan_name: str, chan_id: str) -> None: + """ + Delete the channel instance that matches the name and the id + :param chan_name: the channel name + :param chan_id: the channel id + """ + try: + ChannelInstances.instance().channels[chan_id].pop(chan_name, None) + except KeyError: + logging.get_logger(ChannelInstances.__name__).warning( + f"Can't del chan {chan_name} with chan_id: {chan_id}" + ) diff --git a/packages/async_channel/async_channel/constants.py b/packages/async_channel/async_channel/constants.py new file mode 100644 index 0000000000..c27391bd83 --- /dev/null +++ b/packages/async_channel/async_channel/constants.py @@ -0,0 +1,21 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel global constants +""" +CHANNEL_WILDCARD = "*" + +DEFAULT_QUEUE_SIZE = 0 # unlimited diff --git a/packages/async_channel/async_channel/consumer.py b/packages/async_channel/async_channel/consumer.py new file mode 100644 index 0000000000..349d49c90b --- /dev/null +++ b/packages/async_channel/async_channel/consumer.py @@ -0,0 +1,213 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel Consumer class +""" +import asyncio +import typing + +import async_channel.util.logging_util as logging +import async_channel.enums + + +class Consumer: + """ + A consumer keeps reading from the channel and processes any data passed to it. + A consumer will start consuming by calling its 'consume' method. + The data processing implementation is coded in the 'perform' method. + A consumer also responds to channel events like pause and stop. + """ + + def __init__( + self, + callback: typing.Callable, + size: int = async_channel.constants.DEFAULT_QUEUE_SIZE, + priority_level: int = async_channel.enums.ChannelConsumerPriorityLevels.HIGH.value, + ): + self.logger = logging.get_logger(self.__class__.__name__) + + # Consumer data queue. It contains producer's work (received through Producer.send()). + self.queue: asyncio.Queue = asyncio.Queue(maxsize=size) + + # Method to be called when performing task is done + self.callback: typing.Callable = callback + + # Should only be used with .cancel() + self.consume_task: typing.Optional[asyncio.Task] = None + + """ + Should be used as the perform while loop condition + >>> while(self.should_stop): + ... + """ + self.should_stop: bool = False + + # Default priority level + # Used by Producers to call consumers by prioritization + # The lowest level has the highest priority + self.priority_level: int = priority_level + + async def consume(self) -> None: + """ + Should be overwritten with a self.queue.get() in a while loop + """ + while not self.should_stop: + try: + await self.perform(await self.queue.get()) + except asyncio.CancelledError: + self.logger.debug("Cancelled task") + except Exception as consume_exception: # pylint: disable=broad-except + self.logger.exception( + consume_exception, + publish_error_if_necessary=True, # type: ignore + error_message=f"Exception when calling callback on {self}: {consume_exception}", # type: ignore + ) + finally: + await self.consume_ends() + + async def perform(self, kwargs) -> None: + """ + Should be overwritten to handle queue data + :param kwargs: queue get content + """ + await self.callback(**kwargs) + + async def consume_ends(self) -> None: + """ + Should be overwritten to handle consumption ends + """ + + async def start(self) -> None: + """ + Should be implemented for consumer's non-triggered tasks + """ + self.should_stop = False + + async def stop(self) -> None: + """ + Stops non-triggered tasks management + """ + self.should_stop = True + if self.consume_task: + self.consume_task.cancel() + + def create_task(self) -> None: + """ + Creates a new asyncio task that contains start() execution + """ + self.consume_task = asyncio.create_task(self.consume()) + + async def run(self, with_task: bool = True) -> None: + """ + - Initialize the consumer + - Start the consumer main task + :param with_task: If the consumer should run in a task + """ + await self.start() + if with_task: + self.create_task() + + async def join(self, timeout: float) -> None: + """ + Implemented in SupervisedConsumer to wait for any "perform" call to be finished. + Instantly returns on regular consumer + """ + + async def join_queue(self) -> None: + """ + Implemented in SupervisedConsumer to wait for the whole queue to finish + processing. + Instantly returns on regular consumer + """ + + def __str__(self) -> str: + return f"{self.__class__.__name__} with callback: {self.callback.__name__}" + + +class InternalConsumer(Consumer): + """ + An InternalConsumer is a classic Consumer except that his callback is declared internally + """ + + def __init__(self): + """ + The constructor only override the callback to be the 'internal_callback' method + """ + super().__init__(None) + self.callback: typing.Callable = self.internal_callback + + async def internal_callback(self, **kwargs: dict) -> None: + """ + The method triggered when the producer has pushed into the channel + :param kwargs: Additional params + """ + raise NotImplementedError("internal_callback is not implemented") + + +class SupervisedConsumer(Consumer): + """ + A SupervisedConsumer is a classic Consumer that notifies the queue when its work is done + """ + + def __init__( + self, + callback: typing.Callable, + size: int = async_channel.constants.DEFAULT_QUEUE_SIZE, + priority_level: int = async_channel.enums.ChannelConsumerPriorityLevels.HIGH.value, + ): + """ + The constructor only override the callback to be the 'internal_callback' method + """ + super().__init__(callback, size=size, priority_level=priority_level) + + # Clear when perform is running (set after) + self.idle: asyncio.Event = asyncio.Event() + self.idle.set() + + async def join(self, timeout: float) -> None: + """ + Wait for any perform to be finished. + """ + if not self.idle.is_set(): + await asyncio.wait_for(self.idle.wait(), timeout) + + async def join_queue(self) -> None: + """ + Wait for the consumer queue to finish processing. + """ + await self.queue.join() + + async def perform(self, kwargs) -> None: + """ + Clear self.idle event when perform is being done then set it + :param kwargs: queue get content + """ + try: + self.idle.clear() + await self.callback(**kwargs) + finally: + self.idle.set() + + async def consume_ends(self) -> None: + """ + The method called when the work is done + """ + try: + self.queue.task_done() + except ( + ValueError + ): # when task_done() is called when the Exception was CancelledError + pass diff --git a/packages/async_channel/async_channel/enums.py b/packages/async_channel/async_channel/enums.py new file mode 100644 index 0000000000..0f36a8b7e1 --- /dev/null +++ b/packages/async_channel/async_channel/enums.py @@ -0,0 +1,31 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel global enums +""" + +import enum + + +class ChannelConsumerPriorityLevels(enum.Enum): + """ + Channel consumer priority levels + """ + + HIGH = 0 + MEDIUM = 1 + # LOW = 2 not necessary for now + OPTIONAL = 2 diff --git a/packages/async_channel/async_channel/producer.py b/packages/async_channel/async_channel/producer.py new file mode 100644 index 0000000000..75b2edd4c5 --- /dev/null +++ b/packages/async_channel/async_channel/producer.py @@ -0,0 +1,178 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel Producer class +""" +import asyncio +import typing + +import async_channel.util.logging_util as logging + +if typing.TYPE_CHECKING: + import async_channel.channels.channel + + +class Producer: + """ + A Producer is responsible for producing some output that may be placed onto the head of a queue. + A Consumer will consume this data through the same shared queue. + A producer doesn't need to know or care about its consumers. + But if there is no space in the queue, it won't be able to share what it has produced. + It manages its consumer calls by priority levels + When the channel is synchronized priority levels are used to priorities or delay consumer calls + """ + + def __init__(self, channel: "async_channel.channels.channel.Channel"): + self.logger = logging.get_logger(self.__class__.__name__) + + # Related async_channel instance + self.channel: "async_channel.channels.channel.Channel" = channel + + """ + Should only be used with .cancel() + """ + self.produce_task: typing.Optional[asyncio.Task] = None + + """ + Should be used as the perform while loop condition + while(self.should_stop): + ... + """ + self.should_stop: bool = False + + """ + Should be used to know if the producer is already started + """ + self.is_running: bool = False + + async def send(self, data: typing.Any) -> None: + """ + Send to each consumer data though its queue + :param data: data to be put into consumers queues + + The implementation should use 'self.async_channel.get_consumers' + Example + >>> for consumer in self.async_channel.get_consumers(): + >>> await consumer.queue.put({ + >>> "my_key": my_value + >>> }) + """ + for consumer in self.channel.get_consumers(): + await consumer.queue.put(data) + + async def push(self, **kwargs) -> None: + """ + Push notification that new data should be sent implementation + When nothing should be done on data : self.send() + """ + + async def start(self) -> None: + """ + Should be implemented for producer's non-triggered tasks + """ + + async def pause(self) -> None: + """ + Called when the channel runs out of consumer + """ + self.logger.debug("Pausing...") + self.is_running = False + # Triggers itself if not already paused + if not self.channel.is_paused: + self.channel.is_paused = True + + async def resume(self) -> None: + """ + Called when the channel is no longer out of consumer + """ + self.logger.debug("Resuming...") + # Triggers itself if not already resumed + if self.channel.is_paused: + self.channel.is_paused = False + + async def perform(self, **kwargs) -> None: + """ + Should implement producer's non-triggered tasks + Can be use to force producer to perform tasks + """ + + async def modify(self, **kwargs) -> None: + """ + Should be implemented when producer can be modified during perform() + """ + + async def wait_for_processing(self) -> None: + """ + Should be used only with SupervisedConsumers + It will wait until all consumers have notified that their consume() method have ended + """ + await asyncio.gather( + *(consumer.join_queue() for consumer in self.channel.get_consumers()) + ) + + async def synchronized_perform_consumers_queue( + self, priority_level: int, join_consumers: bool, timeout: float + ) -> None: + """ + Empties the queue synchronously for each consumers + :param priority_level: the consumer minimal priority level + :param join_consumers: True if consumer tasks should be joined. Avoids orphaned tasks to run without + :param timeout: Time to wait for consumers in join call + waiting for them when started before this check (when check, their queue is empty but a task is running) + """ + for consumer in self.channel.get_prioritized_consumers(priority_level): + while not consumer.queue.empty(): + await consumer.perform(await consumer.queue.get()) + if join_consumers: + await consumer.join(timeout) + + async def stop(self) -> None: + """ + Stops non-triggered tasks management + """ + self.should_stop = True + self.is_running = False + if self.produce_task: + self.produce_task.cancel() + + def create_task(self) -> None: + """ + Creates a new asyncio task that contains start() execution + """ + self.is_running = True + self.produce_task = asyncio.create_task(self.start()) + + async def run(self) -> None: + """ + Start the producer main task + Shouldn't start the producer main task if the channel is synchronized + Should always call + >>> self.async_channel.register_producer + """ + await self.channel.register_producer(self) + if not self.channel.is_synchronized: + self.create_task() + + def is_consumers_queue_empty(self, priority_level: int) -> bool: + """ + Check if consumers queue are empty + :param priority_level: the consumer minimal priority level + :return: the check result + """ + for consumer in self.channel.get_consumers(): + if consumer.priority_level <= priority_level and not consumer.queue.empty(): + return False + return True diff --git a/packages/async_channel/async_channel/util/__init__.py b/packages/async_channel/async_channel/util/__init__.py new file mode 100644 index 0000000000..b8a9c90f51 --- /dev/null +++ b/packages/async_channel/async_channel/util/__init__.py @@ -0,0 +1,41 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define Channel helping methods +""" +from async_channel.util import channel_creator +from async_channel.util import logging_util +from async_channel.util import synchronization_util + +from async_channel.util.channel_creator import ( + create_all_subclasses_channel, + create_channel_instance, +) + +from async_channel.util.logging_util import ( + get_logger, +) + +from async_channel.util.synchronization_util import ( + trigger_and_bypass_consumers_queue, +) + +__all__ = [ + "create_all_subclasses_channel", + "create_channel_instance", + "get_logger", + "trigger_and_bypass_consumers_queue", +] diff --git a/packages/async_channel/async_channel/util/channel_creator.py b/packages/async_channel/async_channel/util/channel_creator.py new file mode 100644 index 0000000000..9d41e23825 --- /dev/null +++ b/packages/async_channel/async_channel/util/channel_creator.py @@ -0,0 +1,67 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define Channel creation helping methods +""" +import typing + +if typing.TYPE_CHECKING: + import async_channel.channels.channel + + +async def create_all_subclasses_channel( + channel_class: typing.Type["async_channel.channels.channel.Channel"], + set_chan_method: typing.Callable, + is_synchronized: bool = False, + **kwargs: dict +) -> None: + """ + Calls 'channel_creator.create_channel_instance' for each subclasses of the 'channel_class' param + :param channel_class: The class in which to search for subclasses + :param set_chan_method: The method reference used in 'channel_creator.create_channel_instance' + :param is_synchronized: the channel is_synchronized attribute + :param kwargs: Some additional params passed to 'channel_creator.create_channel_instance' + """ + for to_be_created_channel_class in channel_class.__subclasses__(): + await create_channel_instance( + to_be_created_channel_class, + set_chan_method, + is_synchronized=is_synchronized, + **kwargs + ) + + +async def create_channel_instance( + channel_class: typing.Type["async_channel.channels.channel.Channel"], + set_chan_method: typing.Callable, + is_synchronized: bool = False, + channel_name: typing.Optional[str] = None, + **kwargs: dict +) -> "async_channel.channels.channel.Channel": + """ + Creates, initialize and start a async_channel instance + :param channel_class: The class to instantiate with optional kwargs params + :param set_chan_method: The method to call to add the created channel instance to a Channel list + :param is_synchronized: the channel is_synchronized attribute + :param channel_name: name of the channel to create. Defaults to channel_class.get_name() + :param kwargs: Some additional params passed to the 'channel_class' constructor + :return: the created 'channel_class' instance + """ + created_channel = channel_class(**kwargs) + set_chan_method(created_channel, name=channel_name or channel_class.get_name()) + created_channel.is_synchronized = is_synchronized + await created_channel.start() + return created_channel diff --git a/packages/async_channel/async_channel/util/logging_util.py b/packages/async_channel/async_channel/util/logging_util.py new file mode 100644 index 0000000000..8f47fa1984 --- /dev/null +++ b/packages/async_channel/async_channel/util/logging_util.py @@ -0,0 +1,33 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel logger implementation +""" +import logging + + +# pylint: disable=no-member, import-outside-toplevel +def get_logger(name: str = "") -> logging.Logger: + """ + :param name: the logger name + :return: the logger implementation, can be octobot_commons one or default python logging + """ + try: + import octobot_commons.logging as common_logging + + return common_logging.get_logger(logger_name=name) + except ImportError: + return logging.getLogger(name) diff --git a/packages/async_channel/async_channel/util/synchronization_util.py b/packages/async_channel/async_channel/util/synchronization_util.py new file mode 100644 index 0000000000..1db3cd6915 --- /dev/null +++ b/packages/async_channel/async_channel/util/synchronization_util.py @@ -0,0 +1,37 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Define async_channel synchronization utilities +""" +import asyncio +import typing + +if typing.TYPE_CHECKING: + import async_channel.consumer + + +async def trigger_and_bypass_consumers_queue( + consumers: list["async_channel.consumer.Consumer"], kwargs: dict +): + """ + Triggers the consumers queue and bypasses the consumers callback. + Warning: this can cause concurrent async executions of the consumer callback + as the queue is bypassed. + """ + await asyncio.gather(*[ + consumer.callback(**kwargs) + for consumer in consumers + ]) diff --git a/packages/async_channel/dev_requirements.txt b/packages/async_channel/dev_requirements.txt new file mode 100644 index 0000000000..5444f6a736 --- /dev/null +++ b/packages/async_channel/dev_requirements.txt @@ -0,0 +1,24 @@ +pytest>=7.1 +pytest-asyncio>=0.19 +pytest-pep8 +pytest-cov +pytest-asyncio +pytest-xdist + +mock>=4.0.2 + +coverage +coveralls + +twine +pip +setuptools +wheel + +pur + +sphinx==3.2.1 +sphinx_rtd_theme + +pylint +black==25.12.0 diff --git a/packages/async_channel/docs/Makefile b/packages/async_channel/docs/Makefile new file mode 100644 index 0000000000..d0c3cbf102 --- /dev/null +++ b/packages/async_channel/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/packages/async_channel/docs/make.bat b/packages/async_channel/docs/make.bat new file mode 100644 index 0000000000..6247f7e231 --- /dev/null +++ b/packages/async_channel/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/packages/async_channel/docs/source/conf.py b/packages/async_channel/docs/source/conf.py new file mode 100644 index 0000000000..e90f5deb2b --- /dev/null +++ b/packages/async_channel/docs/source/conf.py @@ -0,0 +1,87 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath('../..')) + + +# -- Project information ----------------------------------------------------- + +project = 'Async-Channel' +copyright = '2020, Drakkar-Software' +author = 'Drakkar-Software' + +# The short X.Y version +version = '1.3' + +# The full version, including alpha/beta/rc tags +release = '1.3.21-beta' + +# https://github.com/readthedocs/readthedocs.org/issues/2569 +master_doc = 'index' + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.todo', + 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'sphinx.ext.napoleon', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for intersphinx extension --------------------------------------- + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'https://docs.python.org/3/': None} + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True \ No newline at end of file diff --git a/packages/async_channel/docs/source/index.rst b/packages/async_channel/docs/source/index.rst new file mode 100644 index 0000000000..61b54a7cd3 --- /dev/null +++ b/packages/async_channel/docs/source/index.rst @@ -0,0 +1,20 @@ +.. OctoBot-Channels documentation master file, created by + sphinx-quickstart on Tue Mar 24 00:38:18 2020. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to OctoBot-Channels's documentation! +============================================ + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/packages/async_channel/docs/source/modules.rst b/packages/async_channel/docs/source/modules.rst new file mode 100644 index 0000000000..5f84c92a06 --- /dev/null +++ b/packages/async_channel/docs/source/modules.rst @@ -0,0 +1,7 @@ +octobot_channels +================ + +.. toctree:: + :maxdepth: 4 + + octobot_channels diff --git a/packages/async_channel/docs/source/octobot_channels.channels.rst b/packages/async_channel/docs/source/octobot_channels.channels.rst new file mode 100644 index 0000000000..8773ded184 --- /dev/null +++ b/packages/async_channel/docs/source/octobot_channels.channels.rst @@ -0,0 +1,30 @@ +octobot\_channels.channels package +================================== + +Submodules +---------- + +octobot\_channels.channels.channel module +----------------------------------------- + +.. automodule:: octobot_channels.channels.channel + :members: + :undoc-members: + :show-inheritance: + +octobot\_channels.channels.channel\_instances module +---------------------------------------------------- + +.. automodule:: octobot_channels.channels.channel_instances + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: octobot_channels.channels + :members: + :undoc-members: + :show-inheritance: diff --git a/packages/async_channel/docs/source/octobot_channels.rst b/packages/async_channel/docs/source/octobot_channels.rst new file mode 100644 index 0000000000..283568d618 --- /dev/null +++ b/packages/async_channel/docs/source/octobot_channels.rst @@ -0,0 +1,46 @@ +octobot\_channels package +========================= + +Subpackages +----------- + +.. toctree:: + + octobot_channels.channels + octobot_channels.util + +Submodules +---------- + +octobot\_channels.constants module +---------------------------------- + +.. automodule:: octobot_channels.constants + :members: + :undoc-members: + :show-inheritance: + +octobot\_channels.consumer module +--------------------------------- + +.. automodule:: octobot_channels.consumer + :members: + :undoc-members: + :show-inheritance: + +octobot\_channels.producer module +--------------------------------- + +.. automodule:: octobot_channels.producer + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: octobot_channels + :members: + :undoc-members: + :show-inheritance: diff --git a/packages/async_channel/docs/source/octobot_channels.util.rst b/packages/async_channel/docs/source/octobot_channels.util.rst new file mode 100644 index 0000000000..fead321773 --- /dev/null +++ b/packages/async_channel/docs/source/octobot_channels.util.rst @@ -0,0 +1,22 @@ +octobot\_channels.util package +============================== + +Submodules +---------- + +octobot\_channels.util.channel\_creator module +---------------------------------------------- + +.. automodule:: octobot_channels.util.channel_creator + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: octobot_channels.util + :members: + :undoc-members: + :show-inheritance: diff --git a/packages/async_channel/standard.rc b/packages/async_channel/standard.rc new file mode 100644 index 0000000000..f6f2055c50 --- /dev/null +++ b/packages/async_channel/standard.rc @@ -0,0 +1,494 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist=octobot_commons.logging.logging_util + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,tests,docs + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=too-few-public-methods, + logging-fstring-interpolation, + consider-using-from-import + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[LOGGING] + +# Format style used to check logging format string. `old` means using % +# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[STRING] + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning on implicit string concatenation in sequences defined over +# several lines. +check-str-concat-over-line-jumps=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=120 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/packages/async_channel/tests/__init__.py b/packages/async_channel/tests/__init__.py new file mode 100644 index 0000000000..933ddfa6d1 --- /dev/null +++ b/packages/async_channel/tests/__init__.py @@ -0,0 +1,78 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio + +import async_channel.channels as channels +import async_channel.consumer as channel_consumer +import async_channel.producer as producer + +TEST_CHANNEL = "Test" +EMPTY_TEST_CHANNEL = "EmptyTest" +EMPTY_TEST_WITH_ID_CHANNEL = "EmptyTestWithId" +CONSUMER_KEY = "test" + + +class EmptyTestConsumer(channel_consumer.Consumer): + pass + + +class EmptyTestSupervisedConsumer(channel_consumer.SupervisedConsumer): + pass + + +class EmptyTestProducer(producer.Producer): + async def start(self): + await asyncio.sleep(100000) + + async def pause(self): + pass + + async def resume(self): + pass + + +class EmptyTestChannel(channels.Channel): + CONSUMER_CLASS = EmptyTestConsumer + PRODUCER_CLASS = EmptyTestProducer + + +async def empty_test_callback(): + pass + + +async def mock_was_called_once(mocked_method): + await wait_asyncio_next_cycle() + mocked_method.assert_called_once() + + +async def mock_was_not_called(mocked_method): + await wait_asyncio_next_cycle() + mocked_method.assert_not_called() + + +class EmptyTestWithIdChannel(channels.Channel): + CONSUMER_CLASS = EmptyTestConsumer + PRODUCER_CLASS = EmptyTestProducer + + def __init__(self, test_id): + super().__init__() + self.chan_id = test_id + + +async def wait_asyncio_next_cycle(): + async def do_nothing(): + pass + await asyncio.create_task(do_nothing()) diff --git a/packages/async_channel/tests/test_channel.py b/packages/async_channel/tests/test_channel.py new file mode 100644 index 0000000000..b31c9eacb3 --- /dev/null +++ b/packages/async_channel/tests/test_channel.py @@ -0,0 +1,410 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +import pytest +import pytest_asyncio +import mock + +import async_channel.channels as channels +import async_channel.util as util +import async_channel + +import tests + + +@pytest_asyncio.fixture +async def test_channel(): + channels.del_chan(tests.EMPTY_TEST_CHANNEL) + yield await util.create_channel_instance(tests.EmptyTestChannel, channels.set_chan) + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_get_chan(): + class TestChannel(channels.Channel): + pass + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_set_chan(): + class TestChannel(channels.Channel): + pass + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + with pytest.raises(ValueError): + channels.set_chan(TestChannel(), name=TestChannel.get_name()) + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_set_chan_using_default_name(): + class TestChannel(channels.Channel): + pass + + channels.del_chan(tests.TEST_CHANNEL) + channel = TestChannel() + returned_channel = channels.set_chan(channel, name=None) + assert returned_channel is channel + assert channel.get_name() is not None + assert channels.ChannelInstances.instance().channels[channel.get_name()] == channel + with pytest.raises(ValueError): + channels.set_chan(TestChannel(), name=TestChannel.get_name()) + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_get_internal_producer(): + class TestChannel(channels.Channel): + pass + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + with pytest.raises(TypeError): + channels.get_chan(tests.TEST_CHANNEL).get_internal_producer() + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_new_consumer_without_producer(test_channel): + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + assert len(channels.get_chan(tests.EMPTY_TEST_CHANNEL).consumers) == 1 + + +@pytest.mark.asyncio +async def test_new_consumer_without_filters(test_channel): + consumer = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == [consumer] + + +@pytest.mark.asyncio +async def test_new_consumer_with_filters(test_channel): + consumer = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback, {"test_key": 1}) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({}) == [consumer] # returns all if empty + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 2}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1, "test2": 2}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1}) == [consumer] + + +@pytest.mark.asyncio +async def test_new_consumer_with_expected_wildcard_filters(test_channel): + consumer = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback, {"test_key": 1, + "test_key_2": "abc"}) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({}) == [consumer] # returns all if empty + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1, "test_key_2": "abc"}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": 1, "test_key_2": "abc", "test_key_3": 45}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": 1, "test_key_2": "abc", "test_key_3": async_channel.CHANNEL_WILDCARD}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 4, "test_key_2": "bc"}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1, "test_key_2": async_channel.CHANNEL_WILDCARD}) == [ + consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 3, "test_key_2": async_channel.CHANNEL_WILDCARD}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": "abc"}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": "a"}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": async_channel.CHANNEL_WILDCARD}) == [consumer] + + +@pytest.mark.asyncio +async def test_new_consumer_with_consumer_wildcard_filters(test_channel): + consumer = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback, {"test_key": 1, + "test_key_2": "abc", + "test_key_3": async_channel.CHANNEL_WILDCARD}) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({}) == [consumer] # returns all if empty + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1, "test_key_2": "abc"}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": 1, "test_key_2": "abc", "test_key_3": 45}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": 1, "test_key_2": "abc", "test_key_3": async_channel.CHANNEL_WILDCARD}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 4, "test_key_2": "bc"}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1, "test_key_2": async_channel.CHANNEL_WILDCARD}) == [ + consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 1}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key_2": async_channel.CHANNEL_WILDCARD}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key_3": async_channel.CHANNEL_WILDCARD}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key_3": "e"}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"test_key": 3, "test_key_2": async_channel.CHANNEL_WILDCARD}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": "abc"}) == [consumer] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": "a"}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": "a", "test_key_3": async_channel.CHANNEL_WILDCARD}) == [] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters( + {"test_key": async_channel.CHANNEL_WILDCARD, "test_key_2": async_channel.CHANNEL_WILDCARD}) == [consumer] + + +@pytest.mark.asyncio +async def test_new_consumer_with_multiple_consumer_filtering(test_channel): + consumers_descriptions = [ + {"A": 1, "B": 2, "C": async_channel.CHANNEL_WILDCARD}, # 0 + {"A": False, "B": "BBBB", "C": async_channel.CHANNEL_WILDCARD}, # 1 + {"A": 3, "B": async_channel.CHANNEL_WILDCARD, "C": async_channel.CHANNEL_WILDCARD}, # 2 + {"A": async_channel.CHANNEL_WILDCARD, "B": async_channel.CHANNEL_WILDCARD, "C": async_channel.CHANNEL_WILDCARD}, # 3 + {"A": async_channel.CHANNEL_WILDCARD, "B": 2, "C": 1}, # 4 + {"A": True, "B": async_channel.CHANNEL_WILDCARD, "C": async_channel.CHANNEL_WILDCARD}, # 5 + {"A": None, "B": None, "C": async_channel.CHANNEL_WILDCARD}, # 6 + {"A": "PPP", "B": 1, "C": async_channel.CHANNEL_WILDCARD, "D": 5}, # 7 + {"A": async_channel.CHANNEL_WILDCARD, "B": 2, "C": "ABC"}, # 8 + {"A": async_channel.CHANNEL_WILDCARD, "B": True, "C": async_channel.CHANNEL_WILDCARD}, # 9 + {"A": async_channel.CHANNEL_WILDCARD, "B": 6, "C": async_channel.CHANNEL_WILDCARD, "D": async_channel.CHANNEL_WILDCARD}, # 10 + {"A": async_channel.CHANNEL_WILDCARD, "B": async_channel.CHANNEL_WILDCARD, "C": async_channel.CHANNEL_WILDCARD, "D": async_channel.CHANNEL_WILDCARD}, # 11 + {"A": None, "B": False, "C": "LLLL", "D": async_channel.CHANNEL_WILDCARD}, # 12 + {"A": None, "B": None, "C": async_channel.CHANNEL_WILDCARD, "D": None}, # 13 + {"A": async_channel.CHANNEL_WILDCARD, "B": 2, "C": async_channel.CHANNEL_WILDCARD, "D": None}, # 14 + {"A": async_channel.CHANNEL_WILDCARD, "B": [2, 3, 4, 5, 6], "C": async_channel.CHANNEL_WILDCARD, "D": None}, # 15 + {"A": async_channel.CHANNEL_WILDCARD, "B": ["A", 5, "G"], "C": async_channel.CHANNEL_WILDCARD, "D": None}, # 16 + {"A": [1, 2, 3], "B": 2, "C": async_channel.CHANNEL_WILDCARD, "D": async_channel.CHANNEL_WILDCARD}, # 17 + {"A": ["A", "B", "C"], "B": 2, "C": async_channel.CHANNEL_WILDCARD, "D": async_channel.CHANNEL_WILDCARD}, # 18 + {"A": async_channel.CHANNEL_WILDCARD, "B": [2], "C": async_channel.CHANNEL_WILDCARD, "D": async_channel.CHANNEL_WILDCARD}, # 19 + {"A": async_channel.CHANNEL_WILDCARD, "B": ["B"], "C": async_channel.CHANNEL_WILDCARD, "D": async_channel.CHANNEL_WILDCARD}, # 20 + {"A": 18, "B": ["A", "B", "C"], "C": ["---", "9", "#"], "D": async_channel.CHANNEL_WILDCARD}, # 21 + {"A": [9, 18], "B": ["B", "C", "D"], "C": ["---", "9", "#", "@", "{"], "D": ["P", "__str__"]} # 22 + ] + + consumers = [ + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback, consumers_description) + for consumers_description in consumers_descriptions + ] + + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == consumers + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({}) == consumers + # Warning : consumer[5] is returned because 1 == True + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": 1, "B": "6"}) == \ + [consumers[3], consumers[5], consumers[11]] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": async_channel.CHANNEL_WILDCARD, "B": "G", "C": "1A"}) == \ + [consumers[2], consumers[3], consumers[5], consumers[11], consumers[16]] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": async_channel.CHANNEL_WILDCARD, "B": async_channel.CHANNEL_WILDCARD, + "C": async_channel.CHANNEL_WILDCARD}) == consumers + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": 18, "B": "A", "C": "#"}) == \ + [consumers[3], consumers[11], consumers[16], consumers[21]] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": 18, "B": "C", "C": "#", "D": None}) == \ + [consumers[11], consumers[21]] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": 18, "B": "C", "C": "^", "D": None}) == \ + [consumers[11]] + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumer_from_filters({"A": 18, "B": "C", "C": "#", "D": "__str__"}) == \ + [consumers[11], consumers[21], consumers[22]] + + +@pytest.mark.asyncio +async def test_remove_consumer(test_channel): + consumer = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == [consumer] + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).remove_consumer(consumer) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).get_consumers() == [] + + +@pytest.mark.asyncio +async def test_unregister_producer(test_channel): + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).producers == [] + producer = tests.EmptyTestProducer(None) + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).register_producer(producer) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).producers == [producer] + + +@pytest.mark.asyncio +async def test_register_producer(test_channel): + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).producers == [] + producer = tests.EmptyTestProducer(None) + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).register_producer(producer) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).producers == [producer] + channels.get_chan(tests.EMPTY_TEST_CHANNEL).unregister_producer(producer) + assert channels.get_chan(tests.EMPTY_TEST_CHANNEL).producers == [] + + +@pytest.mark.asyncio +async def test_flush(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + producer2 = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer2) + producer3 = tests.EmptyTestProducer(test_channel) + test_channel.internal_producer = producer3 + + assert producer3.channel is test_channel + for producer in test_channel.producers: + assert producer.channel is test_channel + + test_channel.flush() + assert test_channel.internal_producer.channel is None + for producer in test_channel.producers: + assert producer.channel is None + + +@pytest.mark.asyncio +async def test_start(test_channel): + consumer_1 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + consumer_2 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + with mock.patch.object(consumer_1, 'start', new=mock.AsyncMock()) as mocked_consumer_1_start: + with mock.patch.object(consumer_2, 'start', new=mock.AsyncMock()) as mocked_consumer_2_start: + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).start() + await tests.mock_was_called_once(mocked_consumer_1_start) + await tests.mock_was_called_once(mocked_consumer_2_start) + + +@pytest.mark.asyncio +async def test_run(test_channel): + consumer_1 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + consumer_2 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer(tests.empty_test_callback) + with mock.patch.object(consumer_1, 'run', new=mock.AsyncMock()) as mocked_consumer_1_run: + with mock.patch.object(consumer_2, 'run', new=mock.AsyncMock()) as mocked_consumer_2_run: + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).run() + await tests.mock_was_called_once(mocked_consumer_1_run) + await tests.mock_was_called_once(mocked_consumer_2_run) + + +@pytest.mark.asyncio +async def test_modify(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + producer_2 = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer_2) + with mock.patch.object(producer, 'modify', new=mock.AsyncMock()) as mocked_producer_1_modify: + with mock.patch.object(producer_2, 'modify', new=mock.AsyncMock()) as mocked_producer_2_modify: + await channels.get_chan(tests.EMPTY_TEST_CHANNEL).modify() + await tests.mock_was_called_once(mocked_producer_1_modify) + await tests.mock_was_called_once(mocked_producer_2_modify) + + +@pytest.mark.asyncio +async def test_should_pause_producers_with_no_consumers(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + test_channel.is_paused = False + if not os.getenv('CYTHON_IGNORE'): + assert test_channel._should_pause_producers() + + +@pytest.mark.asyncio +async def test_should_pause_producers_when_already_paused(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + test_channel.is_paused = True + if not os.getenv('CYTHON_IGNORE'): + assert not test_channel._should_pause_producers() + + +@pytest.mark.asyncio +async def test_should_pause_producers_with_priority_consumers(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + consumer_1 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.HIGH.value) + consumer_2 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.MEDIUM.value) + consumer_3 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + test_channel.is_paused = False + if not os.getenv('CYTHON_IGNORE'): + assert not test_channel._should_pause_producers() + await test_channel.remove_consumer(consumer_1) + await test_channel.remove_consumer(consumer_2) + await test_channel.remove_consumer(consumer_3) + + +@pytest.mark.asyncio +async def test_should_pause_producers_with_optional_consumers(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + consumer_1 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + consumer_2 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + consumer_3 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + test_channel.is_paused = False + if not os.getenv('CYTHON_IGNORE'): + assert test_channel._should_pause_producers() + await test_channel.remove_consumer(consumer_1) + await test_channel.remove_consumer(consumer_2) + await test_channel.remove_consumer(consumer_3) + + +@pytest.mark.asyncio +async def test_should_resume_producers_with_no_consumers(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + test_channel.is_paused = True + if not os.getenv('CYTHON_IGNORE'): + assert not test_channel._should_resume_producers() + + +@pytest.mark.asyncio +async def test_should_resume_producers_when_already_resumed(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + test_channel.is_paused = False + if not os.getenv('CYTHON_IGNORE'): + assert not test_channel._should_resume_producers() + + +@pytest.mark.asyncio +async def test_should_resume_producers_with_priority_consumers(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + consumer_1 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.HIGH.value) + consumer_2 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.MEDIUM.value) + consumer_3 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + test_channel.is_paused = True + if not os.getenv('CYTHON_IGNORE'): + assert test_channel._should_resume_producers() + await test_channel.remove_consumer(consumer_1) + await test_channel.remove_consumer(consumer_2) + await test_channel.remove_consumer(consumer_3) + + +@pytest.mark.asyncio +async def test_should_resume_producers_with_optional_consumers(test_channel): + producer = tests.EmptyTestProducer(test_channel) + await test_channel.register_producer(producer) + consumer_1 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + consumer_2 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + consumer_3 = await channels.get_chan(tests.EMPTY_TEST_CHANNEL).new_consumer( + tests.empty_test_callback, + priority_level=async_channel.ChannelConsumerPriorityLevels.OPTIONAL.value) + test_channel.is_paused = True + if not os.getenv('CYTHON_IGNORE'): + assert not test_channel._should_resume_producers() + await test_channel.remove_consumer(consumer_1) + await test_channel.remove_consumer(consumer_2) + await test_channel.remove_consumer(consumer_3) diff --git a/packages/async_channel/tests/test_channel_creator.py b/packages/async_channel/tests/test_channel_creator.py new file mode 100644 index 0000000000..c61ef84b20 --- /dev/null +++ b/packages/async_channel/tests/test_channel_creator.py @@ -0,0 +1,80 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import copy +import mock +import pytest + +import async_channel.channels as channels +import async_channel.util as util + +import tests + + +@pytest.mark.asyncio +async def test_create_channel_instance(): + class TestChannel(channels.Channel): + pass + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_create_synchronized_channel_instance(): + class TestChannel(channels.Channel): + pass + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan, is_synchronized=True) + assert channels.get_chan(tests.TEST_CHANNEL).is_synchronized + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_create_all_subclasses_channel(): + class TestChannelClass(channels.Channel): + pass + + class Test1Channel(TestChannelClass): + pass + + class Test2Channel(TestChannelClass): + pass + + def clean_channels(): + for channel in copy.deepcopy(channels.ChannelInstances.instance().channels): + channels.del_chan(channel) + + channels.del_chan(tests.TEST_CHANNEL) + with mock.patch.object( + TestChannelClass, '__subclasses__', mock.Mock(return_value=[Test1Channel, Test2Channel]) + ) as mock_subclasses: + clean_channels() + await util.create_all_subclasses_channel(TestChannelClass, channels.set_chan) + assert sorted(channels.ChannelInstances.instance().channels) == sorted([ + chan.get_name() for chan in [Test1Channel, Test2Channel] + ]) + mock_subclasses.assert_called_once() + mock_subclasses.reset_mock() + clean_channels() + await util.create_all_subclasses_channel(TestChannelClass, channels.set_chan, is_synchronized=True) + sync_channels = channels.ChannelInstances.instance().channels + assert len(sync_channels) == 2 + assert all(channels.get_chan(channel).is_synchronized for channel in sync_channels) + clean_channels() + mock_subclasses.assert_called_once() + mock_subclasses.reset_mock() diff --git a/packages/async_channel/tests/test_channel_instances.py b/packages/async_channel/tests/test_channel_instances.py new file mode 100644 index 0000000000..5ed06d0f60 --- /dev/null +++ b/packages/async_channel/tests/test_channel_instances.py @@ -0,0 +1,114 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import uuid + +import pytest +import pytest_asyncio + +import async_channel.channels as channels +import async_channel.util as util +import tests + + +@pytest_asyncio.fixture +async def chan_id(): + channel_uuid = uuid.uuid4().hex + await util.create_channel_instance(tests.EmptyTestWithIdChannel, channels.set_chan_at_id, test_id=channel_uuid) + return channel_uuid + + +@pytest_asyncio.fixture +async def channel_id(): + channel_uuid = uuid.uuid4().hex + await util.create_channel_instance(tests.EmptyTestWithIdChannel, channels.set_chan_at_id, test_id=channel_uuid) + yield channel_uuid + await channels.get_chan_at_id(tests.EMPTY_TEST_WITH_ID_CHANNEL, channel_uuid).stop() + channels.del_chan_at_id(tests.EMPTY_TEST_WITH_ID_CHANNEL, channel_uuid) + + +@pytest.mark.asyncio +async def test_get_chan_at_id(channel_id): + assert channels.get_chan_at_id(tests.EMPTY_TEST_WITH_ID_CHANNEL, channel_id) + + +@pytest.mark.asyncio +async def test_set_chan_at_id_already_exist(channel_id): + with pytest.raises(ValueError): + await util.create_channel_instance(tests.EmptyTestWithIdChannel, channels.set_chan_at_id, test_id=channel_id) + + +@pytest.mark.asyncio +async def test_del_channel_container_not_exist_does_not_raise(channel_id): + channels.del_channel_container(channel_id + "test") + + +@pytest.mark.asyncio +async def test_del_channel_container(chan_id): + channels.del_channel_container(chan_id) + with pytest.raises(KeyError): + channels.get_chan_at_id(tests.EMPTY_TEST_WITH_ID_CHANNEL, chan_id) + channels.del_chan_at_id(tests.EMPTY_TEST_WITH_ID_CHANNEL, chan_id) + + +@pytest.mark.asyncio +async def test_get_channels_not_exist(channel_id): + with pytest.raises(KeyError): + channels.get_channels(channel_id + "test") + + +@pytest.mark.asyncio +async def test_get_channels(chan_id): + class EmptyTestWithId2Channel(tests.EmptyTestWithIdChannel): + pass + + class EmptyTestWithId3Channel(tests.EmptyTestWithIdChannel): + pass + + class EmptyTestWithId4Channel(tests.EmptyTestWithIdChannel): + pass + + class EmptyTestWithId5Channel(tests.EmptyTestWithIdChannel): + pass + + class EmptyTestWithId6Channel(tests.EmptyTestWithIdChannel): + pass + + channel_4_id = uuid.uuid4().hex + channel_6_id = uuid.uuid4().hex + ch1 = channels.get_chan_at_id(tests.EMPTY_TEST_WITH_ID_CHANNEL, chan_id) + ch2 = await util.create_channel_instance(EmptyTestWithId2Channel, channels.set_chan_at_id, test_id=chan_id) + ch3 = await util.create_channel_instance(EmptyTestWithId3Channel, channels.set_chan_at_id, test_id=chan_id) + ch4 = await util.create_channel_instance(EmptyTestWithId4Channel, channels.set_chan_at_id, test_id=channel_4_id) + ch5 = await util.create_channel_instance(EmptyTestWithId5Channel, channels.set_chan_at_id, test_id=channel_4_id) + ch6 = await util.create_channel_instance(EmptyTestWithId6Channel, channels.set_chan_at_id, test_id=channel_6_id) + assert len(channels.get_channels(chan_id)) == 3 + assert len(channels.get_channels(channel_4_id)) == 2 + assert len(channels.get_channels(channel_6_id)) == 1 + assert channels.get_channels(chan_id) == { + "EmptyTestWithId": ch1, + "EmptyTestWithId2": ch2, + "EmptyTestWithId3": ch3 + } + assert channels.get_channels(channel_4_id) == { + "EmptyTestWithId4": ch4, + "EmptyTestWithId5": ch5 + } + assert channels.get_channels(channel_6_id) == { + "EmptyTestWithId6": ch6 + } + channels.del_channel_container(chan_id) + channels.del_channel_container(channel_4_id) + channels.del_channel_container(channel_6_id) diff --git a/packages/async_channel/tests/test_consumer.py b/packages/async_channel/tests/test_consumer.py new file mode 100644 index 0000000000..6aed9b2fd6 --- /dev/null +++ b/packages/async_channel/tests/test_consumer.py @@ -0,0 +1,108 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio +import mock + +import async_channel.consumer as channel_consumer +import async_channel.channels as channels +import async_channel.util as util +import tests + + +async def init_consumer_test(): + class TestChannel(channels.Channel): + PRODUCER_CLASS = tests.EmptyTestProducer + CONSUMER_CLASS = tests.EmptyTestConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + producer = tests.EmptyTestProducer(channels.get_chan(tests.TEST_CHANNEL)) + await producer.run() + return await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + + +@pytest.mark.asyncio +async def test_perform_called(): + consumer = await init_consumer_test() + with mock.patch.object(consumer, 'perform', new=mock.AsyncMock()) as mocked_consume_ends: + await channels.get_chan(tests.TEST_CHANNEL).get_internal_producer().send({}) + await tests.mock_was_called_once(mocked_consume_ends) + + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_consume_ends_called(): + consumer = await init_consumer_test() + with mock.patch.object(consumer, 'consume_ends', new=mock.AsyncMock()) as mocked_consume_ends: + await channels.get_chan(tests.TEST_CHANNEL).get_internal_producer().send({}) + await tests.mock_was_called_once(mocked_consume_ends) + + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest_asyncio.fixture +async def internal_consumer(): + class TestInternalConsumer(channel_consumer.InternalConsumer): + async def perform(self, kwargs): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = tests.EmptyTestProducer + CONSUMER_CLASS = TestInternalConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + producer = tests.EmptyTestProducer(channels.get_chan(tests.TEST_CHANNEL)) + await producer.run() + yield TestInternalConsumer() + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_internal_consumer(internal_consumer): + await channels.get_chan(tests.TEST_CHANNEL).new_consumer(internal_consumer=internal_consumer) + + with mock.patch.object(internal_consumer, 'perform', new=mock.AsyncMock()) as mocked_consume_ends: + await channels.get_chan(tests.TEST_CHANNEL).get_internal_producer().send({}) + await tests.mock_was_called_once(mocked_consume_ends) + + +@pytest.mark.asyncio +async def test_default_internal_consumer_callback(internal_consumer): + with pytest.raises(NotImplementedError): + await internal_consumer.internal_callback() + + +@pytest.mark.asyncio +async def test_supervised_consumer(): + class TestSupervisedConsumer(channel_consumer.SupervisedConsumer): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = tests.EmptyTestProducer + CONSUMER_CLASS = TestSupervisedConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + producer = tests.EmptyTestProducer(channels.get_chan(tests.TEST_CHANNEL)) + await producer.run() + consumer = await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + await channels.get_chan(tests.TEST_CHANNEL).get_internal_producer().send({}) + await consumer.queue.join() + await channels.get_chan(tests.TEST_CHANNEL).stop() diff --git a/packages/async_channel/tests/test_producer.py b/packages/async_channel/tests/test_producer.py new file mode 100644 index 0000000000..660d78bcf3 --- /dev/null +++ b/packages/async_channel/tests/test_producer.py @@ -0,0 +1,218 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest + +import async_channel.consumer as channel_consumer +import async_channel.channels as channels +import async_channel.producer as channel_producer +import async_channel.util as util +import tests + + +@pytest.mark.asyncio +async def test_send_internal_producer_without_consumer(): + class TestProducer(channel_producer.Producer): + async def send(self, data, **kwargs): + await super().send(data) + await channels.get_chan(tests.TEST_CHANNEL).stop() + + async def pause(self): + pass + + async def resume(self): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = TestProducer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + await channels.get_chan(tests.TEST_CHANNEL).get_internal_producer().send({}) + + +@pytest.mark.asyncio +async def test_send_producer_without_consumer(): + class TestProducer(channel_producer.Producer): + async def send(self, data, **kwargs): + await super().send(data) + await channels.get_chan(tests.TEST_CHANNEL).stop() + + async def pause(self): + pass + + async def resume(self): + pass + + class TestConsumer(channel_consumer.Consumer): + async def consume(self): + while not self.should_stop: + await self.callback(**(await self.queue.get())) + + class TestChannel(channels.Channel): + PRODUCER_CLASS = TestProducer + CONSUMER_CLASS = TestConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + + producer = TestProducer(channels.get_chan(tests.TEST_CHANNEL)) + await producer.run() + await producer.send({}) + + +@pytest.mark.asyncio +async def test_send_producer_with_consumer(): + class TestConsumer(channel_consumer.Consumer): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = tests.EmptyTestProducer + CONSUMER_CLASS = TestConsumer + + async def callback(data): + assert data == "test" + await channels.get_chan(tests.TEST_CHANNEL).stop() + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + await channels.get_chan(tests.TEST_CHANNEL).new_consumer(callback) + + producer = tests.EmptyTestProducer(channels.get_chan(tests.TEST_CHANNEL)) + await producer.run() + await producer.send({"data": "test"}) + + +@pytest.mark.asyncio +async def test_pause_producer_without_consumers(): + class TestProducer(channel_producer.Producer): + async def pause(self): + await channels.get_chan(tests.TEST_CHANNEL).stop() + + async def pause(self): + pass + + async def resume(self): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = TestProducer + CONSUMER_CLASS = tests.EmptyTestConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + await TestProducer(channels.get_chan(tests.TEST_CHANNEL)).run() + + +@pytest.mark.asyncio +async def test_pause_producer_with_removed_consumer(): + class TestProducer(channel_producer.Producer): + async def pause(self): + await channels.get_chan(tests.TEST_CHANNEL).stop() + + async def pause(self): + pass + + async def resume(self): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = TestProducer + CONSUMER_CLASS = tests.EmptyTestConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + consumer = await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + await TestProducer(channels.get_chan(tests.TEST_CHANNEL)).run() + await channels.get_chan(tests.TEST_CHANNEL).remove_consumer(consumer) + + +@pytest.mark.asyncio +async def test_resume_producer(): + class TestProducer(channel_producer.Producer): + async def resume(self): + await channels.get_chan(tests.TEST_CHANNEL).stop() + + async def pause(self): + pass + + async def resume(self): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = TestProducer + CONSUMER_CLASS = tests.EmptyTestConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + await TestProducer(channels.get_chan(tests.TEST_CHANNEL)).run() + await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + + +@pytest.mark.asyncio +async def test_resume_producer(): + class TestSupervisedConsumer(channel_consumer.SupervisedConsumer): + pass + + class TestChannel(channels.Channel): + PRODUCER_CLASS = tests.EmptyTestProducer + CONSUMER_CLASS = TestSupervisedConsumer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + producer = tests.EmptyTestProducer(channels.get_chan(tests.TEST_CHANNEL)) + await producer.run() + await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + await channels.get_chan(tests.TEST_CHANNEL).new_consumer(tests.empty_test_callback) + await producer.send({"data": "test"}) + await producer.wait_for_processing() + await channels.get_chan(tests.TEST_CHANNEL).stop() + + +@pytest.mark.asyncio +async def test_producer_is_running(): + class TestChannel(channels.Channel): + PRODUCER_CLASS = tests.EmptyTestProducer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + producer = tests.EmptyTestProducer(channels.get_chan(tests.TEST_CHANNEL)) + assert not producer.is_running + await producer.run() + assert producer.is_running + await channels.get_chan(tests.TEST_CHANNEL).stop() + assert not producer.is_running + + +@pytest.mark.asyncio +async def test_producer_pause_resume(): + class TestChannel(channels.Channel): + PRODUCER_CLASS = channel_producer.Producer + + channels.del_chan(tests.TEST_CHANNEL) + await util.create_channel_instance(TestChannel, channels.set_chan) + producer = channel_producer.Producer(channels.get_chan(tests.TEST_CHANNEL)) + assert producer.channel.is_paused + await producer.pause() + assert producer.channel.is_paused + await producer.resume() + assert not producer.channel.is_paused + await producer.pause() + assert producer.channel.is_paused + await producer.resume() + assert not producer.channel.is_paused + await channels.get_chan(tests.TEST_CHANNEL).stop() diff --git a/packages/async_channel/tests/test_synchronized.py b/packages/async_channel/tests/test_synchronized.py new file mode 100644 index 0000000000..6779c4defa --- /dev/null +++ b/packages/async_channel/tests/test_synchronized.py @@ -0,0 +1,273 @@ +# Drakkar-Software Async-Channel +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import pytest_asyncio + +import mock +import pytest + +import async_channel.channels as channels +import async_channel.producer as channel_producer +import async_channel.util as util +import tests + +TEST_SYNCHRONIZED_CHANNEL = "SynchronizedTest" + + +class SynchronizedProducerTest(channel_producer.Producer): + async def send(self, data, **kwargs): + await super().send(data) + await channels.get_chan(TEST_SYNCHRONIZED_CHANNEL).stop() + + async def pause(self): + pass + + async def resume(self): + pass + + +class SynchronizedChannelTest(channels.Channel): + PRODUCER_CLASS = SynchronizedProducerTest + CONSUMER_CLASS = tests.EmptyTestConsumer + + +@pytest_asyncio.fixture +async def synchronized_channel(): + yield await util.create_channel_instance(SynchronizedChannelTest, channels.set_chan, is_synchronized=True) + channels.del_chan(TEST_SYNCHRONIZED_CHANNEL) + + +@pytest.mark.asyncio +async def test_producer_synchronized_perform_consumers_queue_with_one_consumer(synchronized_channel): + async def callback(): + pass + + test_consumer = await synchronized_channel.new_consumer(callback) + + producer = SynchronizedProducerTest(channels.get_chan(TEST_SYNCHRONIZED_CHANNEL)) + await producer.run() + + with mock.patch.object(test_consumer, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_callback: + await producer.send({}) + await tests.mock_was_not_called(mocked_test_consumer_callback) + await producer.synchronized_perform_consumers_queue(1, True, 1) + await tests.mock_was_called_once(mocked_test_consumer_callback) + + +@pytest.mark.asyncio +async def test_producer_synchronized_perform_supervised_consumer_with_processing_empty_queue(synchronized_channel): + continue_event = asyncio.Event() + calls = [] + done_calls = [] + + async def callback(): + calls.append(None) + await asyncio.wait_for(continue_event.wait(), 1) + done_calls.append(None) + + async def set_event_task(): + continue_event.set() + + # use supervised consumers + synchronized_channel.CONSUMER_CLASS = tests.EmptyTestSupervisedConsumer + test_consumer = await synchronized_channel.new_consumer(callback) + + producer = SynchronizedProducerTest(channels.get_chan(TEST_SYNCHRONIZED_CHANNEL)) + await producer.run() + + await producer.send({}) + await test_consumer.run() + try: + await tests.wait_asyncio_next_cycle() + # called already yet + assert calls == [None] + # call not finished + assert done_calls == [] + # queue is empty + assert test_consumer.queue.qsize() == 0 + asyncio.create_task(set_event_task()) + # wait for call to finish even though queue is empty => does not work as we are not joining the + # current processing + await producer.synchronized_perform_consumers_queue(1, False, 1) + assert done_calls == [] + # wait for call to finish even though queue is empty with join + await producer.synchronized_perform_consumers_queue(1, True, 1) + # ensure call actually finished (if we did not join the current task, this call would not have finished) + assert done_calls == [None] + finally: + await test_consumer.stop() + + +@pytest.mark.asyncio +async def test_join(): + # just test this does not throw an error on base consumers + base_consumer = tests.EmptyTestConsumer(None) + await base_consumer.join(1) + + supervised_consumer = tests.EmptyTestSupervisedConsumer(None) + assert supervised_consumer.idle.is_set() + + with mock.patch.object(supervised_consumer.idle, "wait", mock.AsyncMock()) as wait_mock: + await supervised_consumer.join(1) + wait_mock.assert_not_called() + + supervised_consumer.idle.clear() + await supervised_consumer.join(1) + wait_mock.assert_called_once() + + +@pytest.mark.asyncio +async def test_join_queue(): + base_consumer = tests.EmptyTestConsumer(None) + with mock.patch.object(base_consumer.queue, "join", mock.AsyncMock()) as join_mock: + await base_consumer.join_queue() + join_mock.assert_not_called() + + supervised_consumer = tests.EmptyTestSupervisedConsumer(None) + with mock.patch.object(supervised_consumer.queue, "join", mock.AsyncMock()) as join_mock: + await supervised_consumer.join_queue() + join_mock.assert_called_once() + + +@pytest.mark.asyncio +async def test_synchronized_no_tasks(synchronized_channel): + async def callback(): + pass + + test_consumer = await synchronized_channel.new_consumer(callback) + + producer = SynchronizedProducerTest(channels.get_chan(TEST_SYNCHRONIZED_CHANNEL)) + await producer.run() + + assert test_consumer.consume_task is None + assert producer.produce_task is None + + +@pytest.mark.asyncio +async def test_is_consumers_queue_empty_with_one_consumer(synchronized_channel): + async def callback(): + pass + + await synchronized_channel.new_consumer(callback) + + producer = SynchronizedProducerTest(channels.get_chan(TEST_SYNCHRONIZED_CHANNEL)) + await producer.run() + + await producer.send({}) + assert not producer.is_consumers_queue_empty(1) + assert not producer.is_consumers_queue_empty(2) + await producer.synchronized_perform_consumers_queue(1, True, 1) + assert producer.is_consumers_queue_empty(1) + assert producer.is_consumers_queue_empty(2) + + +@pytest.mark.asyncio +async def test_is_consumers_queue_empty_with_multiple_consumers(synchronized_channel): + async def callback(): + pass + + await synchronized_channel.new_consumer(callback) + await synchronized_channel.new_consumer(callback) + await synchronized_channel.new_consumer(callback, priority_level=2) + await synchronized_channel.new_consumer(callback, priority_level=2) + await synchronized_channel.new_consumer(callback, priority_level=3) + + producer = SynchronizedProducerTest(channels.get_chan(TEST_SYNCHRONIZED_CHANNEL)) + await producer.run() + + await producer.send({}) + assert not producer.is_consumers_queue_empty(1) + assert not producer.is_consumers_queue_empty(2) + assert not producer.is_consumers_queue_empty(3) + await producer.synchronized_perform_consumers_queue(1, True, 1) + assert producer.is_consumers_queue_empty(1) + assert not producer.is_consumers_queue_empty(2) + assert not producer.is_consumers_queue_empty(3) + await producer.synchronized_perform_consumers_queue(2, True, 1) + assert producer.is_consumers_queue_empty(1) + assert producer.is_consumers_queue_empty(2) + assert not producer.is_consumers_queue_empty(3) + await producer.synchronized_perform_consumers_queue(2, True, 1) + assert not producer.is_consumers_queue_empty(3) + await producer.synchronized_perform_consumers_queue(3, True, 1) + assert producer.is_consumers_queue_empty(3) + + +@pytest.mark.asyncio +async def test_producer_synchronized_perform_consumers_queue_with_multiple_consumer(synchronized_channel): + async def callback(): + pass + + test_consumer_1_1 = await synchronized_channel.new_consumer(callback) + test_consumer_1_2 = await synchronized_channel.new_consumer(callback) + test_consumer_2_1 = await synchronized_channel.new_consumer(callback, priority_level=2) + test_consumer_2_2 = await synchronized_channel.new_consumer(callback, priority_level=2) + test_consumer_3_1 = await synchronized_channel.new_consumer(callback, priority_level=3) + + producer = SynchronizedProducerTest(channels.get_chan(TEST_SYNCHRONIZED_CHANNEL)) + await producer.run() + + with mock.patch.object(test_consumer_1_1, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_1_1_callback, \ + mock.patch.object(test_consumer_1_2, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_1_2_callback, \ + mock.patch.object(test_consumer_2_1, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_2_1_callback, \ + mock.patch.object(test_consumer_2_2, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_2_2_callback, \ + mock.patch.object(test_consumer_3_1, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_3_1_callback: + await producer.send({}) + await tests.mock_was_not_called(mocked_test_consumer_1_1_callback) + await tests.mock_was_not_called(mocked_test_consumer_1_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_2_1_callback) + await tests.mock_was_not_called(mocked_test_consumer_2_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_3_1_callback) + await producer.synchronized_perform_consumers_queue(1, True, 1) + await tests.mock_was_called_once(mocked_test_consumer_1_1_callback) + await tests.mock_was_called_once(mocked_test_consumer_1_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_2_1_callback) + await tests.mock_was_not_called(mocked_test_consumer_2_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_3_1_callback) + await producer.synchronized_perform_consumers_queue(2, True, 1) + await tests.mock_was_called_once(mocked_test_consumer_1_1_callback) + await tests.mock_was_called_once(mocked_test_consumer_1_2_callback) + await tests.mock_was_called_once(mocked_test_consumer_2_1_callback) + await tests.mock_was_called_once(mocked_test_consumer_2_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_3_1_callback) + assert not producer.is_consumers_queue_empty(3) + await producer.synchronized_perform_consumers_queue(3, True, 1) + await tests.mock_was_called_once(mocked_test_consumer_3_1_callback) + assert producer.is_consumers_queue_empty(1) + assert producer.is_consumers_queue_empty(2) + assert producer.is_consumers_queue_empty(3) + + with mock.patch.object(test_consumer_1_1, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_1_1_callback, \ + mock.patch.object(test_consumer_1_2, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_1_2_callback, \ + mock.patch.object(test_consumer_2_1, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_2_1_callback, \ + mock.patch.object(test_consumer_2_2, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_2_2_callback, \ + mock.patch.object(test_consumer_3_1, 'callback', new=mock.AsyncMock()) as mocked_test_consumer_3_1_callback: + await producer.send({}) + await tests.mock_was_not_called(mocked_test_consumer_1_1_callback) + await tests.mock_was_not_called(mocked_test_consumer_1_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_2_1_callback) + await tests.mock_was_not_called(mocked_test_consumer_2_2_callback) + await tests.mock_was_not_called(mocked_test_consumer_3_1_callback) + assert not producer.is_consumers_queue_empty(2) + await producer.synchronized_perform_consumers_queue(3, True, 1) + await tests.mock_was_called_once(mocked_test_consumer_1_1_callback) + await tests.mock_was_called_once(mocked_test_consumer_1_2_callback) + await tests.mock_was_called_once(mocked_test_consumer_2_1_callback) + await tests.mock_was_called_once(mocked_test_consumer_2_2_callback) + await tests.mock_was_called_once(mocked_test_consumer_3_1_callback) + assert producer.is_consumers_queue_empty(1) + assert producer.is_consumers_queue_empty(2) + assert producer.is_consumers_queue_empty(3) diff --git a/packages/backtesting/.coveragerc b/packages/backtesting/.coveragerc new file mode 100644 index 0000000000..aa450bd2d6 --- /dev/null +++ b/packages/backtesting/.coveragerc @@ -0,0 +1,6 @@ +[run] +omit = + tests/* + venv/* + setup.py + demo.py diff --git a/packages/backtesting/.gitignore b/packages/backtesting/.gitignore new file mode 100644 index 0000000000..7abc208ff1 --- /dev/null +++ b/packages/backtesting/.gitignore @@ -0,0 +1,112 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +.idea/ +*.c +backtesting/data/*.data + +tentacles/ + +cython_debug/ diff --git a/packages/backtesting/BUILD b/packages/backtesting/BUILD new file mode 100644 index 0000000000..e98d244c87 --- /dev/null +++ b/packages/backtesting/BUILD @@ -0,0 +1,22 @@ +python_requirements(name="reqs") + +python_sources(name="octobot_backtesting", sources=["octobot_backtesting/**/*.py"]) + +files( + name="test_data", + sources=["tests/static/**/*"], +) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_backtesting", + ":reqs", + "//:dev_reqs", + ":test_data", + "packages/commons:octobot_commons", + "packages/commons:reqs", + "packages/commons:full_reqs", + ], +) \ No newline at end of file diff --git a/packages/backtesting/CHANGELOG.md b/packages/backtesting/CHANGELOG.md new file mode 100644 index 0000000000..563b627927 --- /dev/null +++ b/packages/backtesting/CHANGELOG.md @@ -0,0 +1,484 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 2026-02-20 +### Added +[Comparators] Add find_matching_data_file to compare backtesting data files + +## [1.10.0] - 2026-01-04 +### Updated +[Requirements] Bump OctoBot-Commons version + +## [1.9.8] - 2025-11-26 +### Added +[Requirements] [full] requirements installation + +## [1.9.7] - 2023-12-11 +### Added +- [Backtesting] extra_backtesting_time_frames + +## [1.9.6] - 2023-12-06 +### Added +- [BacktestData] use_cached_markets + +## [1.9.5] - 2023-10-27 +### Added +- [API] adapt_backtesting_channels return value + +## [1.9.4] - 2023-10-15 +### Updated +- [Timeframes] allow accurate price time frame + +## [1.9.2] - 2023-09-03 +### Updated +- [Timeframes] allow minimal available timeframe instead of forcing 1m +- [TimeChannel] rename to avoid conflicts + +## [1.9.1] - 2023-08-14 +### Added +- [BacktestData] forced_markets + +## [1.9.0] - 2023-05-02 +### Updated +- Supported python versions +### Removed +- Cython + +## [1.8.2] - 2022-04-21 +### Updated +- [ExchangeCollector] handle exchange credentials + +## [1.8.1] - 2022-04-17 +### Updated +- [ExchangeCollector] delete_all + +## [1.8.0] - 2022-12-23 +### Updated +- [ChannelsManager] drop unused producers and priority levels +- cython version + +## [1.7.5] - 2022-10-13 +### Updated +- [ExchangeCollector] Cython header + +## [1.7.4] - 2022-10-12 +### Updated +- [Symbols] Use unified symbols + +## [1.7.3] - 2022-08-24 +### Updated +- [Cache] Optimize cache init + +## [1.7.2] - 2022-06-05 +### Updated +- [Symbols] Update for symbol object + +## [1.7.1] - 2022-05-02 +### Added +- [API] get_data_file_from_importers + +## [1.7.0] - 2022-03-31 +### Updated +- [DataImporter] optimized backtesting historical data reading +- [Databases] migrate databases into octobot-commons + +## [1.6.29] - 2022-01-16 +### Updated +- synchronized_perform_consumers_queue call + +## [1.6.28] - 2022-01-08 +### Updated +- Bump requirements + +## [1.6.27] - 2021-11-18 +### Updated +- aiohttp requirement + +## [1.6.26] - 2021-10-07 +### Updated +- BacktestingApi add start and end timestamp to adapt_backtesting_channels + +## [1.6.25] - 2021-09-22 +### Added +- BacktestingApi add stop_data_collector + +## [1.6.24] - 2021-09-19 +### Updated +- bump requirements + +## [1.6.23] - 2021-09-10 +### Fixed +- Data collector attributes visibility + +## [1.6.22] - 2021-09-07 +### Updated +- Data collector variables names + +## [1.6.21] - 2021-09-06 +### Updated +- BacktestingApi Add initialize_and_run_data_collector +- BacktestingApi Add is_data_collector_in_progress +- BacktestingApi Add get_data_collector_progress +- BacktestingApi Add is_data_collector_finished +- ExchangeDataCollector Add progression info +- DataCollector Add collection status (started, finished) + +## [1.6.20] - 2021-07-19 +### Updated +- bump requirements + +## [1.6.19] - 2021-07-04 +### Fixed +- ExchangeDataCollector cython typing + +## [1.6.18] - 2021-07-04 +### Updated +- ExchangeDataCollector add start and end timestamp for collecting data + +## [1.6.17] - 2021-05-05 +### Updated +- bump requirements + +## [1.6.16] - 2021-03-19 +### Added +- Timestamp to data_file description + +## [1.6.15] - 2021-03-06 +### Updated +- Force chardet version + +## [1.6.14] - 2021-03-03 +### Added +- Python 3.9 support + +## [1.6.13] - 2020-02-25 +### Updated +- Requirements + +## [1.6.12] - 2020-02-03 +### Added +- Default importer handling + +## [1.6.11] - 2020-12-28 +### Updated +- Requirements + +## [1.6.10] - 2020-12-23 +### Updated +- Requirements + +## [1.6.9] - 2020-12-09 +### Updated +- Use OctoBot commons configuration keys + +## [1.6.8] - 2020-11-22 +### Fixed +- Exchange collector tentacles_setup_config visibility + +## [1.6.7] - 2020-11-21 +### Updated +- OctoBot-Trading import + +## [1.6.6] - 2020-11-14 +### Updated +- Enable tentacles exchanges usage in data collector + +## [1.6.5] - 2020-11-07 +### Updated +- Requirements + +## [1.6.4] - 2020-10-30 +### Updated +- Concurrent database cursor management + +## [1.6.3] - 2020-10-29 +### Updated +- Commons requirement + +## [1.6.2] - 2020-10-24 +### Updated +- Aiohttp requirement + +## [1.6.1] - 2020-10-23 +### Updated +- Python 3.8 support + +## [1.6.0] - 2020-10-18 +### Changed +- Imports + +### Updated +- Aiohttp requirement + +## [1.5.20] - 2020-09-01 +### Updated +- Requirements + +## [1.5.19] - 2020-08-15 +### Updated +- Requirements + +## [1.5.18] - 2020-06-28 +### Updated +- Requirements + +## [1.5.17] - 2020-06-19 +### Updated +- Requirements + +## [1.5.16] - 2020-06-05 +### Fixed +- Async concurrency issue on backtesting stop + +## [1.5.15] - 2020-05-30 +### Updated +- Clear connection database attribute on stop + +## [1.5.14] - 2020-05-27 +### Updated +- Cython version + +## [1.5.13] - 2020-05-21 +### Updated +- Remove advanced manager from commons + +## [1.5.12] - 2020-05-17 +### Fixed +- [DataFiles] Cython header + +## [1.5.11] - 2020-05-16 +### Fixed +- [ExchangeCollector] Cython header + +## [1.5.10] - 2020-05-16 +### Updated +- Requirements + +## [1.5.9] - 2020-05-14 +### Changed +- [Database] Fix async concurrent access issues + +## [1.5.8] - 2020-05-14 +### Changed +- [ChannelsManager] Iterates on ChannelConsumerPriorityLevels + +## [1.5.7] - 2020-05-12 +### Fixed +- Date description parsing + +## [1.5.6] - 2020-05-11 +### Added +- Multiple data files support + +## [1.5.5] - 2020-05-10 +### Added +- Backtesting duration API + +## [1.5.4] - 2020-05-10 +### Fixed +- Backtesting progress management + +## [1.5.3] - 2020-05-09 +### Updated +- Channels requirement + +## [1.5.2] - 2020-05-08 +### Fixed +- Time manager memory leaks + +## [1.5.1] - 2020-05-05 +### Fixed +- Timestamp list generation + +## [1.5.0] - 2020-05-02 +### Added +- Synchronized backtesting + +## [1.4.1] - 2020-05-02 +### Updated +- octobot-channels requirements + +## [1.4.0] - 2020-05-02 +### Updated +- Migrate octobot-backtesting, indepdendent-backtesting and strategy-optimizer into OctoBot repository +- backtesting and importer API + +## [1.3.20] - 2020-04-29 +### Fixed +- Time channel non-existing attribute set + +## [1.3.19] - 2020-04-28 +### Updated +- Cython header files + +## [1.3.18] - 2020-04-18 +### Updated +- Time management and debug logs + +## [1.3.17] - 2020-04-18 +### Updated +- Cython header files + +## [1.3.16] - 2020-04-18 +### Added +- Handle backtesting errors + +### Updated +- Use updated evaluator matrix API +- Do not crash on backtesting missing files + +## [1.3.15] - 2020-04-10 +### Added +- Service feeds handling + +## [1.3.14] - 2020-04-07 +### Fixed +- Wildcard imports + +## [1.3.13] - 2020-04-05 +### Updated +- Integrate OctoBot-tentacles-manager 2.0.0 + +## [1.3.12] - 2020-03-30 +### Updated +**Requirements** +- Commons version to 1.3.5 +- Cython version to 0.29.16 +- Channels version to 1.3.22 + +## [1.3.11] - 2020-03-07 +### Updated +**Requirements** +- Commons version to 1.3.0 +- CCXT version to 1.23.67 + +## [1.3.10] - 2020-02-18 +### Fixed +- Remove hard octobot_evaluators import + +## [1.3.9] - 2020-02-17 +### Added +- Backtesting finished event + +### Updated +- Data converters +- Backtesting API +- DataFile converter API +- Database error handling +- IndependentBacktesting flexibily for strategy optimizer + +### Fixed +- Compiled double accuracy + +## [1.3.8] - 2020-02-06 +### Added +- Independent backtesting handling + +### Updated +- Backtesting API +- Importer API +- DataFile API + +## [1.3.7] - 2020-02-02 +### Updated +- Backtesting API +- Importer API + +## [1.3.6] - 2020-01-26 +### Updated +- Backtesting API +- Backtesting workflow + +## [1.3.5] - 2020-01-23 +### Updated +- Backtesting API + +## [1.3.4] - 2020-01-18 +### Added +- AbstractExchangeHistoryCollector and AbstractExchangeLiveCollector + +### Updated +- Data collector to work from web interface +- collect_exchange_historical_data and get_file_description APIs + +## [1.3.3] - 2020-01-02 +### Added +- Backtesting, data_file and exchange_data_collector API +- is_in_progress method in Backtesting +- use_all_available_timeframes in exchange collector + +### Updated +- data_file_manager imports +- Commons version to 1.2.1 + +## [1.3.2] - 2019-12-21 +### Updated +**Requirements** +- Commons version to 1.2.0 +- Channels version to 1.3.6 + +## [1.3.1] - 2019-12-14 +### Updated +**Requirements** +- Commons version to 1.1.51 +- Channels version to 1.3.6 +- aiosqlite version to 0.11.0 + +## [1.3.0] - 2019-11-07 +## Added +- Timestamp interval management (starting and stopping) + +### Fixed +- Database select where clauses generation + +## [1.2.5] - 2019-10-30 +## Added +- OSX support + +## [1.2.4] - 2019-10-09 +## Added +- PyPi manylinux deployment + +## [1.2.3] - 2019-10-08 +### Changed +- Constants VERSION and PROJECT_NAME file location + +## [1.2.2] - 2019-10-08 +## Fixed +- Install with setup + +## [1.2.1] - 2019-10-07 +### Added +- Collector async http with aiohttp + +### Changed +- Improved database management + +## [1.2.0] - 2019-10-05 +### Added +- Converters classes +- Database indexes +- Tentacles management (Importers, Collectors, Converters) +- Database async management + +### Changed +- Fully async backtesting + +## [1.1.1] - 2019-09-18 +### Added +- Time management from OctoBot-Trading + +## [1.1.0] - 2019-09-16 +### Added +- Collectors basis +- Importers basis +- Exchange collectors (Live and History) +- Exchange importer +- Database manager + +## [1.0.0] - 2019-09-10 +### Added +- Package components from OctoBot project diff --git a/packages/backtesting/LICENSE b/packages/backtesting/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/backtesting/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/backtesting/MANIFEST.in b/packages/backtesting/MANIFEST.in new file mode 100644 index 0000000000..95bf85be51 --- /dev/null +++ b/packages/backtesting/MANIFEST.in @@ -0,0 +1,9 @@ +recursive-include octobot_backtesting *.pxd + +include README.md +include LICENSE +include CHANGELOG.md +include requirements.txt +include full_requirements.txt + +global-exclude *.c diff --git a/packages/backtesting/README.md b/packages/backtesting/README.md new file mode 100644 index 0000000000..04601ddb0e --- /dev/null +++ b/packages/backtesting/README.md @@ -0,0 +1,7 @@ +# OctoBot-Backtesting +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/aa0b156e99604b3c98923fffeaea6a49)](https://app.codacy.com/gh/Drakkar-Software/OctoBot-Backtesting?utm_source=github.com&utm_medium=referral&utm_content=Drakkar-Software/OctoBot-Backtesting&utm_campaign=Badge_Grade_Dashboard) +[![Coverage Status](https://coveralls.io/repos/github/Drakkar-Software/OctoBot-Backtesting/badge.svg?branch=master)](https://coveralls.io/github/Drakkar-Software/OctoBot-Backtesting?branch=master) +[![Github-Action-CI](https://github.com/Drakkar-Software/OctoBot-Backtesting/workflows/OctoBot-Backtesting-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Backtesting/actions) +[![Build Status](https://cloud.drone.io/api/badges/Drakkar-Software/OctoBot-Backtesting/status.svg)](https://cloud.drone.io/Drakkar-Software/OctoBot-Backtesting) + +OctoBot backtesting engine package. \ No newline at end of file diff --git a/packages/backtesting/octobot_backtesting/__init__.py b/packages/backtesting/octobot_backtesting/__init__.py new file mode 100644 index 0000000000..1f45d4fbd0 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/__init__.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging + +PROJECT_NAME = "OctoBot-Backtesting" +VERSION = "1.10.0" + +logging.getLogger('aiosqlite').setLevel(logging.ERROR) diff --git a/packages/backtesting/octobot_backtesting/api/__init__.py b/packages/backtesting/octobot_backtesting/api/__init__.py new file mode 100644 index 0000000000..bb0c7569a5 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/__init__.py @@ -0,0 +1,131 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.api import data_file_converters +from octobot_backtesting.api import data_file +from octobot_backtesting.api import importer +from octobot_backtesting.api import backtesting +from octobot_backtesting.api import exchange_data_collector +from octobot_backtesting.api import social_data_collector +from octobot_backtesting.api import data_comparator + +from octobot_backtesting.api.data_file_converters import ( + convert_data_file, +) +from octobot_backtesting.api.data_file import ( + get_all_available_data_files, + delete_data_file, + get_file_description, +) +from octobot_backtesting.api.importer import ( + create_importer, + get_available_data_types, + get_data_file, + get_data_file_from_importers, + get_data_file_path, + get_available_time_frames, + get_available_symbols, + get_data_timestamp_interval, + get_all_ohlcvs, + stop_importer, +) +from octobot_backtesting.api.backtesting import ( + set_time_updater_interval, + set_iteration_timeout, + get_importers, + get_backtesting_current_time, + get_backtesting_starting_time, + get_backtesting_ending_time, + register_backtesting_timestamp_whitelist, + get_backtesting_timestamp_whitelist, + is_backtesting_enabled, + get_backtesting_data_files, + get_backtesting_duration, + create_and_init_backtest_data, + get_preloaded_candles_manager, + initialize_backtesting, + initialize_independent_backtesting_config, + get_backtesting_time_channel_name, + modify_backtesting_timestamps, + adapt_backtesting_channels, + start_backtesting, + stop_backtesting, + stop_independent_backtesting, +) +from octobot_backtesting.api.exchange_data_collector import ( + exchange_historical_data_collector_factory, + exchange_bot_snapshot_data_collector_factory, + initialize_and_run_data_collector, + stop_data_collector, + is_data_collector_in_progress, + get_data_collector_progress, + is_data_collector_finished, +) +from octobot_backtesting.api.social_data_collector import ( + social_historical_data_collector_factory, + social_live_data_collector_factory, +) +from octobot_backtesting.api.data_comparator import ( + find_matching_data_file, +) + +__all__ = [ + "convert_data_file", + "get_all_available_data_files", + "delete_data_file", + "get_file_description", + "create_importer", + "get_available_data_types", + "get_data_file", + "get_data_file_from_importers", + "get_data_file_path", + "get_available_time_frames", + "get_available_symbols", + "get_data_timestamp_interval", + "get_all_ohlcvs", + "stop_importer", + "set_time_updater_interval", + "set_iteration_timeout", + "get_importers", + "get_backtesting_current_time", + "get_backtesting_starting_time", + "get_backtesting_ending_time", + "register_backtesting_timestamp_whitelist", + "get_backtesting_timestamp_whitelist", + "is_backtesting_enabled", + "get_backtesting_data_files", + "get_backtesting_duration", + "create_and_init_backtest_data", + "get_preloaded_candles_manager", + "initialize_backtesting", + "initialize_independent_backtesting_config", + "get_backtesting_time_channel_name", + "modify_backtesting_timestamps", + "adapt_backtesting_channels", + "start_backtesting", + "stop_backtesting", + "stop_independent_backtesting", + "exchange_historical_data_collector_factory", + "exchange_bot_snapshot_data_collector_factory", + "initialize_and_run_data_collector", + "stop_data_collector", + "is_data_collector_in_progress", + "get_data_collector_progress", + "is_data_collector_finished", + "social_historical_data_collector_factory", + "social_live_data_collector_factory", + "find_matching_data_file", +] diff --git a/packages/backtesting/octobot_backtesting/api/backtesting.py b/packages/backtesting/octobot_backtesting/api/backtesting.py new file mode 100644 index 0000000000..30b27d56be --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/backtesting.py @@ -0,0 +1,242 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time + +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.logging as logging +import octobot_commons.time_frame_manager as time_frame_manager + +import octobot_backtesting.api as api +import octobot_backtesting.errors as errors +import octobot_backtesting.backtesting as backtesting_class +import octobot_backtesting.backtest_data as backtest_data +import octobot_backtesting.constants as constants + + +LOGGER_NAME = "BacktestingAPI" + + +async def initialize_backtesting(config, exchange_ids, matrix_id, data_files, + importers_by_data_file=None, backtest_data=None, + bot_id=None) -> backtesting_class.Backtesting: + backtesting_instance = backtesting_class.Backtesting(config=config, + exchange_ids=exchange_ids, + matrix_id=matrix_id, + backtesting_files=data_files, + importers_by_data_file=importers_by_data_file, + backtest_data=backtest_data, + bot_id=bot_id) + await backtesting_instance.create_importers() + await backtesting_instance.initialize() + + if not backtesting_instance.importers: + raise ValueError("No importers created: did you enter the backtesting file(s) to use ?") + + return backtesting_instance + + +async def initialize_independent_backtesting_config(independent_backtesting) -> dict: + return await independent_backtesting.initialize_config() + + +def get_backtesting_time_channel_name(backtesting) -> str: + return backtesting.get_time_chan_name() + + +async def modify_backtesting_timestamps(backtesting, set_timestamp=None, + minimum_timestamp=None, maximum_timestamp=None) -> None: + await backtesting.time_updater.modify(set_timestamp=set_timestamp, + minimum_timestamp=minimum_timestamp, + maximum_timestamp=maximum_timestamp) + + +async def _get_min_max_timestamps(importers, run_on_common_part_only, start_timestamp, end_timestamp, + min_time_frame_to_consider, max_time_frame_to_consider): + # set mininmum and maximum timestamp according to all importers data + try: + short_tf_timestamps = [await api.get_data_timestamp_interval(importer, min_time_frame_to_consider) + for importer in importers] # [(min, max) ... ] + large_tf_timestamps = [await api.get_data_timestamp_interval(importer, max_time_frame_to_consider) + for importer in importers] # [(min, max) ... ] + except errors.MissingTimeFrame as e: + raise RuntimeError(f"Impossible to start backtesting on this configuration: {e}") + min_timestamps = [timestamp[0] for timestamp in short_tf_timestamps] + max_timestamps = [timestamp[1] for timestamp in short_tf_timestamps] + + min_timestamp = max(min_timestamps) if run_on_common_part_only else min(min_timestamps) + max_timestamp = min(max_timestamps) if run_on_common_part_only else max(max_timestamps) + + large_min_timestamps = [timestamp[0] for timestamp in large_tf_timestamps] + min_large_timestamp = max(large_min_timestamps) if run_on_common_part_only else min(large_min_timestamps) + + # set min timestamp where we have data in the largest candle to avoid starting with missing large candles data + min_timestamp = max(min_timestamp, min_large_timestamp) + + if min_timestamp > max_timestamp: + raise RuntimeError(f"No candle data to run backtesting on in this time window: starting at: {min_timestamp} " + f"and ending at: {max_timestamp}") + if start_timestamp is not None and end_timestamp is not None and \ + start_timestamp > end_timestamp: + raise RuntimeError(f"No candle data to run backtesting on in this time window: starting at: {start_timestamp} " + f"and ending at: {end_timestamp}") + + time_frame_sec = common_enums.TimeFramesMinutes[min_time_frame_to_consider] * common_constants.MINUTE_TO_SECONDS + if start_timestamp is not None: + # Adapt start and end timestamp to start exactly at the top of the 1st available candle + # This avoids backtesting to run from mid candle time to mid candle time. + # Adapt start timestamp + start_timestamp = start_timestamp + (time_frame_sec - start_timestamp % time_frame_sec) + + if min_timestamp <= start_timestamp < (end_timestamp if end_timestamp else max_timestamp): + min_timestamp = start_timestamp + else: + logging.get_logger(LOGGER_NAME).warning(f"Can't set the minimum timestamp to {start_timestamp}. " + f"The minimum available({min_timestamp}) will be used instead.") + if end_timestamp is not None: + # Adapt end timestamp + end_timestamp = end_timestamp - (end_timestamp % time_frame_sec) + + if max_timestamp >= end_timestamp > start_timestamp if start_timestamp else min_timestamp: + max_timestamp = end_timestamp + else: + logging.get_logger(LOGGER_NAME).warning(f"Can't set the maximum timestamp to {end_timestamp}. " + f"The maximum available({max_timestamp}) will be used instead.") + return min_timestamp, max_timestamp + + +async def adapt_backtesting_channels(backtesting, config, importer_class, + run_on_common_part_only=True, + start_timestamp=None, end_timestamp=None): + importers = backtesting.get_importers(importer_class) + if not importers: + raise RuntimeError("No exchange importer has been found for this data file, backtesting can't start.") + sorted_time_frames = time_frame_manager.sort_time_frames(time_frame_manager.get_config_time_frame(config)) + sorted_available_time_frames = time_frame_manager.sort_time_frames(api.get_available_time_frames(importers[0])) + min_available_time_frame = sorted_available_time_frames[0] + if not sorted_time_frames or ( + backtesting.use_accurate_price_time_frame() and sorted_time_frames[0] != min_available_time_frame + ): + # use min available timeframe as default if no timeframe is enabled or if add min available timeframe + # if not already in handled time frames + sorted_time_frames.insert(0, min_available_time_frame) + min_time_frame_to_consider = sorted_time_frames[0] + max_time_frame_to_consider = sorted_time_frames[-1] + _ensure_extra_time_frames(min_time_frame_to_consider, config) + min_timestamp, max_timestamp = await _get_min_max_timestamps(importers, run_on_common_part_only, + start_timestamp, end_timestamp, + min_time_frame_to_consider, max_time_frame_to_consider) + + await modify_backtesting_timestamps( + backtesting, + minimum_timestamp=int(min_timestamp), + maximum_timestamp=int(max_timestamp)) + try: + import octobot_trading.api as exchange_api + + if exchange_api.has_only_ohlcv(importers): + set_time_updater_interval(backtesting, + common_enums.TimeFramesMinutes[min_time_frame_to_consider] * + common_constants.MINUTE_TO_SECONDS) + except ImportError: + logging.get_logger(LOGGER_NAME).error("requires OctoBot-Trading package installed") + return min_timestamp, max_timestamp + + +def _ensure_extra_time_frames(min_time_frame_to_consider, config): + min_tf_minutes = common_enums.TimeFramesMinutes[min_time_frame_to_consider] + for required_extra_time_frame in config.get(common_constants.CONFIG_REQUIRED_EXTRA_TIMEFRAMES, []): + if common_enums.TimeFramesMinutes[common_enums.TimeFrames(required_extra_time_frame)] < min_tf_minutes: + raise errors.MissingTimeFrame( + f"Missing required (or lower) time frame in data file: {required_extra_time_frame}" + ) + + +def set_time_updater_interval(backtesting, interval_in_seconds): + backtesting.time_manager.time_interval = interval_in_seconds + + +def set_iteration_timeout(backtesting, iteration_timeout_in_seconds): + backtesting.time_updater.channels_manager.refresh_timeout = iteration_timeout_in_seconds + + +async def start_backtesting(backtesting) -> None: + await backtesting.start_time_updater() + + +async def stop_backtesting(backtesting) -> None: + await backtesting.stop() + + +async def stop_independent_backtesting(independent_backtesting) -> None: + await independent_backtesting.stop() + + +def get_importers(backtesting) -> list: + return backtesting.importers + + +def get_backtesting_current_time(backtesting) -> float: + return backtesting.time_manager.current_timestamp + + +def get_backtesting_starting_time(backtesting) -> float: + return backtesting.time_manager.starting_timestamp + + +def get_backtesting_ending_time(backtesting) -> float: + return backtesting.time_manager.finishing_timestamp + + +def register_backtesting_timestamp_whitelist(backtesting, timestamps, check_callback, append_to_whitelist=False): + backtesting.time_manager.register_timestamp_whitelist(timestamps, check_callback, + append_to_whitelist=append_to_whitelist) + + +def get_backtesting_timestamp_whitelist(backtesting) -> list: + return backtesting.time_manager.timestamps_whitelist + + +def is_backtesting_enabled(config) -> bool: + return constants.CONFIG_BACKTESTING in config \ + and common_constants.CONFIG_ENABLED_OPTION in config[constants.CONFIG_BACKTESTING] \ + and config[constants.CONFIG_BACKTESTING][common_constants.CONFIG_ENABLED_OPTION] + + +def get_backtesting_data_files(config) -> list: + return config.get(constants.CONFIG_BACKTESTING, {}).get(constants.CONFIG_BACKTESTING_DATA_FILES, []) + + +def get_backtesting_duration(backtesting) -> float: + if backtesting.time_updater.simulation_duration > 0: + return backtesting.time_updater.simulation_duration + return time.time() - backtesting.time_updater.starting_time + + +async def create_and_init_backtest_data(data_files, config, tentacles_config, use_accurate_price_time_frame) \ + -> backtest_data.BacktestData: + backtest_data_inst = backtest_data.BacktestData(data_files, config, tentacles_config, use_accurate_price_time_frame) + await backtest_data_inst.initialize() + return backtest_data_inst + + +async def get_preloaded_candles_manager(backtesting, exchange, symbol, time_frame): + if backtesting.backtest_data is None: + return None + return await backtesting.backtest_data.get_preloaded_candles_manager( + exchange, symbol, time_frame, + get_backtesting_starting_time(backtesting), get_backtesting_ending_time(backtesting) + ) diff --git a/packages/backtesting/octobot_backtesting/api/data_comparator.py b/packages/backtesting/octobot_backtesting/api/data_comparator.py new file mode 100644 index 0000000000..5e15461771 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/data_comparator.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.comparators as comparators +import octobot_backtesting.constants as constants + + +async def find_matching_data_file(data_path=constants.BACKTESTING_FILE_PATH, **kwargs) -> str | None: + return await comparators.DataComparator(data_path).find_matching_data_file(**kwargs) diff --git a/packages/backtesting/octobot_backtesting/api/data_file.py b/packages/backtesting/octobot_backtesting/api/data_file.py new file mode 100644 index 0000000000..698ae2d045 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/data_file.py @@ -0,0 +1,55 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os.path as path + +import octobot_backtesting.constants as constants +import octobot_backtesting.data as data +import octobot_backtesting.enums as enums + + +async def get_file_description(file_name, data_path=constants.BACKTESTING_FILE_PATH) -> dict: + description = await data.get_file_description(path.join(data_path, file_name)) + if description: + return { + enums.DataFormatKeys.SYMBOLS.value: description[enums.DataFormatKeys.SYMBOLS.value], + enums.DataFormatKeys.EXCHANGE.value: description[enums.DataFormatKeys.EXCHANGE.value], + enums.DataFormatKeys.SERVICES.value: description.get(enums.DataFormatKeys.SERVICES.value, []), + enums.DataFormatKeys.DATE.value: data.get_date(int(description[enums.DataFormatKeys.TIMESTAMP.value])), + enums.DataFormatKeys.TIMESTAMP.value: int(description[enums.DataFormatKeys.TIMESTAMP.value]), + enums.DataFormatKeys.START_TIMESTAMP.value: int(description[enums.DataFormatKeys.START_TIMESTAMP.value]), + enums.DataFormatKeys.END_TIMESTAMP.value: int(description[enums.DataFormatKeys.END_TIMESTAMP.value]), + enums.DataFormatKeys.START_DATE.value: data.get_date( + int(description[enums.DataFormatKeys.START_TIMESTAMP.value])).split(" at ")[0], + enums.DataFormatKeys.END_DATE.value: data.get_date( + int(description[enums.DataFormatKeys.END_TIMESTAMP.value])).split(" at ")[0], + enums.DataFormatKeys.TIME_FRAMES.value: [tf.value + for tf in description[enums.DataFormatKeys.TIME_FRAMES.value]], + enums.DataFormatKeys.CANDLES_LENGTH.value: description[enums.DataFormatKeys.CANDLES_LENGTH.value], + enums.DataFormatKeys.TYPE.value: "OctoBot data file", + enums.DataFormatKeys.DATA_TYPE.value: description.get( + enums.DataFormatKeys.DATA_TYPE.value, enums.DataType.EXCHANGE.value + ), + } + else: + return description + + +def get_all_available_data_files(data_path=constants.BACKTESTING_FILE_PATH) -> list: + return data.get_all_available_data_files(data_path) + + +def delete_data_file(file_name, data_path=constants.BACKTESTING_FILE_PATH) -> tuple: + return data.delete_data_file(data_path, file_name) diff --git a/packages/backtesting/octobot_backtesting/api/data_file_converters.py b/packages/backtesting/octobot_backtesting/api/data_file_converters.py new file mode 100644 index 0000000000..48232ea1eb --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/data_file_converters.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os.path as path +import typing + +import octobot_backtesting.converters as converters +import octobot_commons.tentacles_management as tentacles_management + + +async def convert_data_file(data_file_path) -> typing.Optional[str]: + if data_file_path and path.isfile(data_file_path): + converter_classes = tentacles_management.get_all_classes_from_parent(converters.DataConverter) + for converter_class in converter_classes: + converter = converter_class(data_file_path) + if await converter.can_convert(): + if await converter.convert(): + return converter.converted_file + return None diff --git a/packages/backtesting/octobot_backtesting/api/exchange_data_collector.py b/packages/backtesting/octobot_backtesting/api/exchange_data_collector.py new file mode 100644 index 0000000000..75b0004943 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/exchange_data_collector.py @@ -0,0 +1,91 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors as collectors +import octobot_commons.tentacles_management as tentacles_management + + +def exchange_historical_data_collector_factory(exchange_name, + exchange_type, + tentacles_setup_config, + symbols, + time_frames=None, + start_timestamp=None, + end_timestamp=None, + config=None): + return _exchange_collector_factory(collectors.AbstractExchangeHistoryCollector, + exchange_name, + exchange_type, + tentacles_setup_config, + symbols, + time_frames, + start_timestamp, + end_timestamp, + config) + + +def exchange_bot_snapshot_data_collector_factory(exchange_name, + tentacles_setup_config, + symbols, + exchange_id, + time_frames=None, + start_timestamp=None, + end_timestamp=None, + config=None): + collector = _exchange_collector_factory(collectors.AbstractExchangeBotSnapshotCollector, + exchange_name, + None, + tentacles_setup_config, + symbols, + time_frames, + start_timestamp, + end_timestamp, + config) + collector.register_exchange_id(exchange_id) + return collector + + +def _exchange_collector_factory(collector_parent_class, exchange_name, exchange_type, tentacles_setup_config, symbols, + time_frames, start_timestamp, end_timestamp, config): + collector_class = tentacles_management.get_single_deepest_child_class(collector_parent_class) + collector_instance = collector_class(config or {}, exchange_name, exchange_type, + tentacles_setup_config, symbols, time_frames, + use_all_available_timeframes=time_frames is None, + start_timestamp=start_timestamp, end_timestamp=end_timestamp) + return collector_instance + + +async def initialize_and_run_data_collector(data_collector): + await data_collector.initialize() + await data_collector.start() + return data_collector.file_name + + +async def stop_data_collector(data_collector): + return await data_collector.stop(should_stop_database=False) if data_collector else False + + +def is_data_collector_in_progress(data_collector): + return data_collector.is_in_progress() if data_collector else False + + +def get_data_collector_progress(data_collector): + return (data_collector.get_current_step_index(), data_collector.get_total_steps(), + data_collector.get_current_step_percent()) if data_collector else (0, 0, 0) + + +def is_data_collector_finished(data_collector): + return not is_data_collector_in_progress( + data_collector) and data_collector.is_finished() if data_collector else False diff --git a/packages/backtesting/octobot_backtesting/api/importer.py b/packages/backtesting/octobot_backtesting/api/importer.py new file mode 100644 index 0000000000..2df98a4dde --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/importer.py @@ -0,0 +1,79 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.enums as backtesting_enums +import octobot_backtesting.importers as importers +import octobot_backtesting.util as util + +import octobot_commons.errors as commons_errors +import octobot_commons.databases as databases + + +async def create_importer(config, backtesting_file, default_importer=None): + return await util.create_importer_from_backtesting_file_name(config, backtesting_file, + default_importer=default_importer) + + +def get_available_data_types(importer) -> list: + return importer.available_data_types + + +def get_data_file(importer) -> str: + return importer.file_path + + +def get_data_file_from_importers(available_importers, symbol, time_frame) -> str: + for importer in available_importers: + if symbol in get_available_symbols(importer) and \ + time_frame in get_available_time_frames(importer): + return get_data_file_path(importer) + return None + + +def get_data_file_path(importer) -> str: + return importer.adapt_file_path_if_necessary() + + +def get_available_time_frames(exchange_importer) -> list: + return exchange_importer.time_frames + + +def get_available_symbols(exchange_importer) -> list: + return [symbol for symbol in exchange_importer.symbols] + + +async def get_data_timestamp_interval(exchange_importer, time_frame=None) -> (float, float): + time_frame_value = time_frame.value if time_frame is not None else None + return await exchange_importer.get_data_timestamp_interval(time_frame=time_frame_value) + + +async def get_all_ohlcvs(database_path, exchange_name, symbol, time_frame, + inferior_timestamp=-1, superior_timestamp=-1) -> list: + timestamps, operations = importers.get_operations_from_timestamps(superior_timestamp, inferior_timestamp) + try: + async with databases.new_sqlite_database(database_path) as database: + candles = await database.select_from_timestamp(backtesting_enums.ExchangeDataTables.OHLCV, + exchange_name=exchange_name, symbol=symbol, + time_frame=time_frame.value, + timestamps=timestamps, + operations=operations) + return [candle_with_metadata[-1] + for candle_with_metadata in sorted(importers.import_ohlcvs(candles), key=lambda x: x[0])] + except commons_errors.DatabaseNotFoundError: + return [] + + +async def stop_importer(importer) -> None: + await importer.stop() diff --git a/packages/backtesting/octobot_backtesting/api/social_data_collector.py b/packages/backtesting/octobot_backtesting/api/social_data_collector.py new file mode 100644 index 0000000000..88d66900d3 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/api/social_data_collector.py @@ -0,0 +1,103 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors as collectors +import octobot_commons.tentacles_management as tentacles_management + + +def social_historical_data_collector_factory(services, + tentacles_setup_config, + sources=None, + symbols=None, + start_timestamp=None, + end_timestamp=None, + config=None): + """ + Factory function to create a social history data collector. + :param services: List of service class names (feed class + required services) + :param tentacles_setup_config: Tentacles setup configuration + :param sources: Optional list of sources/channels to collect from + :param symbols: Optional list of symbols to filter by + :param start_timestamp: Optional start timestamp in milliseconds + :param end_timestamp: Optional end timestamp in milliseconds + :param config: Optional configuration dict + :return: SocialHistoryDataCollector instance + """ + return _social_collector_factory(collectors.AbstractSocialHistoryCollector, + services, + tentacles_setup_config, + sources, + symbols, + start_timestamp, + end_timestamp, + config) + + +def social_live_data_collector_factory(services, + tentacles_setup_config, + sources=None, + symbols=None, + service_feed_class=None, + channel_name=None, + config=None): + """ + Factory function to create a social live data collector. + :param services: List of service class names (feed class + required services) + :param tentacles_setup_config: Tentacles setup configuration + :param sources: Optional list of sources/channels to collect from + :param symbols: Optional list of symbols to filter by + :param service_feed_class: Optional service feed class to subscribe to + :param channel_name: Optional channel name to subscribe to directly + :param config: Optional configuration dict + :return: SocialLiveDataCollector instance + """ + collector_class = tentacles_management.get_single_deepest_child_class( + collectors.AbstractSocialLiveCollector + ) + collector_instance = collector_class( + config or {}, + services, + tentacles_setup_config, + sources=sources, + symbols=symbols, + service_feed_class=service_feed_class, + channel_name=channel_name + ) + return collector_instance + + +def _social_collector_factory(collector_parent_class, services, tentacles_setup_config, + sources, symbols, start_timestamp, end_timestamp, config): + collector_class = tentacles_management.get_single_deepest_child_class(collector_parent_class) + collector_instance = collector_class( + config or {}, + services, + tentacles_setup_config, + sources=sources, + symbols=symbols, + use_all_available_sources=sources is None, + start_timestamp=start_timestamp, + end_timestamp=end_timestamp + ) + return collector_instance + + +from octobot_backtesting.api.exchange_data_collector import ( + initialize_and_run_data_collector, + stop_data_collector, + is_data_collector_in_progress, + get_data_collector_progress, + is_data_collector_finished, +) diff --git a/packages/backtesting/octobot_backtesting/backtest_data.py b/packages/backtesting/octobot_backtesting/backtest_data.py new file mode 100644 index 0000000000..a77519411b --- /dev/null +++ b/packages/backtesting/octobot_backtesting/backtest_data.py @@ -0,0 +1,69 @@ +import octobot_backtesting.util as util +import octobot_commons.enums as commons_enums + + +class BacktestData: + def __init__(self, data_files, config, tentacles_config, use_accurate_price_time_frame): + self.data_files = data_files + self.config = config + self.tentacles_config = tentacles_config + self.importers_by_data_file = None + self.preloaded_candle_managers = {} + self.use_cached_markets: bool = False + self.default_importer = None + self.use_accurate_price_time_frame: bool = use_accurate_price_time_frame + + async def initialize(self): + self.importers_by_data_file = { + data_file: await util.create_importer_from_backtesting_file_name(self.config, data_file, + default_importer=self.default_importer) + for data_file in self.data_files + } + + async def get_preloaded_candles_manager(self, exchange, symbol, time_frame, start_timestamp, end_timestamp): + key = self._get_key(exchange, symbol, time_frame, start_timestamp, end_timestamp) + try: + return self.preloaded_candle_managers[key] + except KeyError: + try: + import octobot_trading.api as trading_api + preloaded_candles = await self._get_all_candles(exchange, symbol, time_frame, + start_timestamp, end_timestamp) + self.preloaded_candle_managers[key] = await trading_api.create_preloaded_candles_manager( + preloaded_candles + ) + return self.preloaded_candle_managers[key] + except ImportError: + self.preloaded_candle_managers[key] = None + return self.preloaded_candle_managers[key] + + async def _get_all_candles(self, exchange, symbol, time_frame, start_timestamp, end_timestamp): + # manually filter max candles not to change chronological cache behavior + all_candles = await self._get_importer(exchange, symbol).get_ohlcv_from_timestamps( + exchange_name=exchange, + symbol=symbol, + time_frame=time_frame, + inferior_timestamp=start_timestamp, + ) + return [ + candle[-1] + for candle in all_candles + if candle[-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value] <= end_timestamp + ] + + def _get_importer(self, exchange, symbol): + for importer in self.importers_by_data_file.values(): + if exchange in importer.exchange_name == exchange and symbol in importer.symbols: + return importer + raise KeyError("Importer not found") + + def reset_cached_indexes(self): + for importer in self.importers_by_data_file.values(): + importer.reset_cache() + + async def stop(self): + for importer in self.importers_by_data_file.values(): + await importer.stop() + + def _get_key(self, *identifiers): + return "-".join(str(i) for i in identifiers) diff --git a/packages/backtesting/octobot_backtesting/backtesting.py b/packages/backtesting/octobot_backtesting/backtesting.py new file mode 100644 index 0000000000..6ec61c2e4c --- /dev/null +++ b/packages/backtesting/octobot_backtesting/backtesting.py @@ -0,0 +1,132 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import async_channel.channels as channels +import async_channel.util as channel_util + +import octobot_commons.logging as logging +import octobot_commons.tentacles_management as tentacles_management + +import octobot_backtesting.util as backtesting_util +import octobot_backtesting.time as backtesting_time + + +class Backtesting: + def __init__(self, config, exchange_ids, matrix_id, backtesting_files, + importers_by_data_file=None, backtest_data=None, bot_id=None): + self.config = config + self.backtesting_files = backtesting_files + self.importers_by_data_file = importers_by_data_file or {} + self.logger = logging.get_logger(self.__class__.__name__) + + self.exchange_ids = exchange_ids + self.matrix_id = matrix_id + self.bot_id = bot_id or "" + + self.importers = [] + self.backtest_data = backtest_data + self.time_manager = None + self.time_updater = None + self.time_channel = None + + async def initialize(self): + time_chan_name = self.get_time_chan_name() # not in try to be able to raise on error + try: + self.time_manager = backtesting_time.TimeManager(config=self.config) + self.time_manager.initialize() + + self.time_channel = await channel_util.create_channel_instance( + backtesting_time.TimeChannel, + channels.set_chan, + is_synchronized=True, + channel_name=time_chan_name + ) + + self.time_updater = backtesting_time.TimeUpdater( + channels.get_chan(self.get_time_chan_name()), + self + ) + except Exception as e: + self.logger.exception(e, True, f"Error when initializing backtesting : {e}.") + + def use_accurate_price_time_frame(self) -> bool: + for importer in self.importers: + if not importer.provides_accurate_price_time_frame(): + return False + if self.backtest_data: + return self.backtest_data.use_accurate_price_time_frame + return True + + def get_time_chan_name(self): + return backtesting_time.TimeChannel.get_name(self.bot_id) + + async def stop(self): + await self.delete_time_channel() + + async def delete_time_channel(self): + await self.time_channel.stop() + for consumer in self.time_channel.consumers: + await self.time_channel.remove_consumer(consumer) + self.time_channel.flush() + channels.del_chan(self.get_time_chan_name()) + + async def start_time_updater(self): + await self.time_updater.run() + + async def _create_importer(self, backtesting_file): + return await backtesting_util.create_importer_from_backtesting_file_name( + self.config, + backtesting_file, + backtesting_util.get_default_importer() + ) + + async def create_importers(self): + try: + self.importers = [] + for backtesting_file in self.backtesting_files: + if self.importers_by_data_file is not None and backtesting_file in self.importers_by_data_file: + self.importers.append(self.importers_by_data_file[backtesting_file]) + else: + self.importers.append(await self._create_importer(backtesting_file)) + except TypeError: + pass + + async def handle_time_update(self, timestamp): + if self.time_manager: + self.time_manager.set_current_timestamp(timestamp) + + def get_importers(self, importer_parent_class=None) -> list: + return [importer + for importer in self.importers + if tentacles_management.default_parents_inspection(importer.__class__, importer_parent_class)] \ + if importer_parent_class is not None else self.importers + + def get_progress(self): + if self._has_nothing_to_do(): + return 0 + return 1 - (self.time_manager.get_remaining_iteration() / self.time_manager.get_total_iteration()) + + def is_in_progress(self): + if self._has_nothing_to_do(): + return False + else: + return self.time_manager.get_remaining_iteration() > 0 + + def _has_nothing_to_do(self): + return not self.time_manager or self.time_manager.get_total_iteration() == 0 + + def has_finished(self): + return self.time_updater and self.time_updater.finished_event.is_set() diff --git a/packages/backtesting/octobot_backtesting/channels_manager.py b/packages/backtesting/octobot_backtesting/channels_manager.py new file mode 100644 index 0000000000..8630776961 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/channels_manager.py @@ -0,0 +1,171 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import copy + +import async_channel.channels as channels +import async_channel.enums as channel_enums + +import octobot_commons.channels_name as channels_name +import octobot_commons.list_util as list_util +import octobot_commons.logging as logging +import octobot_commons.asyncio_tools as asyncio_tools + + +class ChannelsManager: + DEFAULT_REFRESH_TIMEOUT = 15 + + def __init__(self, exchange_ids, matrix_id, time_chan_name, refresh_timeout=DEFAULT_REFRESH_TIMEOUT): + self.logger = logging.get_logger(self.__class__.__name__) + self.exchange_ids = exchange_ids + self.matrix_id = matrix_id + self.time_chan_name = time_chan_name + self.refresh_timeout = refresh_timeout + self.producers = [] + self.initial_producers = [] + self.iteration_task = None + self.should_stop = False + self.producers_by_priority_levels = {} + + async def initialize(self) -> None: + """ + Initialize Backtesting channels manager + """ + self.logger.debug("Initializing producers...") + try: + self.initial_producers = list_util.flatten_list( + _get_backtesting_producers(self.time_chan_name) + + self._get_trading_producers() + + self._get_evaluator_producers() + ) + self.producers = copy.copy(self.initial_producers) + + self.producers_by_priority_levels = { + priority_level.value: self.producers + for priority_level in channel_enums.ChannelConsumerPriorityLevels + } + + # Initialize all producers by calling producer.start() + for producer in list_util.flatten_list(self._get_trading_producers() + self._get_evaluator_producers()): + await producer.start() + except Exception as exception: + self.logger.exception(exception, True, f"Error when initializing backtesting: {exception}") + raise + + def clear_empty_channels_producers(self): + self.producers = [ + producer + for producer in self.initial_producers + if producer.channel.get_consumers() + ] + + def update_producers_by_priority_levels(self): + self.producers_by_priority_levels = { + priority_level.value: _get_producers_with_priority_level_consumers(self.producers, priority_level.value) + for priority_level in channel_enums.ChannelConsumerPriorityLevels + if _check_producers_has_priority_consumers(self.producers, priority_level.value) + } + + async def handle_new_iteration(self, current_timestamp) -> None: + for level_key, producers in self.producers_by_priority_levels.items(): + try: + if _check_producers_consumers_emptiness(producers, level_key): + # avoid creating tasks when not necessary + continue + self.iteration_task = self.refresh_priority_level(producers, level_key, True) + await self.iteration_task + # trigger waiting events + await asyncio_tools.wait_asyncio_next_cycle() + # massive slow down + # self.iteration_task = await asyncio.wait_for(self.refresh_priority_level(level_key.value, True), + # timeout=self.refresh_timeout) + except asyncio.TimeoutError: + self.logger.error(f"Refreshing priority level {level_key} has been timed out at timestamp " + f"{current_timestamp}.") + + async def refresh_priority_level(self, producers, priority_level: int, join_consumers: bool) -> None: + while not self.should_stop: + for producer in producers: + await producer.synchronized_perform_consumers_queue(priority_level, join_consumers, self.refresh_timeout) + if _check_producers_consumers_emptiness(self.producers, priority_level): + break + + def stop(self): + self.should_stop = True + + def flush(self): + self.producers = [] + self.initial_producers = [] + self.producers_by_priority_levels = {} + self.iteration_task = None + + def _get_trading_producers(self): + import octobot_trading.exchange_channel as exchange_channel + return [ + _get_channel_producers(exchange_channel.get_chan(channel_name.value, exchange_id)) + for exchange_id in self.exchange_ids + for channel_name in channels_name.OctoBotTradingChannelsName + ] + + def _get_evaluator_producers(self): + import octobot_evaluators.evaluators.channel as evaluators_channel + return [ + _get_channel_producers(evaluators_channel.get_chan(channel_name.value, self.matrix_id)) + for channel_name in channels_name.OctoBotEvaluatorsChannelsName + ] + + +def _get_channel_producers(channel): + if channel.producers: + return channel.producers + return [channel.get_internal_producer()] + + +def _get_backtesting_producers(time_chan_name): + return [ + _get_channel_producers(channels.get_chan(channel_name)) + for channel_name in [time_chan_name] + ] + + +def _check_producers_consumers_emptiness(producers, priority_level): + for producer in producers: + try: + if not producer.is_consumers_queue_empty(priority_level): + return False + except AttributeError: + if producer.channel is None: + # channel has been cleared, there is nothing to do + return True + else: + # unexpected, propagate + raise + return True + + +def _check_producers_has_priority_consumers(producers, priority_level): + for producer in producers: + if producer.channel.get_prioritized_consumers(priority_level): + return True + return False + + +def _get_producers_with_priority_level_consumers(producers, priority_level): + return [ + producer + for producer in producers + if producer.channel.get_prioritized_consumers(priority_level) + ] diff --git a/packages/backtesting/octobot_backtesting/collectors/__init__.py b/packages/backtesting/octobot_backtesting/collectors/__init__.py new file mode 100644 index 0000000000..3d8a6a1d35 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/__init__.py @@ -0,0 +1,47 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.collectors import data_collector +from octobot_backtesting.collectors.data_collector import ( + DataCollector, +) + +from octobot_backtesting.collectors import exchanges +from octobot_backtesting.collectors.exchanges import ( + ExchangeDataCollector, + AbstractExchangeBotSnapshotCollector, + AbstractExchangeHistoryCollector, + AbstractExchangeLiveCollector, +) + +from octobot_backtesting.collectors import social +from octobot_backtesting.collectors.social import ( + SocialDataCollector, + AbstractSocialHistoryCollector, + AbstractSocialLiveCollector, +) + + +__all__ = [ + "DataCollector", + "ExchangeDataCollector", + "AbstractExchangeBotSnapshotCollector", + "AbstractExchangeHistoryCollector", + "AbstractExchangeLiveCollector", + "SocialDataCollector", + "AbstractSocialHistoryCollector", + "AbstractSocialLiveCollector", +] diff --git a/packages/backtesting/octobot_backtesting/collectors/data_collector.py b/packages/backtesting/octobot_backtesting/collectors/data_collector.py new file mode 100644 index 0000000000..a6900fc2e9 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/data_collector.py @@ -0,0 +1,130 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import copy +import json +import os.path as path +import os +import time + +import aiohttp + +import octobot_commons.logging as logging +import octobot_commons.databases as databases + +import octobot_backtesting.enums as enums +import octobot_backtesting.constants as constants +import octobot_backtesting.data as data +import octobot_backtesting.importers as importers + + +class DataCollector: + IMPORTER = importers.DataImporter + + def __init__(self, config, + path=constants.BACKTESTING_FILE_PATH, + data_format=enums.DataFormats.REGULAR_COLLECTOR_DATA): + self.config = copy.deepcopy(config) + self.path = path + self.logger = logging.get_logger(self.__class__.__name__) + + self.should_stop = False + self.file_name = data.get_backtesting_file_name(self.__class__, + self.get_file_identifier, + data_format=data_format) + + self.database = None + self.aiohttp_session = None + self.file_path = None + self.temp_file_path = None + self.finished = False + self.in_progress = False + self._ensure_file_path() + self.set_file_path() + + async def initialize(self) -> None: + pass + + def get_file_identifier(self): + return time.time() + + async def stop(self, **kwargs) -> None: + self.should_stop = True + + async def start(self) -> None: + raise NotImplementedError("Start is not implemented") + + def is_in_progress(self): + return self.in_progress + + def is_finished(self): + return self.finished + + def _ensure_file_path(self): + if not path.isdir(self.path): + os.makedirs(self.path) + + def set_file_path(self) -> None: + self.file_path = path.join(self.path, self.file_name) if self.path else self.file_name + self.temp_file_path = self.file_path + constants.BACKTESTING_DATA_FILE_TEMP_EXT + + def create_database(self) -> None: + if not self.database: + self.database = databases.SQLiteDatabase(self.temp_file_path) + + def finalize_database(self): + os.rename(self.temp_file_path, self.file_path) + + def create_aiohttp_session(self) -> None: + if not self.aiohttp_session: + self.aiohttp_session = aiohttp.ClientSession() + + async def stop_aiohttp_session(self) -> None: + if self.aiohttp_session: + await self.aiohttp_session.close() + + async def execute_request(self, url, params=None, headers=None): + response = await self.aiohttp_session.get(url, params=params, headers=headers) + if response.status != 200: + if response.status == 502: # bad gateway (should retry) + self.logger.warning("Got a bad gateway error, retrying...") + await asyncio.sleep(60) + return await self.execute_request(url, params=params, headers=headers) + else: + try: + message = json.loads(await response.text())['message'] + except json.JSONDecodeError: + message = await response.text() + self.logger.error(f"Error when requesting url {url} / " + f"message : {message} / " + f"code : {response.status} / " + f"reason : {response.reason}") + return None + try: + return json.loads(await response.text()) + except aiohttp.ClientPayloadError as e: + self.logger.error(f"Failed to extract payload text : {e}") + return None + + async def fetch_with_continuation(self, continuation_url_key, json_answer, headers, callback): + if continuation_url_key in json_answer: + answer = await self.execute_request(json_answer[continuation_url_key], headers=headers) + if answer is None: + return + + await callback(answer["data"]) + + await self.fetch_with_continuation(continuation_url_key, answer, headers, callback) diff --git a/packages/backtesting/octobot_backtesting/collectors/exchanges/__init__.py b/packages/backtesting/octobot_backtesting/collectors/exchanges/__init__.py new file mode 100644 index 0000000000..3477204ede --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/exchanges/__init__.py @@ -0,0 +1,41 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.collectors.exchanges import exchange_collector +from octobot_backtesting.collectors.exchanges.exchange_collector import ( + ExchangeDataCollector, +) + +from octobot_backtesting.collectors.exchanges import abstract_exchange_bot_snapshot_collector +from octobot_backtesting.collectors.exchanges import abstract_exchange_history_collector +from octobot_backtesting.collectors.exchanges import abstract_exchange_live_collector + +from octobot_backtesting.collectors.exchanges.abstract_exchange_bot_snapshot_collector import ( + AbstractExchangeBotSnapshotCollector, +) +from octobot_backtesting.collectors.exchanges.abstract_exchange_history_collector import ( + AbstractExchangeHistoryCollector, +) +from octobot_backtesting.collectors.exchanges.abstract_exchange_live_collector import ( + AbstractExchangeLiveCollector, +) + +__all__ = [ + "ExchangeDataCollector", + "AbstractExchangeBotSnapshotCollector", + "AbstractExchangeHistoryCollector", + "AbstractExchangeLiveCollector", +] diff --git a/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_bot_snapshot_collector.py b/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_bot_snapshot_collector.py new file mode 100644 index 0000000000..68dffe0cbb --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_bot_snapshot_collector.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors.exchanges as exchanges + + +class AbstractExchangeBotSnapshotCollector(exchanges.ExchangeDataCollector): + pass diff --git a/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_history_collector.py b/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_history_collector.py new file mode 100644 index 0000000000..ea22431b89 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_history_collector.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors.exchanges as exchanges + + +class AbstractExchangeHistoryCollector(exchanges.ExchangeDataCollector): + pass diff --git a/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_live_collector.py b/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_live_collector.py new file mode 100644 index 0000000000..31a991a619 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/exchanges/abstract_exchange_live_collector.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors.exchanges.exchange_collector as exchange_collector + + +class AbstractExchangeLiveCollector(exchange_collector.ExchangeDataCollector): + pass diff --git a/packages/backtesting/octobot_backtesting/collectors/exchanges/exchange_collector.py b/packages/backtesting/octobot_backtesting/collectors/exchanges/exchange_collector.py new file mode 100644 index 0000000000..85bf190b03 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/exchanges/exchange_collector.py @@ -0,0 +1,164 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import logging +import abc + +import time + +import octobot_commons.constants as commons_constants +import octobot_backtesting.collectors.data_collector as data_collector +import octobot_backtesting.constants as constants +import octobot_backtesting.enums as enums +import octobot_backtesting.importers as importers + +try: + import octobot_trading.constants as trading_constants +except ImportError: + logging.error("ExchangeDataCollector requires OctoBot-Trading package installed") + + +class ExchangeDataCollector(data_collector.DataCollector): + VERSION = "1.1" + IMPORTER = importers.ExchangeDataImporter + + def __init__(self, config, exchange_name, exchange_type, + tentacles_setup_config, symbols, time_frames, use_all_available_timeframes=False, + data_format=enums.DataFormats.REGULAR_COLLECTOR_DATA, start_timestamp=None, end_timestamp=None): + super().__init__(config, data_format=data_format) + self.exchange_name = exchange_name + self.exchange_type = exchange_type + self.tentacles_setup_config = tentacles_setup_config + self.symbols = symbols if symbols else [] + self.time_frames = time_frames if time_frames else [] + self.use_all_available_timeframes = use_all_available_timeframes + self.start_timestamp = start_timestamp + self.end_timestamp = end_timestamp + self.current_step_index = 0 + self.total_steps = 0 + self.current_step_percent = 0 + self.exchange_id = None + self.set_file_path() + + def register_exchange_id(self, exchange_id): + self.exchange_id = exchange_id + + def get_current_step_index(self): + return self.current_step_index + + def get_total_steps(self): + return self.total_steps + + def get_current_step_percent(self): + return self.current_step_percent + + @abc.abstractmethod + def _load_all_available_timeframes(self): + raise NotImplementedError("_load_all_available_timeframes is not implemented") + + async def initialize(self): + self.create_database() + await self.database.initialize() + + # set config from params + self.config[commons_constants.CONFIG_TIME_FRAME] = self.time_frames + # get exchange credentials if available + existing_exchange_config = self.config.get(commons_constants.CONFIG_EXCHANGES, {}).get(self.exchange_name, {}) + self.config[commons_constants.CONFIG_EXCHANGES] = {self.exchange_name: existing_exchange_config} + self.config[commons_constants.CONFIG_CRYPTO_CURRENCIES] = {"Symbols": { + commons_constants.CONFIG_CRYPTO_PAIRS: [str(symbol) for symbol in self.symbols]}} + + def _load_timeframes_if_necessary(self): + if self.use_all_available_timeframes: + self._load_all_available_timeframes() + self.config[commons_constants.CONFIG_TIME_FRAME] = self.time_frames + + async def _create_description(self): + await self.database.insert(enums.DataTables.DESCRIPTION, + timestamp=time.time(), + version=constants.CURRENT_VERSION, + type=enums.DataType.EXCHANGE.value, + exchange=self.exchange_name, + symbols=json.dumps([symbol.symbol_str for symbol in self.symbols]), + time_frames=json.dumps([tf.value for tf in self.time_frames]), + start_timestamp=int(self.start_timestamp/1000) if self.start_timestamp else 0, + end_timestamp=int(self.end_timestamp/1000) if self.end_timestamp + else int(time.time()) if self.start_timestamp else 0) + + async def save_ticker(self, timestamp, exchange, cryptocurrency, symbol, ticker, multiple=False): + if not multiple: + await self.database.insert(enums.ExchangeDataTables.TICKER, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, recent_trades=json.dumps(ticker)) + else: + await self.database.insert_all(enums.ExchangeDataTables.TICKER, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, recent_trades=[json.dumps(t) for t in ticker]) + + async def save_order_book(self, timestamp, exchange, cryptocurrency, symbol, asks, bids, multiple=False): + if not multiple: + await self.database.insert(enums.ExchangeDataTables.ORDER_BOOK, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, symbol=symbol, + asks=json.dumps(asks), bids=json.dumps(bids)) + else: + await self.database.insert_all(enums.ExchangeDataTables.ORDER_BOOK, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, symbol=symbol, + asks=[json.dumps(a) for a in asks], + bids=[json.dumps(b) for b in bids]) + + async def save_recent_trades(self, timestamp, exchange, cryptocurrency, symbol, recent_trades, multiple=False): + if not multiple: + await self.database.insert(enums.ExchangeDataTables.RECENT_TRADES, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, recent_trades=json.dumps(recent_trades)) + else: + await self.database.insert_all(enums.ExchangeDataTables.RECENT_TRADES, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, recent_trades=[json.dumps(rt) for rt in recent_trades]) + + async def save_ohlcv(self, timestamp, exchange, cryptocurrency, symbol, time_frame, candle, multiple=False): + if not multiple: + await self.database.insert(enums.ExchangeDataTables.OHLCV, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame.value, + candle=json.dumps(candle)) + else: + await self.database.insert_all(enums.ExchangeDataTables.OHLCV, timestamp=timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame.value, + candle=[json.dumps(c) for c in candle]) + + async def save_kline(self, timestamp, exchange, cryptocurrency, symbol, time_frame, kline, multiple=False): + if not multiple: + await self.database.insert(enums.ExchangeDataTables.KLINE, timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame.value, + candle=json.dumps(kline)) + else: + await self.database.insert_all(enums.ExchangeDataTables.KLINE, timestamp=timestamp, + exchange_name=exchange, cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame.value, + candle=[json.dumps(kl) for kl in kline]) + + async def delete_all(self, table, exchange, cryptocurrency, symbol, time_frame=None): + kwargs = { + "exchange_name": exchange, + "cryptocurrency": cryptocurrency, + "symbol": symbol, + } + if time_frame: + kwargs["time_frame"] = time_frame.value + await self.database.delete(table, **kwargs) diff --git a/packages/backtesting/octobot_backtesting/collectors/social/__init__.py b/packages/backtesting/octobot_backtesting/collectors/social/__init__.py new file mode 100644 index 0000000000..d3af1134df --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/social/__init__.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.collectors.social import social_collector +from octobot_backtesting.collectors.social import abstract_social_history_collector +from octobot_backtesting.collectors.social import abstract_social_live_collector + +from octobot_backtesting.collectors.social.social_collector import ( + SocialDataCollector, +) +from octobot_backtesting.collectors.social.abstract_social_history_collector import ( + AbstractSocialHistoryCollector, +) +from octobot_backtesting.collectors.social.abstract_social_live_collector import ( + AbstractSocialLiveCollector, +) + +__all__ = [ + "SocialDataCollector", + "AbstractSocialHistoryCollector", + "AbstractSocialLiveCollector", +] diff --git a/packages/backtesting/octobot_backtesting/collectors/social/abstract_social_history_collector.py b/packages/backtesting/octobot_backtesting/collectors/social/abstract_social_history_collector.py new file mode 100644 index 0000000000..71c757259b --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/social/abstract_social_history_collector.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors.social.social_collector as social_collector + + +class AbstractSocialHistoryCollector(social_collector.SocialDataCollector): + pass diff --git a/packages/backtesting/octobot_backtesting/collectors/social/abstract_social_live_collector.py b/packages/backtesting/octobot_backtesting/collectors/social/abstract_social_live_collector.py new file mode 100644 index 0000000000..9d38827ce0 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/social/abstract_social_live_collector.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.collectors.social.social_collector as social_collector + + +class AbstractSocialLiveCollector(social_collector.SocialDataCollector): + pass diff --git a/packages/backtesting/octobot_backtesting/collectors/social/social_collector.py b/packages/backtesting/octobot_backtesting/collectors/social/social_collector.py new file mode 100644 index 0000000000..d800e3a61f --- /dev/null +++ b/packages/backtesting/octobot_backtesting/collectors/social/social_collector.py @@ -0,0 +1,131 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import logging +import abc +import time + +import octobot_backtesting.collectors.data_collector as data_collector +import octobot_backtesting.constants as constants +import octobot_backtesting.enums as enums +import octobot_backtesting.importers as importers + +try: + import octobot_services.constants as services_constants +except ImportError: + logging.error("SocialDataCollector requires OctoBot-Services package installed") + + +class SocialDataCollector(data_collector.DataCollector): + VERSION = constants.CURRENT_VERSION + IMPORTER = importers.SocialDataImporter + + def __init__(self, config, services, tentacles_setup_config=None, sources=None, symbols=None, + use_all_available_sources=False, + data_format=enums.DataFormats.REGULAR_COLLECTOR_DATA, + start_timestamp=None, end_timestamp=None): + super().__init__(config, data_format=data_format) + self.tentacles_setup_config = tentacles_setup_config + self.sources = sources if sources else [] + self.symbols = symbols if symbols else [] + self.use_all_available_sources = use_all_available_sources + self.start_timestamp = start_timestamp + self.end_timestamp = end_timestamp + self.services = services if services else [] + self.primary_service = self.services[0] if self.services else "" + self.current_step_index = 0 + self.total_steps = 0 + self.current_step_percent = 0 + self.set_file_path() + + def get_current_step_index(self): + return self.current_step_index + + def get_total_steps(self): + return self.total_steps + + def get_current_step_percent(self): + return self.current_step_percent + + @abc.abstractmethod + def _load_all_available_sources(self): + raise NotImplementedError("_load_all_available_sources is not implemented") + + async def initialize(self): + self.create_database() + await self.database.initialize() + + # set config from params + if self.sources: + self.config.setdefault("sources", self.sources) + # get service config if available + if self.primary_service: + existing_service_config = self.config.get( + services_constants.CONFIG_CATEGORY_SERVICES, {} + ).get(self.primary_service, {}) + self.config[services_constants.CONFIG_CATEGORY_SERVICES] = { + self.primary_service: existing_service_config + } + if self.symbols: + self.config.setdefault("symbols", [str(symbol) for symbol in self.symbols]) + + def _load_sources_if_necessary(self): + if self.use_all_available_sources: + self._load_all_available_sources() + if self.sources: + self.config["sources"] = self.sources + + async def _create_description(self): + timestamp = time.time() + description = { + "version": self.VERSION, + "type": enums.DataType.SOCIAL.value, + "sources": json.dumps(self.sources) if self.sources else json.dumps([]), + "symbols": json.dumps([str(symbol) for symbol in self.symbols]) if self.symbols else json.dumps([]), + "start_timestamp": int(self.start_timestamp / 1000) if self.start_timestamp else 0, + "end_timestamp": int(self.end_timestamp / 1000) if self.end_timestamp + else int(time.time()) if self.start_timestamp else 0, + } + description["services"] = json.dumps(self.services or []) + await self.database.insert(enums.DataTables.DESCRIPTION, timestamp, **description) + + async def save_event(self, timestamp, service_name, channel=None, symbol=None, payload=None, multiple=False): + if not multiple: + await self.database.insert(enums.SocialDataTables.SOCIAL_EVENTS, timestamp, + service_name=service_name, + channel=channel if channel else "", + symbol=symbol if symbol else "", + payload=json.dumps(payload)) + else: + # When multiple=True, timestamp should be a list, and varying fields should be lists + # service_name stays constant, channel and symbol can be lists or single values + channel_list = channel if isinstance(channel, list) else [channel if channel else "" for _ in payload] + symbol_list = symbol if isinstance(symbol, list) else [symbol if symbol else "" for _ in payload] + await self.database.insert_all(enums.SocialDataTables.SOCIAL_EVENTS, timestamp=timestamp, + service_name=service_name, + channel=channel_list, + symbol=symbol_list, + payload=[json.dumps(p) for p in payload]) + + async def delete_all(self, table, service_name, channel=None, symbol=None): + kwargs = { + "service_name": service_name, + } + if channel: + kwargs["channel"] = channel + if symbol: + kwargs["symbol"] = symbol + await self.database.delete(table, **kwargs) diff --git a/packages/backtesting/octobot_backtesting/comparators/__init__.py b/packages/backtesting/octobot_backtesting/comparators/__init__.py new file mode 100644 index 0000000000..c503955c04 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/comparators/__init__.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_backtesting.comparators import data_comparator +from octobot_backtesting.comparators.data_comparator import DataComparator + +__all__ = [ + "DataComparator", +] diff --git a/packages/backtesting/octobot_backtesting/comparators/data_comparator.py b/packages/backtesting/octobot_backtesting/comparators/data_comparator.py new file mode 100644 index 0000000000..8f49caaa57 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/comparators/data_comparator.py @@ -0,0 +1,134 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os.path as path + +import octobot_commons.logging as logging + +import octobot_backtesting.constants as constants +import octobot_backtesting.data as data +import octobot_backtesting.enums as enums + + +class DataComparator: + def __init__(self, data_path=constants.BACKTESTING_FILE_PATH): + self.logger = logging.get_logger(self.__class__.__name__) + self.data_path = data_path + + def _sorted_str_list(self, values) -> list: + if not values: + return [] + return sorted(str(v) for v in values) + + def _timestamps_match(self, existing_start, existing_end, requested_start, requested_end) -> bool: + req_start_s = int(requested_start / 1000) if requested_start else 0 + req_end_s = int(requested_end / 1000) if requested_end else 0 + # SQLite may return timestamps as strings; normalise to int before comparing + ex_start_s = int(existing_start) if existing_start else 0 + ex_end_s = int(existing_end) if existing_end else 0 + if req_start_s and ex_start_s != req_start_s: + return False + if req_end_s and ex_end_s != req_end_s: + return False + return True + + def exchange_description_matches(self, description: dict, + exchange_name: str, + symbols: list, + time_frames: list, + start_timestamp, + end_timestamp, + **kwargs) -> bool: + # 1. data type + if description.get(enums.DataFormatKeys.DATA_TYPE.value) != enums.DataType.EXCHANGE.value: + return False + # 2. version (exchange collector always writes CURRENT_VERSION) + if description.get(enums.DataFormatKeys.VERSION.value) != constants.CURRENT_VERSION: + return False + # 3. exchange name + if description.get(enums.DataFormatKeys.EXCHANGE.value) != exchange_name: + return False + # 4. symbols (order-independent) + if self._sorted_str_list(description.get(enums.DataFormatKeys.SYMBOLS.value, [])) \ + != self._sorted_str_list(symbols): + return False + # 5. time frames (order-independent) + existing_tfs = self._sorted_str_list( + tf.value if hasattr(tf, "value") else tf + for tf in description.get(enums.DataFormatKeys.TIME_FRAMES.value, []) + ) + requested_tfs = self._sorted_str_list( + tf.value if hasattr(tf, "value") else tf for tf in (time_frames or []) + ) + if existing_tfs != requested_tfs: + return False + # 6. timestamps + existing_start = description.get(enums.DataFormatKeys.START_TIMESTAMP.value, 0) + existing_end = description.get(enums.DataFormatKeys.END_TIMESTAMP.value, 0) + return self._timestamps_match(existing_start, existing_end, start_timestamp, end_timestamp) + + def social_description_matches(self, description: dict, + services: list, + symbols: list, + start_timestamp, + end_timestamp, + **kwargs) -> bool: + # 1. data type + if description.get(enums.DataFormatKeys.DATA_TYPE.value) != enums.DataType.SOCIAL.value: + return False + # 2. version + if description.get(enums.DataFormatKeys.VERSION.value) != constants.CURRENT_VERSION: + return False + # 3. service names + # Collector descriptions can contain the selected feed class plus its + # required underlying services. Accept a superset in existing files. + existing_services = self._sorted_str_list(description.get(enums.DataFormatKeys.SERVICES.value, [])) + requested_services = self._sorted_str_list(services) + if not requested_services: + return False + if not set(requested_services).issubset(set(existing_services)): + return False + # 4. symbols (order-independent) + # A social file with no symbols means "all symbols": it can satisfy any + # requested symbol filter. A symbol-scoped file must match exactly. + existing_symbols = self._sorted_str_list(description.get(enums.DataFormatKeys.SYMBOLS.value, [])) + requested_symbols = self._sorted_str_list(symbols) + if existing_symbols and existing_symbols != requested_symbols: + return False + # 5. timestamps + existing_start = description.get(enums.DataFormatKeys.START_TIMESTAMP.value, 0) + existing_end = description.get(enums.DataFormatKeys.END_TIMESTAMP.value, 0) + return self._timestamps_match(existing_start, existing_end, start_timestamp, end_timestamp) + + def description_matches(self, description: dict, **kwargs) -> bool: + data_type = description.get(enums.DataFormatKeys.DATA_TYPE.value) + if data_type == enums.DataType.EXCHANGE.value: + return self.exchange_description_matches(description, **kwargs) + if data_type == enums.DataType.SOCIAL.value: + return self.social_description_matches(description, **kwargs) + return False + + async def find_matching_data_file(self, **kwargs) -> str | None: + for file_name in data.get_all_available_data_files(self.data_path): + description = await data.get_file_description(path.join(self.data_path, file_name)) + if description is None: + continue + try: + if self.description_matches(description, **kwargs): + self.logger.debug(f"Found existing matching data file: {file_name}") + return file_name + except Exception as e: + self.logger.debug(f"Could not compare description of {file_name}: {e}") + return None diff --git a/packages/backtesting/octobot_backtesting/constants.py b/packages/backtesting/octobot_backtesting/constants.py new file mode 100644 index 0000000000..649dbef95b --- /dev/null +++ b/packages/backtesting/octobot_backtesting/constants.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +import octobot_commons.enums as enums + +CONFIG_BACKTESTING = "backtesting" +CONFIG_BACKTESTING_DATA_FILES = "files" +CONFIG_ANALYSIS_ENABLED_OPTION = "post_analysis_enabled" +CONFIG_BACKTESTING_OTHER_MARKETS_STARTING_PORTFOLIO = 10000 +BACKTESTING_DATA_OHLCV = "ohlcv" +BACKTESTING_DATA_TRADES = "trades" +BACKTESTING_FILE_PATH = os.path.join(CONFIG_BACKTESTING, "data") +BACKTESTING_DATA_FILE_EXT = ".data" +BACKTESTING_DATA_FILE_TEMP_EXT = ".part" +BACKTESTING_DATA_FILE_SEPARATOR = "_" +CURRENT_VERSION = "2.0" +BACKTESTING_DATA_FILE_TIME_WRITE_FORMAT = '%Y%m%d_%H%M%S' +BACKTESTING_DATA_FILE_TIME_READ_FORMAT = BACKTESTING_DATA_FILE_TIME_WRITE_FORMAT.replace("_", "") +BACKTESTING_DATA_FILE_TIME_DISPLAY_FORMAT = '%d %B %Y at %H:%M:%S' +BACKTESTING_DEFAULT_JOIN_TIMEOUT = 1800 # 30min + +BACKTESTING_TIME_FRAMES_TO_DISPLAY = [enums.TimeFrames.THIRTY_MINUTES.value, + enums.TimeFrames.ONE_HOUR.value, + enums.TimeFrames.FOUR_HOURS.value, + enums.TimeFrames.ONE_DAY.value] diff --git a/packages/backtesting/octobot_backtesting/converters/__init__.py b/packages/backtesting/octobot_backtesting/converters/__init__.py new file mode 100644 index 0000000000..b3deb2f48d --- /dev/null +++ b/packages/backtesting/octobot_backtesting/converters/__init__.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.converters import data_converter + +from octobot_backtesting.converters.data_converter import ( + DataConverter, +) + +__all__ = [ + "DataConverter", +] diff --git a/packages/backtesting/octobot_backtesting/converters/data_converter.py b/packages/backtesting/octobot_backtesting/converters/data_converter.py new file mode 100644 index 0000000000..ba8563c0b8 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/converters/data_converter.py @@ -0,0 +1,36 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import octobot_commons.logging as logging + + +class DataConverter: + def __init__(self, backtesting_file_to_convert): + self.logger = logging.get_logger(self.__class__.__name__) + self.file_to_convert = backtesting_file_to_convert + self.converted_file = "" + + @abc.abstractmethod + async def can_convert(self) -> bool: + raise NotImplementedError("can_convert is not implemented") + + @abc.abstractmethod + async def convert(self) -> bool: + """ + Converts self.backtesting_file_to_convert and saves the output into self.converted_file_path + :return: True when conversion is successful, False otherwise + """ + raise NotImplementedError("convert is not implemented") diff --git a/packages/backtesting/octobot_backtesting/data/__init__.py b/packages/backtesting/octobot_backtesting/data/__init__.py new file mode 100644 index 0000000000..52e82aa62e --- /dev/null +++ b/packages/backtesting/octobot_backtesting/data/__init__.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.data import data_file_manager +from octobot_backtesting.data.data_file_manager import ( + get_backtesting_file_name, + get_data_type, + get_file_ending, + get_date, + is_valid_ending, + get_all_available_data_files, + delete_data_file, + get_database_description, + get_file_description, +) + +__all__ = [ + "get_backtesting_file_name", + "get_data_type", + "get_file_ending", + "get_date", + "is_valid_ending", + "get_all_available_data_files", + "delete_data_file", + "get_database_description", + "get_file_description", +] diff --git a/packages/backtesting/octobot_backtesting/data/data_file_manager.py b/packages/backtesting/octobot_backtesting/data/data_file_manager.py new file mode 100644 index 0000000000..082b97493a --- /dev/null +++ b/packages/backtesting/octobot_backtesting/data/data_file_manager.py @@ -0,0 +1,177 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import json +import os.path as path +import os +from datetime import datetime + +import octobot_commons.databases as databases +import octobot_commons.enums as common_enums +import octobot_commons.time_frame_manager as tmf_manager +import octobot_commons.errors as commons_errors + +import octobot_backtesting.constants as constants +import octobot_backtesting.enums as enums + + +def get_backtesting_file_name(clazz, identifier, data_format=enums.DataFormats.REGULAR_COLLECTOR_DATA): + return f"{clazz.__name__}{constants.BACKTESTING_DATA_FILE_SEPARATOR}" \ + f"{identifier()}{get_file_ending(data_format)}" + + +def get_data_type(file_name): + if file_name.endswith(constants.BACKTESTING_DATA_FILE_EXT): + return enums.DataFormats.REGULAR_COLLECTOR_DATA + + +def get_file_ending(data_type): + if data_type == enums.DataFormats.REGULAR_COLLECTOR_DATA: + return constants.BACKTESTING_DATA_FILE_EXT + + +def get_date(time_info) -> str: + """ + :param time_info: Timestamp in seconds of the time to convert + :return: A human readable date at the backtesting data file time format + """ + return datetime.fromtimestamp(time_info).strftime(constants.BACKTESTING_DATA_FILE_TIME_DISPLAY_FORMAT) + + +async def get_database_description(database): + description = (await database.select(enums.DataTables.DESCRIPTION, size=1))[0] + version = description[1] + if version == "2.0": + data_type = description[2] + if data_type == enums.DataType.EXCHANGE.value: + symbols = json.loads(description[4]) + time_frames = [common_enums.TimeFrames(tf) for tf in json.loads(description[5])] + candles_count = (await database.select_count( + enums.ExchangeDataTables.OHLCV, ["*"], + time_frame=tmf_manager.find_min_time_frame(time_frames).value + ))[0][0] + candles_length = int(candles_count / len(symbols)) if symbols else 0 + return { + enums.DataFormatKeys.TIMESTAMP.value: description[0], + enums.DataFormatKeys.VERSION.value: description[1], + enums.DataFormatKeys.DATA_TYPE.value: enums.DataType.EXCHANGE.value, + enums.DataFormatKeys.EXCHANGE.value: description[3], + enums.DataFormatKeys.SYMBOLS.value: symbols, + enums.DataFormatKeys.TIME_FRAMES.value: time_frames, + enums.DataFormatKeys.START_TIMESTAMP.value: description[6], + enums.DataFormatKeys.END_TIMESTAMP.value: description[7], + enums.DataFormatKeys.CANDLES_LENGTH.value: candles_length, + } + elif data_type == enums.DataType.SOCIAL.value: + def _parse_list(value): + try: + parsed = json.loads(value) if value else [] + return parsed if isinstance(parsed, list) else [] + except (json.JSONDecodeError, TypeError): + return [] + + sources = _parse_list(description[3]) if len(description) > 3 else [] + symbols = _parse_list(description[4]) if len(description) > 4 else [] + start_timestamp = description[5] if len(description) > 5 else 0 + end_timestamp = description[6] if len(description) > 6 else 0 + services = _parse_list(description[7]) if len(description) > 7 else [] + exchange = "" + return { + enums.DataFormatKeys.TIMESTAMP.value: description[0], + enums.DataFormatKeys.VERSION.value: description[1], + enums.DataFormatKeys.DATA_TYPE.value: enums.DataType.SOCIAL.value, + enums.DataFormatKeys.EXCHANGE.value: exchange, + enums.DataFormatKeys.SERVICES.value: services, + enums.DataFormatKeys.SYMBOLS.value: symbols if isinstance(symbols, list) else [], + enums.DataFormatKeys.TIME_FRAMES.value: [], + enums.DataFormatKeys.START_TIMESTAMP.value: start_timestamp, + enums.DataFormatKeys.END_TIMESTAMP.value: end_timestamp, + enums.DataFormatKeys.CANDLES_LENGTH.value: 0, + } + elif version == "1.0": + return { + enums.DataFormatKeys.TIMESTAMP.value: description[0], + enums.DataFormatKeys.VERSION.value: description[1], + enums.DataFormatKeys.DATA_TYPE.value: enums.DataType.EXCHANGE.value, + enums.DataFormatKeys.EXCHANGE.value: description[2], + enums.DataFormatKeys.SYMBOLS.value: json.loads(description[3]), + enums.DataFormatKeys.TIME_FRAMES.value: [common_enums.TimeFrames(tf) for tf in json.loads(description[4])], + enums.DataFormatKeys.START_TIMESTAMP.value: 0, + enums.DataFormatKeys.END_TIMESTAMP.value: 0, + enums.DataFormatKeys.CANDLES_LENGTH.value: + int((await database.select_count(enums.ExchangeDataTables.OHLCV, ["*"],\ + time_frame=tmf_manager.find_min_time_frame([common_enums.TimeFrames(tf) + for tf in json.loads(description[4])]).value))[0][0] + / len(json.loads(description[3]))) + } + elif version == "1.1": + return { + enums.DataFormatKeys.TIMESTAMP.value: description[0], + enums.DataFormatKeys.VERSION.value: description[1], + enums.DataFormatKeys.DATA_TYPE.value: enums.DataType.EXCHANGE.value, + enums.DataFormatKeys.EXCHANGE.value: description[2], + enums.DataFormatKeys.SYMBOLS.value: json.loads(description[3]), + enums.DataFormatKeys.TIME_FRAMES.value: [common_enums.TimeFrames(tf) for tf in json.loads(description[4])], + enums.DataFormatKeys.START_TIMESTAMP.value: description[5], + enums.DataFormatKeys.END_TIMESTAMP.value: description[6], + enums.DataFormatKeys.CANDLES_LENGTH.value: + int((await database.select_count(enums.ExchangeDataTables.OHLCV, ["*"],\ + time_frame=tmf_manager.find_min_time_frame([common_enums.TimeFrames(tf) + for tf in json.loads(description[4])]).value))[0][0] + / len(json.loads(description[3]))) + } + else: + raise RuntimeError(f"Unknown datafile version: {version}") + + +async def get_file_description(database_file): + database = None + try: + database = databases.SQLiteDatabase(database_file) + await database.initialize() + description = await get_database_description(database) + except (commons_errors.DatabaseNotFoundError, TypeError): + description = None + finally: + if database is not None: + await database.stop() + return description + + +def is_valid_ending(ending): + return ending in [constants.BACKTESTING_DATA_FILE_EXT] + + +def get_all_available_data_files(data_collector_path): + try: + files = [file + for file in os.listdir(data_collector_path) + if path.isfile(path.join(data_collector_path, file)) and is_valid_ending(path.splitext(file)[1])] + except FileNotFoundError: + files = [] + return files + + +def delete_data_file(data_collector_path, file_name): + try: + file_path = path.join(data_collector_path, file_name) + if path.isfile(file_path): + os.remove(file_path) + return True, "" + else: + return False, f"file can't be found" + except Exception as e: + return False, e diff --git a/packages/backtesting/octobot_backtesting/enums.py b/packages/backtesting/octobot_backtesting/enums.py new file mode 100644 index 0000000000..8757cdbe63 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/enums.py @@ -0,0 +1,66 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + + +class DataFormats(enum.Enum): + REGULAR_COLLECTOR_DATA = 0 + + +class DataFormatKeys(enum.Enum): + SYMBOLS = "symbols" + EXCHANGE = "exchange" + SERVICES = "services" + DATE = "date" + TIMESTAMP = "timestamp" + START_TIMESTAMP = "start_timestamp" + END_TIMESTAMP = "end_timestamp" + START_DATE = "start_date" + END_DATE = "end_date" + CANDLES = "candles" + CANDLES_LENGTH = "candles_length" + TIME_FRAMES = "time_frames" + TYPE = "type" + VERSION = "version" + DATA_TYPE = "data_type" + + +class ReportFormat(enum.Enum): + SYMBOL_REPORT = "symbol_report" + BOT_REPORT = "bot_report" + SYMBOLS_WITH_TF = "symbols_with_time_frames_frames" + + +class DataType(enum.Enum): + EXCHANGE = "exchange" + SOCIAL = "social" + + +class DataTables(enum.Enum): + DESCRIPTION = "description" + + +class ExchangeDataTables(enum.Enum): + RECENT_TRADES = "recent_trades" + ORDER_BOOK = "order_book" + OHLCV = "ohlcv" + KLINE = "kline" + TICKER = "ticker" + FUNDING = "funding" + + +class SocialDataTables(enum.Enum): + SOCIAL_EVENTS = "social_events" diff --git a/packages/backtesting/octobot_backtesting/errors.py b/packages/backtesting/octobot_backtesting/errors.py new file mode 100644 index 0000000000..2a139e914e --- /dev/null +++ b/packages/backtesting/octobot_backtesting/errors.py @@ -0,0 +1,30 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +class DataCollectorError(Exception): + pass + + +class IncompatibleDatafileError(Exception): + pass + + +class MissingTimeFrame(Exception): + pass + + +class BacktestingFileNotFound(Exception): + pass diff --git a/packages/backtesting/octobot_backtesting/importers/__init__.py b/packages/backtesting/octobot_backtesting/importers/__init__.py new file mode 100644 index 0000000000..6c4e6a99a3 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/__init__.py @@ -0,0 +1,48 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.importers import data_importer +from octobot_backtesting.importers.data_importer import ( + DataImporter, +) + +from octobot_backtesting.importers import social +from octobot_backtesting.importers import exchanges + +from octobot_backtesting.importers.exchanges import ( + ExchangeDataImporter, + get_operations_from_timestamps, + import_ohlcvs, + import_tickers, + import_order_books, + import_recent_trades, + import_klines, +) +from octobot_backtesting.importers.social import ( + SocialDataImporter, +) + +__all__ = [ + "DataImporter", + "ExchangeDataImporter", + "SocialDataImporter", + "get_operations_from_timestamps", + "import_ohlcvs", + "import_tickers", + "import_order_books", + "import_recent_trades", + "import_klines", +] diff --git a/packages/backtesting/octobot_backtesting/importers/data_importer.py b/packages/backtesting/octobot_backtesting/importers/data_importer.py new file mode 100644 index 0000000000..83769be886 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/data_importer.py @@ -0,0 +1,69 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os.path as path + +import octobot_commons.logging as logging +import octobot_commons.databases as databases + +import octobot_backtesting.constants as constants +import octobot_backtesting.errors as errors + + +class DataImporter: + def __init__(self, config, file_path): + self.config = config + self.file_path = file_path + self.logger = logging.get_logger(self.__class__.__name__) + + self.should_stop = False + + self.version = None + self.database = None + self.chronological_cache = databases.ChronologicalReadDatabaseCache() + + async def initialize(self) -> None: + pass + + def reset_cache(self): + self.chronological_cache.reset_cached_indexes() + + async def get_data_timestamp_interval(self, time_frame=None): + raise NotImplementedError("get_data_timestamp_interval is not implemented") + + async def stop(self) -> None: + if not self.should_stop: + self.should_stop = True + await self.database.stop() + + async def start(self) -> None: + raise NotImplementedError("Start is not implemented") + + def provides_accurate_price_time_frame(self) -> bool: + raise NotImplementedError("provides_accurate_price_time_frame is not implemented") + + def load_database(self) -> None: + file_path = self.adapt_file_path_if_necessary() + if not self.database: + self.database = databases.SQLiteDatabase(file_path) + + def adapt_file_path_if_necessary(self): + if path.isfile(self.file_path): + return self.file_path + else: + candidate_path = path.join(constants.BACKTESTING_FILE_PATH, self.file_path) + if path.isfile(candidate_path): + return candidate_path + raise errors.BacktestingFileNotFound(f"File {self.file_path} not found") diff --git a/packages/backtesting/octobot_backtesting/importers/exchanges/__init__.py b/packages/backtesting/octobot_backtesting/importers/exchanges/__init__.py new file mode 100644 index 0000000000..bc1bff4195 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/exchanges/__init__.py @@ -0,0 +1,41 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.importers.exchanges import exchange_importer +from octobot_backtesting.importers.exchanges import util + +from octobot_backtesting.importers.exchanges.exchange_importer import ( + ExchangeDataImporter, +) + +from octobot_backtesting.importers.exchanges.util import ( + get_operations_from_timestamps, + import_ohlcvs, + import_tickers, + import_order_books, + import_recent_trades, + import_klines, +) + +__all__ = [ + "ExchangeDataImporter", + "get_operations_from_timestamps", + "import_ohlcvs", + "import_tickers", + "import_order_books", + "import_recent_trades", + "import_klines", +] diff --git a/packages/backtesting/octobot_backtesting/importers/exchanges/exchange_importer.py b/packages/backtesting/octobot_backtesting/importers/exchanges/exchange_importer.py new file mode 100644 index 0000000000..52a4cf5e6f --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/exchanges/exchange_importer.py @@ -0,0 +1,268 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.errors as common_errors +import octobot_commons.databases as databases + +import octobot_backtesting.data as data +import octobot_backtesting.enums as enums +import octobot_backtesting.errors as errors +import octobot_backtesting.importers as importers + + +class ExchangeDataImporter(importers.DataImporter): + def __init__(self, config, file_path): + super().__init__(config, file_path) + + self.exchange_name = None + self.symbols = [] + self.time_frames = [] + self.available_data_types = [] + self.has_all_time_frames_candles_history = False + + async def initialize(self) -> None: + self.load_database() + await self.database.initialize() + + # load description + description = await data.get_database_description(self.database) + self.exchange_name = description[enums.DataFormatKeys.EXCHANGE.value] + self.symbols = description[enums.DataFormatKeys.SYMBOLS.value] + self.time_frames = description[enums.DataFormatKeys.TIME_FRAMES.value] + self.has_all_time_frames_candles_history = bool(description.get(enums.DataFormatKeys.START_TIMESTAMP.value)) + await self._init_available_data_types() + + self.logger.info(f"Loaded {self.exchange_name} data file with " + f"{', '.join(self.symbols)} on {', '.join([tf.value for tf in self.time_frames])}") + + async def start(self) -> None: + pass + + def provides_accurate_price_time_frame(self) -> bool: + # has_all_time_frames_candles_history is necessary for accurate price time frame + return self.has_all_time_frames_candles_history + + async def get_data_timestamp_interval(self, time_frame=None): + minimum_timestamp: float = 0.0 + maximum_timestamp: float = 0.0 + + min_ohlcv_timestamp: float = 0.0 + max_ohlcv_timestamp: float = 0.0 + + for table in [enums.ExchangeDataTables.KLINE, enums.ExchangeDataTables.ORDER_BOOK, + enums.ExchangeDataTables.RECENT_TRADES, enums.ExchangeDataTables.TICKER]: + if table in self.available_data_types: + try: + min_timestamp = (await self.database.select_min(table, + [databases.SQLiteDatabase.TIMESTAMP_COLUMN]))[0][0] + if not minimum_timestamp or minimum_timestamp > min_timestamp: + minimum_timestamp = min_timestamp + + max_timestamp = (await self.database.select_max(table, + [databases.SQLiteDatabase.TIMESTAMP_COLUMN]))[0][0] + if not maximum_timestamp or maximum_timestamp < max_timestamp: + maximum_timestamp = max_timestamp + except (IndexError, common_errors.DatabaseNotFoundError): + pass + + # OHLCV timestamps + try: + ohlcv_kwargs = {"time_frame": time_frame} if time_frame else {} + ohlcv_min_timestamps = (await self.database.select_min(enums.ExchangeDataTables.OHLCV, + [databases.SQLiteDatabase.TIMESTAMP_COLUMN], + [common_constants.CONFIG_TIME_FRAME], + group_by=common_constants.CONFIG_TIME_FRAME, + **ohlcv_kwargs + )) + + if ohlcv_min_timestamps: + # if the required time frame is not included in this database, ohlcv_min_timestamps is empty: ignore it + min_ohlcv_timestamp = max(ohlcv_min_timestamps)[0] + max_ohlcv_timestamp = (await self.database.select_max(enums.ExchangeDataTables.OHLCV, + [databases.SQLiteDatabase.TIMESTAMP_COLUMN], + **ohlcv_kwargs))[0][0] + elif time_frame: + raise errors.MissingTimeFrame(f"Missing time frame in data file: {time_frame}") + + except (IndexError, common_errors.DatabaseNotFoundError): + pass + + if minimum_timestamp > 0 and maximum_timestamp > 0: + return max(minimum_timestamp, min_ohlcv_timestamp), max(maximum_timestamp, max_ohlcv_timestamp) + return min_ohlcv_timestamp, max_ohlcv_timestamp + + async def _init_available_data_types(self): + self.available_data_types = [table for table in enums.ExchangeDataTables + if await self.database.check_table_exists(table) + and await self.database.check_table_not_empty(table)] + + async def _get_from_db( + self, exchange_name, symbol, table, + time_frame=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None + ): + kwargs = {} if time_frame is None else {"time_frame": time_frame.value} + + if timestamps: + return await self.database.select_from_timestamp( + table, size=limit, + exchange_name=exchange_name, symbol=symbol, + timestamps=timestamps, + operations=operations, + **kwargs + ) + return await self.database.select( + table, size=limit, + exchange_name=exchange_name, symbol=symbol, + **kwargs + ) + + async def get_ohlcv(self, exchange_name=None, symbol=None, + time_frame=common_enums.TimeFrames.ONE_HOUR, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None): + return importers.import_ohlcvs(await self._get_from_db( + exchange_name, symbol, enums.ExchangeDataTables.OHLCV, + time_frame=time_frame, + limit=limit, + timestamps=timestamps, + operations=operations + )) + + async def get_ohlcv_from_timestamps(self, exchange_name=None, symbol=None, + time_frame=common_enums.TimeFrames.ONE_HOUR, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + inferior_timestamp=-1, superior_timestamp=-1) -> list: + """ + Reads OHLCV history from database and populates a local ChronologicalReadDatabaseCache. + Warning: can't read data from before last given inferior_timestamp unless associated cache is reset + """ + return await self._get_from_cache(exchange_name, symbol, time_frame, enums.ExchangeDataTables.OHLCV, + inferior_timestamp, superior_timestamp, self.get_ohlcv, limit) + + async def get_ticker(self, exchange_name=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None): + return importers.import_tickers(await self._get_from_db( + exchange_name, symbol, enums.ExchangeDataTables.TICKER, + limit=limit, + timestamps=timestamps, + operations=operations + )) + + async def get_ticker_from_timestamps(self, exchange_name=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + inferior_timestamp=-1, superior_timestamp=-1): + """ + Reads ticker history from database and populates a local ChronologicalReadDatabaseCache. + Warning: can't read data from before last given inferior_timestamp unless associated cache is reset + """ + return await self._get_from_cache(exchange_name, symbol, None, enums.ExchangeDataTables.TICKER, + inferior_timestamp, superior_timestamp, self.get_ticker, limit) + + async def get_order_book(self, exchange_name=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None): + return importers.import_order_books(await self._get_from_db( + exchange_name, symbol, enums.ExchangeDataTables.ORDER_BOOK, + limit=limit, + timestamps=timestamps, + operations=operations + )) + + async def get_order_book_from_timestamps(self, exchange_name=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + inferior_timestamp=-1, superior_timestamp=-1): + """ + Reads order book history from database and populates a local ChronologicalReadDatabaseCache. + Warning: can't read data from before last given inferior_timestamp unless associated cache is reset + """ + return await self._get_from_cache(exchange_name, symbol, None, enums.ExchangeDataTables.ORDER_BOOK, + inferior_timestamp, superior_timestamp, self.get_order_book, limit) + + async def get_recent_trades(self, exchange_name=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None): + return importers.import_recent_trades(await self._get_from_db( + exchange_name, symbol, enums.ExchangeDataTables.RECENT_TRADES, + limit=limit, + timestamps=timestamps, + operations=operations + )) + + async def get_recent_trades_from_timestamps(self, exchange_name=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + inferior_timestamp=-1, superior_timestamp=-1): + """ + Reads recent trades history from database and populates a local ChronologicalReadDatabaseCache. + Warning: can't read data from before last given inferior_timestamp unless associated cache is reset + """ + return await self._get_from_cache(exchange_name, symbol, None, enums.ExchangeDataTables.RECENT_TRADES, + inferior_timestamp, superior_timestamp, self.get_recent_trades, limit) + + async def get_kline(self, exchange_name=None, symbol=None, + time_frame=common_enums.TimeFrames.ONE_HOUR, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None): + return importers.import_klines(await self._get_from_db( + exchange_name, symbol, enums.ExchangeDataTables.KLINE, + time_frame=time_frame, + limit=limit, + timestamps=timestamps, + operations=operations + )) + + async def get_kline_from_timestamps(self, exchange_name=None, symbol=None, + time_frame=common_enums.TimeFrames.ONE_HOUR, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + inferior_timestamp=-1, superior_timestamp=-1): + """ + Reads kline history from database and populates a local ChronologicalReadDatabaseCache. + Warning: can't read data from before last given inferior_timestamp unless associated cache is reset + """ + return await self._get_from_cache(exchange_name, symbol, time_frame, enums.ExchangeDataTables.KLINE, + inferior_timestamp, superior_timestamp, self.get_kline, limit) + + async def _get_from_cache(self, exchange_name, symbol, time_frame, data_type, + inferior_timestamp, superior_timestamp, set_cache_method, limit): + if not self.chronological_cache.has((exchange_name, symbol, time_frame, data_type)): + # ignore superior timestamp to select everything starting from inferior_timestamp and cache it + select_superior_timestamp = -1 + timestamps, operations = importers.get_operations_from_timestamps( + select_superior_timestamp, + inferior_timestamp + ) + # initializer without time_frame args are not expecting the time_frame argument, remove it + # ignore the limit param as it might reduce the available cache and give false later select results + init_cache_method_args = \ + (exchange_name, symbol, databases.SQLiteDatabase.DEFAULT_SIZE, timestamps, operations) \ + if time_frame is None \ + else (exchange_name, symbol, time_frame, databases.SQLiteDatabase.DEFAULT_SIZE, timestamps, operations) + self.chronological_cache.set( + await set_cache_method(*init_cache_method_args), + 0, + (exchange_name, symbol, time_frame, data_type) + ) + return self.chronological_cache.get(inferior_timestamp, superior_timestamp, + (exchange_name, symbol, time_frame, data_type)) diff --git a/packages/backtesting/octobot_backtesting/importers/exchanges/util.py b/packages/backtesting/octobot_backtesting/importers/exchanges/util.py new file mode 100644 index 0000000000..2984f9d570 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/exchanges/util.py @@ -0,0 +1,63 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import octobot_commons.enums as commons_enums + + +def get_operations_from_timestamps(superior_timestamp, inferior_timestamp): + operations: list = [] + timestamps: list = [] + if superior_timestamp != -1: + timestamps.append(str(superior_timestamp)) + operations.append(commons_enums.DataBaseOperations.INF_EQUALS.value) + if inferior_timestamp != -1: + timestamps.append(str(inferior_timestamp)) + operations.append(commons_enums.DataBaseOperations.SUP_EQUALS.value) + + return timestamps, operations + + +def import_ohlcvs(ohlcvs): + for i, val in enumerate(ohlcvs): + ohlcvs[i] = list(val) + ohlcvs[i][-1] = json.loads(ohlcvs[i][-1]) + return ohlcvs + + +def import_tickers(tickers): + for ticker in tickers: + ticker[-1] = json.loads(ticker[-1]) + return tickers + + +def import_order_books(order_books): + for order_book in order_books: + order_book[-1] = json.loads(order_book[-1]) + order_book[-2] = json.loads(order_book[-2]) + return order_books + + +def import_recent_trades(recent_trades): + for recent_trade in recent_trades: + recent_trade[-1] = json.loads(recent_trade[-1]) + return recent_trades + + +def import_klines(klines): + for kline in klines: + kline[-1] = json.loads(kline[-1]) + return klines diff --git a/packages/backtesting/octobot_backtesting/importers/social/__init__.py b/packages/backtesting/octobot_backtesting/importers/social/__init__.py new file mode 100644 index 0000000000..3834995ef1 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/social/__init__.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.importers.social import social_importer + +from octobot_backtesting.importers.social.social_importer import ( + SocialDataImporter, +) + +__all__ = [ + "SocialDataImporter", +] diff --git a/packages/backtesting/octobot_backtesting/importers/social/social_importer.py b/packages/backtesting/octobot_backtesting/importers/social/social_importer.py new file mode 100644 index 0000000000..c57c0da680 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/importers/social/social_importer.py @@ -0,0 +1,213 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import octobot_commons.constants as common_constants +import octobot_commons.errors as common_errors +import octobot_commons.databases as databases + +import octobot_backtesting.constants as constants +import octobot_backtesting.data as data +import octobot_backtesting.enums as enums +import octobot_backtesting.importers as importers +import octobot_backtesting.importers.exchanges.util as importers_util + + +class SocialDataImporter(importers.DataImporter): + def __init__(self, config, file_path): + super().__init__(config, file_path) + + self.service_name = None + self.sources = [] + self.symbols = [] + self.available_data_types = [] + self.has_all_events_history = False + + async def initialize(self) -> None: + self.load_database() + await self.database.initialize() + + # load description + description = await self._get_database_description() + self.service_name = description.get("service_name") + self.sources = description.get("sources", []) + self.symbols = description.get("symbols", []) + self.has_all_events_history = bool(description.get("start_timestamp", 0)) + await self._init_available_data_types() + + self.logger.info(f"Loaded {self.service_name} data file with " + f"sources: {', '.join(self.sources) if self.sources else 'all'}, " + f"symbols: {', '.join(self.symbols) if self.symbols else 'all'}") + + def provides_accurate_price_time_frame(self) -> bool: + return True + + async def _get_database_description(self): + row = (await self.database.select(enums.DataTables.DESCRIPTION, size=1))[0] + if row[2] == enums.DataType.SOCIAL.value: + def _list(v): + try: + out = json.loads(v) if v else [] + return out if isinstance(out, list) else [] + except (json.JSONDecodeError, TypeError): + return [] + sources = _list(row[3]) if len(row) > 3 else [] + symbols = _list(row[4]) if len(row) > 4 else [] + start_timestamp = row[5] if len(row) > 5 else 0 + end_timestamp = row[6] if len(row) > 6 else 0 + services = _list(row[7]) if len(row) > 7 else [] + service_name = services[0] if services else "" + return { + "timestamp": row[0], + "version": row[1], + "type": row[2], + "service_name": service_name, + "sources": sources, + "symbols": symbols, + "start_timestamp": start_timestamp, + "end_timestamp": end_timestamp, + "services": services, + } + return { + "timestamp": row[0], + "version": row[1], + "service_name": row[2] if len(row) > 2 else "unknown", + "sources": [], + "symbols": [], + "start_timestamp": 0, + "end_timestamp": 0, + "services": [], + } + + async def start(self) -> None: + pass + + async def get_data_timestamp_interval(self, time_frame=None): + """Get timestamp interval for social events""" + minimum_timestamp: float = 0.0 + maximum_timestamp: float = 0.0 + + if enums.SocialDataTables.SOCIAL_EVENTS in self.available_data_types: + try: + min_timestamp = (await self.database.select_min(enums.SocialDataTables.SOCIAL_EVENTS, + [databases.SQLiteDatabase.TIMESTAMP_COLUMN]))[0][0] + max_timestamp = (await self.database.select_max(enums.SocialDataTables.SOCIAL_EVENTS, + [databases.SQLiteDatabase.TIMESTAMP_COLUMN]))[0][0] + if min_timestamp and max_timestamp: + minimum_timestamp = min_timestamp + maximum_timestamp = max_timestamp + except (IndexError, common_errors.DatabaseNotFoundError): + pass + + return minimum_timestamp, maximum_timestamp + + async def _init_available_data_types(self): + self.available_data_types = [table for table in enums.SocialDataTables + if await self.database.check_table_exists(table) + and await self.database.check_table_not_empty(table)] + + async def _get_from_db( + self, service_name, table, + channel=None, + symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None + ): + kwargs = {"service_name": service_name} + if channel: + kwargs["channel"] = channel + if symbol: + kwargs["symbol"] = symbol + + if timestamps: + return await self.database.select_from_timestamp( + table, size=limit, + timestamps=timestamps, + operations=operations, + **kwargs + ) + return await self.database.select( + table, size=limit, + **kwargs + ) + + async def get_social_events(self, service_name=None, channel=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + timestamps=None, + operations=None): + """ + Get social events from database. + :param service_name: Filter by service name (defaults to self.service_name) + :param channel: Filter by channel + :param symbol: Filter by symbol + :param limit: Maximum number of events to return + :param timestamps: List of timestamps to filter by + :param operations: Operations for timestamp filtering + :return: List of event dicts + """ + service_name = service_name or self.service_name + events = await self._get_from_db( + service_name, enums.SocialDataTables.SOCIAL_EVENTS, + channel=channel, + symbol=symbol, + limit=limit, + timestamps=timestamps, + operations=operations + ) + result = [] + for event in events: + event_dict = { + "timestamp": event[0], + "service_name": event[1], + "channel": event[2], + "symbol": event[3], + "payload": json.loads(event[4]) if len(event) > 4 else {} + } + result.append(event_dict) + return result + + async def get_social_events_from_timestamps(self, service_name=None, channel=None, symbol=None, + limit=databases.SQLiteDatabase.DEFAULT_SIZE, + inferior_timestamp=-1, superior_timestamp=-1): + """ + Reads social events history from database and populates a local ChronologicalReadDatabaseCache. + Warning: can't read data from before last given inferior_timestamp unless associated cache is reset + """ + return await self._get_from_cache(service_name, channel, symbol, enums.SocialDataTables.SOCIAL_EVENTS, + inferior_timestamp, superior_timestamp, self.get_social_events, limit) + + async def _get_from_cache(self, service_name, channel, symbol, data_type, + inferior_timestamp, superior_timestamp, set_cache_method, limit): + cache_key = (service_name, channel, symbol, data_type) + if not self.chronological_cache.has(cache_key): + # ignore superior timestamp to select everything starting from inferior_timestamp and cache it + select_superior_timestamp = -1 + timestamps, operations = importers_util.get_operations_from_timestamps( + select_superior_timestamp, + inferior_timestamp + ) + # initializer without time_frame args are not expecting the time_frame argument, remove it + # ignore the limit param as it might reduce the available cache and give false later select results + init_cache_method_args = ( + service_name, channel, symbol, databases.SQLiteDatabase.DEFAULT_SIZE, timestamps, operations + ) + self.chronological_cache.set( + await set_cache_method(*init_cache_method_args), + "timestamp", + cache_key + ) + return self.chronological_cache.get(inferior_timestamp, superior_timestamp, cache_key) diff --git a/packages/backtesting/octobot_backtesting/time/__init__.py b/packages/backtesting/octobot_backtesting/time/__init__.py new file mode 100644 index 0000000000..115835e250 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/time/__init__.py @@ -0,0 +1,36 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_backtesting.time import time_manager +from octobot_backtesting.time import channel + +from octobot_backtesting.time.time_manager import ( + TimeManager, +) + +from octobot_backtesting.time.channel import ( + TimeProducer, + TimeConsumer, + TimeChannel, + TimeUpdater, +) + +__all__ = [ + "TimeManager", + "TimeProducer", + "TimeConsumer", + "TimeChannel", + "TimeUpdater", +] diff --git a/packages/backtesting/octobot_backtesting/time/channel/__init__.py b/packages/backtesting/octobot_backtesting/time/channel/__init__.py new file mode 100644 index 0000000000..d23cb09709 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/time/channel/__init__.py @@ -0,0 +1,34 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.time.channel import time +from octobot_backtesting.time.channel import time_updater + +from octobot_backtesting.time.channel.time import ( + TimeProducer, + TimeConsumer, + TimeChannel, +) +from octobot_backtesting.time.channel.time_updater import ( + TimeUpdater, +) + +__all__ = [ + "TimeProducer", + "TimeConsumer", + "TimeChannel", + "TimeUpdater", +] diff --git a/packages/backtesting/octobot_backtesting/time/channel/time.py b/packages/backtesting/octobot_backtesting/time/channel/time.py new file mode 100644 index 0000000000..7c837c8d4b --- /dev/null +++ b/packages/backtesting/octobot_backtesting/time/channel/time.py @@ -0,0 +1,64 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio + +import async_channel.channels as channels +import async_channel.consumer as consumers +import async_channel.producer as producers + + +class TimeProducer(producers.Producer): + def __init__(self, channel, backtesting): + super().__init__(channel) + self.backtesting = backtesting + + async def push(self, timestamp): + await self.perform(timestamp) + + async def perform(self, timestamp): + try: + await self.backtesting.handle_time_update(timestamp) + await self.send(timestamp) + except asyncio.CancelledError: + self.logger.info("Update tasks cancelled.") + except Exception as e: + self.logger.exception(e, True, f"Exception when triggering time update: {e}") + + async def send(self, timestamp, **kwargs): + for consumer in self.channel.get_consumer_from_filters({}): + await consumer.queue.put({ + "timestamp": timestamp + }) + + +class TimeConsumer(consumers.SupervisedConsumer): + pass + + +class TimeChannel(channels.Channel): + PRODUCER_CLASS = TimeProducer + CONSUMER_CLASS = TimeConsumer + + @classmethod + def get_name(cls, identifier=None) -> str: + """ + Override of the default implementation to avoid naming conflicts + :returns the channel name + """ + if identifier is None: + raise NotImplementedError("Provide identifier param or use backtesting.get_time_chan_name() instead") + return f"{channels.Channel.get_name()}#{identifier}" + diff --git a/packages/backtesting/octobot_backtesting/time/channel/time_updater.py b/packages/backtesting/octobot_backtesting/time/channel/time_updater.py new file mode 100644 index 0000000000..39dc85e177 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/time/channel/time_updater.py @@ -0,0 +1,93 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import time + +import octobot_backtesting.channels_manager as channels_manager +import octobot_backtesting.time.channel.time as time_channel + + +class TimeUpdater(time_channel.TimeProducer): + def __init__(self, channel, backtesting): + super().__init__(channel, backtesting) + self.backtesting = backtesting + self.time_manager = backtesting.time_manager + self.starting_time = time.time() + self.simulation_duration = 0 + self.finished_event = asyncio.Event() + + self.channels_manager = None + + async def start(self): + self.channels_manager = channels_manager.ChannelsManager( + self.backtesting.exchange_ids, + self.backtesting.matrix_id, + self.backtesting.get_time_chan_name(), + + ) + await self.channels_manager.initialize() + cleared_producers = False + while not self.should_stop: + try: + current_timestamp = self.time_manager.current_timestamp + await self.push(self.time_manager.current_timestamp) + + self.logger.info(f"Progress : {round(min(self.backtesting.get_progress(), 1) * 100, 2)}% " + f"[{current_timestamp}]") + + # Call synchronous channels callbacks + await self.channels_manager.handle_new_iteration(current_timestamp) + + if self.time_manager.has_finished(): + self.logger.debug("Maximum timestamp hit, stopping...") + self.simulation_duration = time.time() - self.starting_time + self.logger.info(f"Lasted {round(self.simulation_duration, 3)}s") + await self.stop() + else: + # jump to the next time point + self.time_manager.next_timestamp() + if not cleared_producers: + self.channels_manager.clear_empty_channels_producers() + self.channels_manager.update_producers_by_priority_levels() + cleared_producers = True + except Exception as e: + self.logger.exception(e, True, f"Fail to update time : {e}") + await self.backtesting.delete_time_channel() + self.channels_manager.flush() + self.finished_event.set() + self.backtesting = None + + async def stop(self) -> None: + self.channels_manager.stop() + await super().stop() + + async def modify(self, set_timestamp=None, minimum_timestamp=None, maximum_timestamp=None) -> None: + if set_timestamp is not None: + self.time_manager.set_current_timestamp(set_timestamp) + + if minimum_timestamp is not None: + self.time_manager.set_minimum_timestamp(minimum_timestamp) + self.time_manager.set_current_timestamp(minimum_timestamp) + + if maximum_timestamp is not None: + self.time_manager.set_maximum_timestamp(maximum_timestamp) + + async def run(self) -> None: + """ + Overrides default producer run() because producer task wont be created in synchronized context + """ + await self.channel.register_producer(self) + self.create_task() diff --git a/packages/backtesting/octobot_backtesting/time/time_manager.py b/packages/backtesting/octobot_backtesting/time/time_manager.py new file mode 100644 index 0000000000..87ea85e929 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/time/time_manager.py @@ -0,0 +1,110 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import collections +import time + +import octobot_commons.logging as logging + + +class TimeManager: + DEFAULT_TIMESTAMP_INIT_VALUE = -1 + DEFAULT_FINISH_TIME_DELTA = 1000 + DEFAULT_TIME_INTERVAL = 50 + + def __init__(self, config): + self.logger = logging.get_logger(self.__class__.__name__) + self.config = config + + self.time_initialized = False + + self.starting_timestamp = self.DEFAULT_TIMESTAMP_INIT_VALUE + self.finishing_timestamp = self.DEFAULT_TIMESTAMP_INIT_VALUE + self.current_timestamp = self.DEFAULT_TIMESTAMP_INIT_VALUE + self.time_interval = self.DEFAULT_TIME_INTERVAL + + self.timestamp_accept_check_callback = None + self.timestamps_whitelist: set = None + self._timestamps_whitelist_queue: collections.deque = None + + def initialize(self): + self._reset_time() + self.time_initialized = True + + def start(self): + if self.starting_timestamp == self.DEFAULT_TIMESTAMP_INIT_VALUE: + self.starting_timestamp = time.time() + + if self.finishing_timestamp == self.DEFAULT_TIMESTAMP_INIT_VALUE: + self.finishing_timestamp = self.starting_timestamp + self.DEFAULT_FINISH_TIME_DELTA + + self.current_timestamp = self.starting_timestamp + + def _reset_time(self): + self.set_current_timestamp(0) + + def has_finished(self): + if self._timestamps_whitelist_queue is not None: + if len(self._timestamps_whitelist_queue) == 0: + return True + return self.current_timestamp >= self.finishing_timestamp + + def next_timestamp(self): + self.current_timestamp += self.time_interval + if self._timestamps_whitelist_queue is not None: + # when timestamps_whitelist is set: fast forward time to only trigger whitelisted timestamps + while self._should_skip_current_timestamp() and self.current_timestamp <= self.finishing_timestamp: + self.current_timestamp += self.time_interval + + def _should_skip_current_timestamp(self): + if self.timestamp_accept_check_callback is not None and self.timestamp_accept_check_callback(): + return False + return not self._has_current_timestamp_in_whitelist() + + def _has_current_timestamp_in_whitelist(self): + if self._timestamps_whitelist_queue: + whitelist_timestamp = self._timestamps_whitelist_queue[0] + while whitelist_timestamp < self.current_timestamp and self._timestamps_whitelist_queue: + whitelist_timestamp = self._timestamps_whitelist_queue.popleft() + return whitelist_timestamp == self.current_timestamp + return False + + def set_minimum_timestamp(self, minimum_timestamp): + if self.starting_timestamp == self.DEFAULT_TIMESTAMP_INIT_VALUE or self.starting_timestamp > minimum_timestamp: + self.starting_timestamp = minimum_timestamp + self.logger.info(f"Set minimum timestamp to : {minimum_timestamp}") + + def set_maximum_timestamp(self, maximum_timestamp): + if self.finishing_timestamp == self.DEFAULT_TIMESTAMP_INIT_VALUE or \ + self.finishing_timestamp < maximum_timestamp: + self.finishing_timestamp = maximum_timestamp + self.logger.info(f"Set maximum timestamp to : {maximum_timestamp}") + + def set_current_timestamp(self, timestamp): + self.current_timestamp = timestamp + + def get_total_iteration(self): + return (self.finishing_timestamp - self.starting_timestamp) / self.time_interval + + def get_remaining_iteration(self): + return (self.finishing_timestamp - self.current_timestamp) / self.time_interval + + def register_timestamp_whitelist(self, timestamps, check_callback, append_to_whitelist=False): + self.timestamp_accept_check_callback = check_callback + if append_to_whitelist and self.timestamps_whitelist: + self.timestamps_whitelist = sorted(set(self.timestamps_whitelist + timestamps)) + else: + self.timestamps_whitelist = sorted(set(timestamps)) + self._timestamps_whitelist_queue = collections.deque(self.timestamps_whitelist) diff --git a/packages/backtesting/octobot_backtesting/util/__init__.py b/packages/backtesting/octobot_backtesting/util/__init__.py new file mode 100644 index 0000000000..bb694f1706 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/util/__init__.py @@ -0,0 +1,27 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.util import backtesting_util + +from octobot_backtesting.util.backtesting_util import ( + create_importer_from_backtesting_file_name, + get_default_importer, +) + +__all__ = [ + "create_importer_from_backtesting_file_name", + "get_default_importer", +] diff --git a/packages/backtesting/octobot_backtesting/util/backtesting_util.py b/packages/backtesting/octobot_backtesting/util/backtesting_util.py new file mode 100644 index 0000000000..14f47ce696 --- /dev/null +++ b/packages/backtesting/octobot_backtesting/util/backtesting_util.py @@ -0,0 +1,55 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import typing + +import octobot_backtesting.constants as constants +import octobot_backtesting.collectors as collectors +import octobot_backtesting.importers as importers +import octobot_commons.tentacles_management as tentacles_management +import octobot_commons.logging as commons_logging + + +async def create_importer_from_backtesting_file_name(config, + backtesting_file, + default_importer=None) -> typing.Optional[importers.DataImporter]: + collector_klass = tentacles_management.get_deep_class_from_parent_subclasses( + _parse_class_name_from_backtesting_file(backtesting_file), collectors.DataCollector) + if collector_klass: + importer_class = collector_klass.IMPORTER + else: + commons_logging.get_logger().debug(f"No specific exchange importer identified for '{backtesting_file}' " + f"(maybe its filename has been changed). Using {default_importer.__name__}.") + importer_class = default_importer + importer = importer_class(config, backtesting_file) if importer_class else None + + if not importer: + return None + + await importer.initialize() + return importer + + +def get_default_importer(default_collector_class=collectors.AbstractExchangeHistoryCollector) -> importers.DataImporter: + available_collectors = tentacles_management.get_all_classes_from_parent(default_collector_class) + try: + return available_collectors[0].IMPORTER + except KeyError: + return None + + +def _parse_class_name_from_backtesting_file(backtesting_file): + return os.path.basename(backtesting_file).split(constants.BACKTESTING_DATA_FILE_SEPARATOR)[0] diff --git a/packages/backtesting/requirements.txt b/packages/backtesting/requirements.txt new file mode 100644 index 0000000000..044fc6dbb6 --- /dev/null +++ b/packages/backtesting/requirements.txt @@ -0,0 +1,2 @@ +# async http requests +aiohttp>=3.9.5 diff --git a/packages/backtesting/standard.rc b/packages/backtesting/standard.rc new file mode 100644 index 0000000000..b0ee172341 --- /dev/null +++ b/packages/backtesting/standard.rc @@ -0,0 +1,510 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10.0 + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,tests + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +# suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=I, R, C, W, import-error + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +#notes-rgx= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/packages/backtesting/tests/__init__.py b/packages/backtesting/tests/__init__.py new file mode 100644 index 0000000000..5335228a5d --- /dev/null +++ b/packages/backtesting/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/backtesting/tests/api/__init__.py b/packages/backtesting/tests/api/__init__.py new file mode 100644 index 0000000000..5335228a5d --- /dev/null +++ b/packages/backtesting/tests/api/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/backtesting/tests/api/test_backtesting.py b/packages/backtesting/tests/api/test_backtesting.py new file mode 100644 index 0000000000..3c6f24dc45 --- /dev/null +++ b/packages/backtesting/tests/api/test_backtesting.py @@ -0,0 +1,57 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.api as api + + +def _get_default_config(): + return { + "crypto-currencies": { + "Bitcoin": { + "pairs": [ + "BTC/USD" + ] + } + }, + "trader-simulator": { + "enabled": True, + "fees": { + "maker": 0.07, + "taker": 0.07 + }, + "starting-portfolio": { + "BTC": 0.5, + "USDT": 5000 + } + }, + "trading": { + "reference-market": "BTC", + "risk": 1 + } + } + + +def test_is_backtesting_enabled(): + assert api.is_backtesting_enabled({}) is False + assert api.is_backtesting_enabled({"backtesting": {}}) is False + assert api.is_backtesting_enabled({"backtesting": {"enabled": False}}) is False + assert api.is_backtesting_enabled({"backtesting": {"enabled": True}}) is True + + +def test_get_backtesting_data_files(): + assert api.get_backtesting_data_files({}) == [] + assert api.get_backtesting_data_files({"backtesting": {}}) == [] + assert api.get_backtesting_data_files({"backtesting": {"files": []}}) == [] + assert api.get_backtesting_data_files({"backtesting": {"files": ["t", "1"]}}) == ["t", "1"] diff --git a/packages/backtesting/tests/api/test_data_file.py b/packages/backtesting/tests/api/test_data_file.py new file mode 100644 index 0000000000..56867672a4 --- /dev/null +++ b/packages/backtesting/tests/api/test_data_file.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_backtesting.api.data_file import get_all_available_data_files + + +def test_get_all_available_data_files(): + assert get_all_available_data_files() == [] diff --git a/packages/backtesting/tests/api/test_data_file_converters.py b/packages/backtesting/tests/api/test_data_file_converters.py new file mode 100644 index 0000000000..cd6db42f3d --- /dev/null +++ b/packages/backtesting/tests/api/test_data_file_converters.py @@ -0,0 +1,22 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +from octobot_backtesting.api.data_file_converters import convert_data_file + + +@pytest.mark.asyncio +async def test_convert_data_file_without_converter(): + assert await convert_data_file(None) is None diff --git a/packages/backtesting/tests/api/test_exchange_data_collector.py b/packages/backtesting/tests/api/test_exchange_data_collector.py new file mode 100644 index 0000000000..ad272a4ee8 --- /dev/null +++ b/packages/backtesting/tests/api/test_exchange_data_collector.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.api as api + + +def test_import(): + assert api.exchange_historical_data_collector_factory is not None diff --git a/packages/backtesting/tests/api/test_importer.py b/packages/backtesting/tests/api/test_importer.py new file mode 100644 index 0000000000..c7700831f0 --- /dev/null +++ b/packages/backtesting/tests/api/test_importer.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.api as api + + +def test_import(): + assert api.get_data_timestamp_interval is not None diff --git a/packages/backtesting/tests/comparators/__init__.py b/packages/backtesting/tests/comparators/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/backtesting/tests/comparators/test_data_comparator.py b/packages/backtesting/tests/comparators/test_data_comparator.py new file mode 100644 index 0000000000..551796de10 --- /dev/null +++ b/packages/backtesting/tests/comparators/test_data_comparator.py @@ -0,0 +1,513 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import os +import sqlite3 +import tempfile +from contextlib import contextmanager + +import pytest + +from octobot_backtesting.comparators.data_comparator import DataComparator +import octobot_backtesting.api as backtesting_api +import octobot_backtesting.constants as constants +import octobot_backtesting.enums as enums + +pytestmark = pytest.mark.asyncio + +EXCHANGE = "binance" +SYMBOLS = ["ETH/BTC", "BTC/USDT"] +TIME_FRAMES = ["1h", "4h"] +# timestamps in seconds (as stored in the description table) +START_TS_S = 1680000000 +END_TS_S = 1690000000 +# same timestamps expressed in milliseconds (as passed by callers) +START_TS_MS = START_TS_S * 1000 +END_TS_MS = END_TS_S * 1000 + +SERVICES = ["RedditService", "TwitterService"] + +def _make_exchange_db(directory, file_name, + exchange=EXCHANGE, + symbols=None, + time_frames=None, + start_ts=START_TS_S, + end_ts=END_TS_S, + version=constants.CURRENT_VERSION) -> str: + """Create a minimal v2.0 exchange .data file and return its path.""" + symbols = symbols if symbols is not None else list(SYMBOLS) + time_frames = time_frames if time_frames is not None else list(TIME_FRAMES) + file_path = os.path.join(directory, file_name) + conn = sqlite3.connect(file_path) + conn.execute( + "CREATE TABLE description " + "(timestamp REAL, version TEXT, type TEXT, exchange TEXT, " + "symbols TEXT, time_frames TEXT, start_timestamp INTEGER, end_timestamp INTEGER)" + ) + conn.execute( + "INSERT INTO description VALUES (?,?,?,?,?,?,?,?)", + (1700000000.0, version, enums.DataType.EXCHANGE.value, exchange, + json.dumps(symbols), json.dumps(time_frames), start_ts, end_ts), + ) + # get_database_description v2.0 counts ohlcv rows to compute candles_length + min_tf = sorted(time_frames)[0] + conn.execute( + "CREATE TABLE ohlcv " + "(timestamp REAL, exchange_name TEXT, cryptocurrency TEXT, symbol TEXT, time_frame TEXT, candle TEXT)" + ) + for sym in symbols: + conn.execute( + "INSERT INTO ohlcv VALUES (?,?,?,?,?,?)", + (1700000000.0, exchange, "Crypto", sym, min_tf, + json.dumps([1.0, 2.0, 0.5, 1.5, 100.0, 1700000000.0])), + ) + conn.commit() + conn.close() + return file_path + + +def _make_social_db(directory, file_name, + services=None, + symbols=None, + start_ts=START_TS_S, + end_ts=END_TS_S, + version=constants.CURRENT_VERSION) -> str: + services = services if services is not None else list(SERVICES) + symbols = symbols if symbols is not None else [] + file_path = os.path.join(directory, file_name) + conn = sqlite3.connect(file_path) + # column order matches social_collector._create_description: + # timestamp, version, type, sources, symbols, start_timestamp, end_timestamp, services + conn.execute( + "CREATE TABLE description " + "(timestamp REAL, version TEXT, type TEXT, sources TEXT, " + "symbols TEXT, start_timestamp INTEGER, end_timestamp INTEGER, services TEXT)" + ) + conn.execute( + "INSERT INTO description VALUES (?,?,?,?,?,?,?,?)", + (1700000000.0, version, enums.DataType.SOCIAL.value, + json.dumps([]), json.dumps([str(s) for s in symbols]), + start_ts, end_ts, json.dumps(services)), + ) + conn.execute( + "CREATE TABLE social_events " + "(timestamp REAL, service_name TEXT, channel TEXT, symbol TEXT, payload TEXT)" + ) + conn.commit() + conn.close() + return file_path + + +@contextmanager +def _exchange_data_dir(**kwargs): + with tempfile.TemporaryDirectory() as tmpdir: + _make_exchange_db(tmpdir, "ExchangeHistoryDataCollector_test.data", **kwargs) + yield tmpdir + + +@contextmanager +def _social_data_dir(**kwargs): + with tempfile.TemporaryDirectory() as tmpdir: + _make_social_db(tmpdir, "SocialHistoryDataCollector_test.data", **kwargs) + yield tmpdir + + +class TestExchangeDescriptionMatches: + def setup_method(self): + self.comparator = DataComparator() + + def _make_desc(self, exchange=EXCHANGE, symbols=None, time_frames=None, + start_ts=START_TS_S, end_ts=END_TS_S, + version=constants.CURRENT_VERSION, + data_type=enums.DataType.EXCHANGE.value): + return { + enums.DataFormatKeys.DATA_TYPE.value: data_type, + enums.DataFormatKeys.VERSION.value: version, + enums.DataFormatKeys.EXCHANGE.value: exchange, + enums.DataFormatKeys.SYMBOLS.value: symbols if symbols is not None else list(SYMBOLS), + enums.DataFormatKeys.TIME_FRAMES.value: time_frames if time_frames is not None else list(TIME_FRAMES), + enums.DataFormatKeys.START_TIMESTAMP.value: start_ts, + enums.DataFormatKeys.END_TIMESTAMP.value: end_ts, + } + + def test_exact_match(self): + desc = self._make_desc() + assert self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_symbols_order_independent(self): + desc = self._make_desc(symbols=["BTC/USDT", "ETH/BTC"]) + assert self.comparator.exchange_description_matches( + desc, EXCHANGE, ["ETH/BTC", "BTC/USDT"], TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_time_frames_order_independent(self): + desc = self._make_desc(time_frames=["4h", "1h"]) + assert self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, ["1h", "4h"], START_TS_MS, END_TS_MS + ) + + def test_unconstrained_timestamps_match(self): + desc = self._make_desc(start_ts=0, end_ts=0) + assert self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, None, None + ) + + def test_wrong_exchange(self): + desc = self._make_desc(exchange="kraken") + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_wrong_symbols(self): + desc = self._make_desc(symbols=["BTC/USDT"]) + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_wrong_time_frames(self): + desc = self._make_desc(time_frames=["1d"]) + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_wrong_start_timestamp(self): + desc = self._make_desc(start_ts=START_TS_S + 100) + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_wrong_end_timestamp(self): + desc = self._make_desc(end_ts=END_TS_S + 100) + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_wrong_data_type(self): + desc = self._make_desc(data_type=enums.DataType.SOCIAL.value) + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_wrong_version(self): + desc = self._make_desc(version="1.0") + assert not self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + + def test_timestamps_as_strings(self): + # SQLite databases (via octobot_commons) may return timestamps as strings + # instead of ints; the comparator must still match correctly. + desc = self._make_desc(start_ts=str(START_TS_S), end_ts=str(END_TS_S)) + assert self.comparator.exchange_description_matches( + desc, EXCHANGE, SYMBOLS, TIME_FRAMES, START_TS_MS, END_TS_MS + ) + +class TestSocialDescriptionMatches: + def setup_method(self): + self.comparator = DataComparator() + + def _make_desc(self, services=None, symbols=None, + start_ts=START_TS_S, end_ts=END_TS_S, + version=constants.CURRENT_VERSION, + data_type=enums.DataType.SOCIAL.value): + return { + enums.DataFormatKeys.DATA_TYPE.value: data_type, + enums.DataFormatKeys.VERSION.value: version, + enums.DataFormatKeys.SERVICES.value: services if services is not None else list(SERVICES), + enums.DataFormatKeys.SYMBOLS.value: symbols if symbols is not None else [], + enums.DataFormatKeys.START_TIMESTAMP.value: start_ts, + enums.DataFormatKeys.END_TIMESTAMP.value: end_ts, + } + + def test_exact_match(self): + desc = self._make_desc() + assert self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + def test_services_order_independent(self): + desc = self._make_desc(services=["TwitterService", "RedditService"]) + assert self.comparator.social_description_matches( + desc, ["RedditService", "TwitterService"], [], START_TS_MS, END_TS_MS + ) + + def test_existing_services_superset_matches_requested_services(self): + desc = self._make_desc(services=["AlternativeMeServiceFeed", "AlternativeMeService"]) + assert self.comparator.social_description_matches( + desc, ["AlternativeMeServiceFeed"], [], START_TS_MS, END_TS_MS + ) + + def test_symbols_order_independent(self): + desc = self._make_desc(symbols=["BTC/USDT", "ETH/BTC"]) + assert self.comparator.social_description_matches( + desc, SERVICES, ["ETH/BTC", "BTC/USDT"], START_TS_MS, END_TS_MS + ) + + def test_existing_all_symbols_matches_requested_symbols(self): + desc = self._make_desc(symbols=[]) + assert self.comparator.social_description_matches( + desc, SERVICES, ["ETH/BTC"], START_TS_MS, END_TS_MS + ) + + def test_existing_symbol_subset_does_not_match_all_symbols_request(self): + desc = self._make_desc(symbols=["ETH/BTC"]) + assert not self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + def test_unconstrained_timestamps_match(self): + desc = self._make_desc(start_ts=0, end_ts=0) + assert self.comparator.social_description_matches( + desc, SERVICES, [], None, None + ) + + def test_wrong_services(self): + desc = self._make_desc(services=["TelegramService"]) + assert not self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + def test_wrong_symbols(self): + desc = self._make_desc(symbols=["ETH/BTC"]) + assert not self.comparator.social_description_matches( + desc, SERVICES, ["BTC/USDT"], START_TS_MS, END_TS_MS + ) + + def test_wrong_start_timestamp(self): + desc = self._make_desc(start_ts=START_TS_S + 100) + assert not self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + def test_wrong_data_type(self): + desc = self._make_desc(data_type=enums.DataType.EXCHANGE.value) + assert not self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + def test_wrong_version(self): + desc = self._make_desc(version="1.0") + assert not self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + def test_timestamps_as_strings(self): + # SQLite databases (via octobot_commons) may return timestamps as strings + desc = self._make_desc(start_ts=str(START_TS_S), end_ts=str(END_TS_S)) + assert self.comparator.social_description_matches( + desc, SERVICES, [], START_TS_MS, END_TS_MS + ) + + +class TestDescriptionMatches: + def setup_method(self): + self.comparator = DataComparator() + + def test_dispatches_to_exchange(self): + desc = { + enums.DataFormatKeys.DATA_TYPE.value: enums.DataType.EXCHANGE.value, + enums.DataFormatKeys.VERSION.value: constants.CURRENT_VERSION, + enums.DataFormatKeys.EXCHANGE.value: EXCHANGE, + enums.DataFormatKeys.SYMBOLS.value: list(SYMBOLS), + enums.DataFormatKeys.TIME_FRAMES.value: list(TIME_FRAMES), + enums.DataFormatKeys.START_TIMESTAMP.value: START_TS_S, + enums.DataFormatKeys.END_TIMESTAMP.value: END_TS_S, + } + assert self.comparator.description_matches( + desc, + exchange_name=EXCHANGE, symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + + def test_dispatches_to_social(self): + desc = { + enums.DataFormatKeys.DATA_TYPE.value: enums.DataType.SOCIAL.value, + enums.DataFormatKeys.VERSION.value: constants.CURRENT_VERSION, + enums.DataFormatKeys.SERVICES.value: list(SERVICES), + enums.DataFormatKeys.SYMBOLS.value: [], + enums.DataFormatKeys.START_TIMESTAMP.value: START_TS_S, + enums.DataFormatKeys.END_TIMESTAMP.value: END_TS_S, + } + assert self.comparator.description_matches( + desc, + services=SERVICES, symbols=[], start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + + def test_unknown_data_type_returns_false(self): + desc = {enums.DataFormatKeys.DATA_TYPE.value: "unknown"} + assert not self.comparator.description_matches(desc) + + +async def test_find_matching_data_file_exchange_match(): + with _exchange_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + exchange_name=EXCHANGE, symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result == "ExchangeHistoryDataCollector_test.data" + + +async def test_find_matching_data_file_exchange_no_match_exchange(): + with _exchange_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + exchange_name="kraken", symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_find_matching_data_file_exchange_no_match_symbols(): + with _exchange_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + exchange_name=EXCHANGE, symbols=["XRP/USDT"], time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_find_matching_data_file_exchange_no_match_time_frames(): + with _exchange_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + exchange_name=EXCHANGE, symbols=SYMBOLS, time_frames=["1d"], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_find_matching_data_file_exchange_no_match_timestamps(): + with _exchange_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + exchange_name=EXCHANGE, symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=(START_TS_S + 3600) * 1000, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_find_matching_data_file_exchange_empty_dir(): + with tempfile.TemporaryDirectory() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + exchange_name=EXCHANGE, symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_find_matching_data_file_social_match(): + with _social_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + services=SERVICES, symbols=[], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result == "SocialHistoryDataCollector_test.data" + + +async def test_find_matching_data_file_social_no_match_services(): + with _social_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + services=["TelegramService"], symbols=[], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_find_matching_data_file_social_match_when_existing_services_is_superset(): + with _social_data_dir(services=["AlternativeMeServiceFeed", "AlternativeMeService"]) as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + services=["AlternativeMeServiceFeed"], symbols=[], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result == "SocialHistoryDataCollector_test.data" + + +async def test_find_matching_data_file_social_match_when_existing_is_all_symbols(): + with _social_data_dir(symbols=[]) as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + services=SERVICES, symbols=["ETH/BTC"], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result == "SocialHistoryDataCollector_test.data" + + +async def test_find_matching_data_file_social_no_match_timestamps(): + with _social_data_dir() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + services=SERVICES, symbols=[], + start_timestamp=START_TS_MS, end_timestamp=(END_TS_S + 3600) * 1000, + ) + assert result is None + + +async def test_find_matching_data_file_social_empty_dir(): + with tempfile.TemporaryDirectory() as tmpdir: + comparator = DataComparator(data_path=tmpdir) + result = await comparator.find_matching_data_file( + services=SERVICES, symbols=[], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_api_find_matching_data_file_exchange(): + with _exchange_data_dir() as tmpdir: + result = await backtesting_api.find_matching_data_file( + data_path=tmpdir, + exchange_name=EXCHANGE, symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result == "ExchangeHistoryDataCollector_test.data" + + +async def test_api_find_matching_data_file_exchange_no_match(): + with _exchange_data_dir() as tmpdir: + result = await backtesting_api.find_matching_data_file( + data_path=tmpdir, + exchange_name="kraken", symbols=SYMBOLS, time_frames=TIME_FRAMES, + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None + + +async def test_api_find_matching_data_file_social(): + with _social_data_dir() as tmpdir: + result = await backtesting_api.find_matching_data_file( + data_path=tmpdir, + services=SERVICES, symbols=[], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result == "SocialHistoryDataCollector_test.data" + + +async def test_api_find_matching_data_file_social_no_match(): + with _social_data_dir() as tmpdir: + result = await backtesting_api.find_matching_data_file( + data_path=tmpdir, + services=["TelegramService"], symbols=[], + start_timestamp=START_TS_MS, end_timestamp=END_TS_MS, + ) + assert result is None diff --git a/packages/backtesting/tests/importers/__init__.py b/packages/backtesting/tests/importers/__init__.py new file mode 100644 index 0000000000..5335228a5d --- /dev/null +++ b/packages/backtesting/tests/importers/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/backtesting/tests/importers/test_exchange_importer.py b/packages/backtesting/tests/importers/test_exchange_importer.py new file mode 100644 index 0000000000..0736c3b6fb --- /dev/null +++ b/packages/backtesting/tests/importers/test_exchange_importer.py @@ -0,0 +1,158 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import os +from contextlib import asynccontextmanager + + +import octobot_commons.errors as commons_errors +from octobot_backtesting.importers.exchanges.exchange_importer import ExchangeDataImporter +from octobot_backtesting.enums import ExchangeDataTables +from octobot_commons.enums import TimeFrames + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +# use context manager instead of fixture to prevent pytest threads issues +@asynccontextmanager +async def get_importer(): + database_file = os.path.join("tests", "static", "ExchangeHistoryDataCollector_1589740606.4862757.data") + importer = ExchangeDataImporter({}, database_file) + try: + await importer.initialize() + yield importer + finally: + await importer.stop() + + +async def test_initialize(): + async with get_importer() as importer: + assert importer.exchange_name == "binance" + assert importer.symbols == ["ETH/BTC"] + assert importer.time_frames == [TimeFrames(tf) + for tf in ("1m", "3m", "5m", "15m", "30m", "1h", "2h", + "4h", "6h", "8h", "12h", "1d", "3d", "1w", "1M")] + assert importer.available_data_types == [ExchangeDataTables.OHLCV] + + +async def test_get_data_timestamp_interval(): + async with get_importer() as importer: + # over all data + assert await importer.get_data_timestamp_interval() == (1589710680, 1590883200) + # for 1h time frame + assert await importer.get_data_timestamp_interval("1h") == (1587945600, 1589742000) + # for 1M time frame + assert await importer.get_data_timestamp_interval("1M") == (1501459200, 1590883200) + + +async def test_get_ohlcv(): + async with get_importer() as importer: + # default values + ohlcv = await importer.get_ohlcv() + assert len(ohlcv) == 500 + # same symbol, cryptocurrency and exchange in all data (one symbol in datafile) + exchange = ohlcv[0][1] + cryptocurrency = ohlcv[0][2] + symbol = ohlcv[0][3] + assert all(data[1] == exchange for data in ohlcv) + assert all(data[2] == cryptocurrency for data in ohlcv) + assert all(data[3] == symbol for data in ohlcv) + # same time frame + assert all(data[4] == "1h" for data in ohlcv) + # valid ohlcv data + assert all( + len(data[5]) == 6 and all(isinstance(element, float) + for element in data[5]) + for data in ohlcv) + + # custom values + ohlcv = await importer.get_ohlcv(exchange_name="binance", symbol="ETH/BTC", + time_frame=TimeFrames.ONE_WEEK, limit=10) + assert len(ohlcv) == 10 + assert all(data[4] == TimeFrames.ONE_WEEK.value for data in ohlcv) + + # unknown values + ohlcv = await importer.get_ohlcv(exchange_name="binance", symbol="ETH/XXX", time_frame=TimeFrames.ONE_WEEK) + assert len(ohlcv) == 0 + + +async def test_get_ohlcv_from_timestamps(): + async with get_importer() as importer: + # default values + ohlcv = await importer.get_ohlcv_from_timestamps() + assert len(ohlcv) == 500 + assert all(data[4] == "1h" for data in ohlcv) + # timestamp select + ohlcv = await importer.get_ohlcv_from_timestamps(inferior_timestamp=1587978000, superior_timestamp=1588060800) + assert len(ohlcv) == 24 + assert all(1587978000 <= data[0] <= 1588060800 for data in ohlcv) + + +async def test_get_ticker(): + async with get_importer() as importer: + # TODO complete this test when available datafile with ticker data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_ticker() + + +async def test_get_ticker_from_timestamps(): + async with get_importer() as importer: + # TODO complete this test when available datafile with ticker data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_ticker_from_timestamps() + + +async def test_get_order_book(): + async with get_importer() as importer: + # TODO complete this test when available datafile with order book data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_order_book() + + +async def test_get_order_book_from_timestamps(): + async with get_importer() as importer: + # TODO complete this test when available datafile with order book data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_order_book_from_timestamps() + + +async def test_get_recent_trades(): + async with get_importer() as importer: + # TODO complete this test when available datafile with recent trades data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_recent_trades() + + +async def test_get_recent_trades_from_timestamps(): + async with get_importer() as importer: + # TODO complete this test when available datafile with recent trades data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_recent_trades_from_timestamps() + + +async def test_get_kline(): + async with get_importer() as importer: + # TODO complete this test when available datafile with kline data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_kline() + + +async def test_get_kline_from_timestamps(): + async with get_importer() as importer: + # TODO complete this test when available datafile with kline data + with pytest.raises(commons_errors.DatabaseNotFoundError): + await importer.get_kline_from_timestamps() diff --git a/packages/backtesting/tests/producers/__init__.py b/packages/backtesting/tests/producers/__init__.py new file mode 100644 index 0000000000..5335228a5d --- /dev/null +++ b/packages/backtesting/tests/producers/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/backtesting/tests/static/ExchangeHistoryDataCollector_1589740606.4862757.data b/packages/backtesting/tests/static/ExchangeHistoryDataCollector_1589740606.4862757.data new file mode 100644 index 0000000000..f06877d18e Binary files /dev/null and b/packages/backtesting/tests/static/ExchangeHistoryDataCollector_1589740606.4862757.data differ diff --git a/packages/backtesting/tests/static/second_ExchangeHistoryDataCollector_1589740606.4862757.data b/packages/backtesting/tests/static/second_ExchangeHistoryDataCollector_1589740606.4862757.data new file mode 100644 index 0000000000..f06877d18e Binary files /dev/null and b/packages/backtesting/tests/static/second_ExchangeHistoryDataCollector_1589740606.4862757.data differ diff --git a/packages/backtesting/tests/util/__init__.py b/packages/backtesting/tests/util/__init__.py new file mode 100644 index 0000000000..5335228a5d --- /dev/null +++ b/packages/backtesting/tests/util/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/backtesting/tests/util/test_backtesting_util.py b/packages/backtesting/tests/util/test_backtesting_util.py new file mode 100644 index 0000000000..a2a902403b --- /dev/null +++ b/packages/backtesting/tests/util/test_backtesting_util.py @@ -0,0 +1,24 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +if not os.getenv('CYTHON_IGNORE'): + from octobot_backtesting.util.backtesting_util import _parse_class_name_from_backtesting_file + + +def test_parse_class_name_from_backtesting_file(): + if not os.getenv('CYTHON_IGNORE'): + assert _parse_class_name_from_backtesting_file("ExchangeHistoryDataCollector_1589740606.4862757") == "ExchangeHistoryDataCollector" diff --git a/packages/binary/.dockerignore b/packages/binary/.dockerignore new file mode 100644 index 0000000000..49355e9aa0 --- /dev/null +++ b/packages/binary/.dockerignore @@ -0,0 +1,102 @@ +# dev +.idea + +# CI files +.coveragerc +.coveralls.yml +.travis.yml +appveyor.yml +renovate.json +setup.cfg +tox.ini + +# octobot +tentacles +user +logs + +# Git +.git +Dockerfile +.DS_Store +.gitignore +.dockerignore +.github + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml + +# Flask stuff: +instance/ +.webassets-cache + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# Environments +.env +.venv +env/ +venv/ +ENV/ diff --git a/packages/binary/.gitignore b/packages/binary/.gitignore new file mode 100644 index 0000000000..0872ba2f0a --- /dev/null +++ b/packages/binary/.gitignore @@ -0,0 +1,100 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +\.idea/ diff --git a/packages/binary/BUILD b/packages/binary/BUILD new file mode 100644 index 0000000000..8c0f1b47f9 --- /dev/null +++ b/packages/binary/BUILD @@ -0,0 +1,3 @@ +python_requirements(name="reqs") + +python_sources(name="octobot_binary", sources=["octobot_binary/**/*.py"]) diff --git a/packages/binary/LICENSE b/packages/binary/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/binary/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/binary/README.md b/packages/binary/README.md new file mode 100644 index 0000000000..a5c924c1db --- /dev/null +++ b/packages/binary/README.md @@ -0,0 +1,31 @@ +# OctoBot-Binary +[![Release](https://img.shields.io/github/downloads/Drakkar-Software/OctoBot-Binary/total.svg)](https://github.com/Drakkar-Software/OctoBot-Binary/releases) +[![OctoBot-Binary-CI](https://github.com/Drakkar-Software/OctoBot-Binary/workflows/OctoBot-Binary-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Binary/actions) + +OctoBot binaries is dedicated to create and upload binaries for Windows, Linux and MacOS for each [OctoBot project](https://github.com/Drakkar-Software/OctoBot) release. + +### On Windows +- Just double-click on *OctoBot_windows.exe* + +### On Linux and MacOS +- Open a terminal a type the following commands : +``` +$ chmod +x OctoBot_linux +$ ./OctoBot_linux +``` + +**Replace `OctoBot_linux` by `OctoBot_osx` on MacOS** + +## Binary production steps: +1. Clone OctoBot into the octobot folder +2. Install OctoBot requirements `python -m pip install -r octobot/requirements.txt` +3. Install pyinstaller >= 4.0 pip install `https://github.com/pyinstaller/pyinstaller/archive/develop.zip` +4. List OctoBot modules files for pyinstaller discovery `python scripts/python_file_lister.py "bin/octobot_packages_files.txt" <octobot repo folder>` +5. Add pyinstaller required imports `python scripts/insert_imports.py octobot/octobot/cli.py` +6. Copy bin folder into the octobot folder +7. Go into the octobot folder +8. Compile the OctoBot project `python setup.py build_ext --inplace` +9. Call pyinstaller `pyinstaller bin\start.spec` +10. Binary should be available in the dist folder + +More information on [OctoBot wiki page](https://github.com/Drakkar-Software/OctoBot/wiki/Installation). diff --git a/packages/binary/build_scripts/unix.sh b/packages/binary/build_scripts/unix.sh new file mode 100644 index 0000000000..9f001f1277 --- /dev/null +++ b/packages/binary/build_scripts/unix.sh @@ -0,0 +1,9 @@ +#!/bin/bash +python3 -m pip freeze +python3 packages/binary/scripts/python_file_lister.py bin/octobot_packages_files.txt +python3 packages/binary/scripts/insert_imports.py octobot/cli.py +python3 packages/binary/scripts/fetch_nltk_data.py words $NLTK_DATA +python3 -m PyInstaller bin/start.spec --workpath installer +./dist/OctoBot --version +mv dist/OctoBot ./OctoBot_$BUILD_ARCH && rm -rf dist/ +ls -al diff --git a/packages/binary/build_scripts/windows.ps1 b/packages/binary/build_scripts/windows.ps1 new file mode 100644 index 0000000000..3f103fcb74 --- /dev/null +++ b/packages/binary/build_scripts/windows.ps1 @@ -0,0 +1,8 @@ +python -m pip freeze +python packages/binary/scripts/python_file_lister.py bin/octobot_packages_files.txt +python packages/binary/scripts/insert_imports.py octobot/cli.py +python packages/binary/scripts/fetch_nltk_data.py words $env:NLTK_DATA +python -m PyInstaller bin/start.spec --workpath installer +Move-Item dist\OctoBot.exe OctoBot_windows.exe +.\OctoBot_windows.exe --version +dir diff --git a/packages/binary/requirements.txt b/packages/binary/requirements.txt new file mode 100644 index 0000000000..8908fbb60f --- /dev/null +++ b/packages/binary/requirements.txt @@ -0,0 +1,2 @@ +pyinstaller==6.18.0 +nltk diff --git a/packages/binary/scripts/fetch_nltk_data.py b/packages/binary/scripts/fetch_nltk_data.py new file mode 100644 index 0000000000..b7b16e4341 --- /dev/null +++ b/packages/binary/scripts/fetch_nltk_data.py @@ -0,0 +1,5 @@ +import sys +import nltk + +if __name__ == "__main__": + nltk.download(sys.argv[1], download_dir=sys.argv[2]) diff --git a/packages/binary/scripts/insert_imports.py b/packages/binary/scripts/insert_imports.py new file mode 100644 index 0000000000..a32965d11c --- /dev/null +++ b/packages/binary/scripts/insert_imports.py @@ -0,0 +1,16 @@ +import sys + + +TO_INSERT_LINES = [ + "from engineio.async_drivers import gevent" # required for pyinstaller to add gevent and be able to use it later. +] + + +def insert_imports(target_file): + with open(target_file, "a") as file_append: + file_append.writelines([f"{line}\n" for line in TO_INSERT_LINES]) + print(f"{len(TO_INSERT_LINES)} lines appended into {target_file}") + + +if __name__ == "__main__": + insert_imports(sys.argv[1]) diff --git a/packages/binary/scripts/python_file_lister.py b/packages/binary/scripts/python_file_lister.py new file mode 100644 index 0000000000..bf1a9f4ff0 --- /dev/null +++ b/packages/binary/scripts/python_file_lister.py @@ -0,0 +1,56 @@ +import sys +import os +import site + + +INIT_FILE = "__init__.py" +HANDLED_EXT = {".py", ".pyd", ".so"} +OCTOBOT_PREFIX = "octobot" +OTHER_MODULES = ["async_channel"] + + +def _is_file_to_handle(entry): + return entry.name != INIT_FILE and os.path.splitext(entry.name)[-1] in HANDLED_EXT + + +def _is_dir_to_handle(entry): + return not entry.name.startswith("__") + + +def _explore_module(package_entry, root=""): + print(f"Exploring {root}.{package_entry.name}") + files = [] + for entry in os.scandir(package_entry): + import_element = entry.name.split(".")[0] + if entry.is_dir(): + if _is_dir_to_handle(entry): + files = files + _explore_module(entry, f"{root}{'.' if root else ''}{import_element}") + elif _is_file_to_handle(entry): + files.append(f"{root}.{import_element}") + return files + + +def _get_octobot_packages(packages_path): + for entry in os.scandir(packages_path): + if (entry.name.startswith(OCTOBOT_PREFIX) or entry.name in OTHER_MODULES) \ + and not entry.name.endswith("-info") and not entry.name.endswith(".hcl"): + yield entry + + +def explore_packages(packages_paths, target_file): + files = set() + for packages_path in packages_paths: + for package_entry in _get_octobot_packages(packages_path): + files = files.union(_explore_module(package_entry, package_entry.name)) + if files: + with open(target_file, "w+") as file_w: + file_w.writelines(sorted([f"{f}\n" for f in files])) + print(f"{len(files)} files saved into {target_file}") + + +if __name__ == "__main__": + print(f"site.getsitepackages(): {site.getsitepackages()}") + site_packages = [s for s in site.getsitepackages() if "site-packages" in s] + bot_package = sys.argv[2] if len(sys.argv) > 2 else "." + print(f"exploring {site_packages}") + explore_packages(site_packages + [bot_package], sys.argv[1]) diff --git a/packages/commons/.coveragerc b/packages/commons/.coveragerc new file mode 100644 index 0000000000..24c8b9d8d4 --- /dev/null +++ b/packages/commons/.coveragerc @@ -0,0 +1,9 @@ +[run] +omit = + octobot_commons/channels_name.py + octobot_commons/constants.py + octobot_commons/errors.py + octobot_commons/enums.py + venv/* + tests/* + setup.py diff --git a/packages/commons/.gitignore b/packages/commons/.gitignore new file mode 100644 index 0000000000..429e2ade28 --- /dev/null +++ b/packages/commons/.gitignore @@ -0,0 +1,111 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +*.c + +.idea +user +cython_debug +wheelhouse diff --git a/packages/commons/BUILD b/packages/commons/BUILD new file mode 100644 index 0000000000..238d17bf94 --- /dev/null +++ b/packages/commons/BUILD @@ -0,0 +1,20 @@ +python_requirements(name="reqs") +python_requirements(name="full_reqs", source="full_requirements.txt") + +python_sources(name="octobot_commons", sources=["octobot_commons/**/*.py"]) + +files( + name="test_data", + sources=["tests/static/**/*"], +) + +python_test_utils( + name="test_profiles_utils", + sources=["tests/profiles/__init__.py"], +) + +python_tests( + name="tests", + sources=["tests/**/test_*.py", "tests/profiles/**", "!tests/profiles/__init__.py"], + dependencies=[":octobot_commons", ":reqs", ":full_reqs", "//:dev_reqs", ":test_data", ":test_profiles_utils"], +) \ No newline at end of file diff --git a/packages/commons/CHANGELOG.md b/packages/commons/CHANGELOG.md new file mode 100644 index 0000000000..0e653fca09 --- /dev/null +++ b/packages/commons/CHANGELOG.md @@ -0,0 +1,1435 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.10.6] - 2026-01-23 +### Fixed +[BaseTreeNode] Fix missing description and metadata in path functions + +## [1.10.5] - 2026-01-23 +### Added +[BaseTreeNode] add description and metadata + +## [1.10.4] - 2026-01-21 +### Added +- "markets" to InitializationEventExchangeTopics + +## [1.10.3] - 2026-01-11 +### Added +- [ContextUtil] add EmptyContextManager +### Updated +- [ProfileData] make ProfileData an UpdatableDataclass + +## [1.10.2] - 2026-01-07 +### Added +- RSA, AES and ECDSA encryption functions + +## [1.10.1] - 2026-01-03 +### Added +- CONFIG_EXCHANGE_OPTION constant +- `is_symbol` to symbol_util + +## [1.10.0] - 2026-01-02 +### Added +- Option symbols supports in Symbol operations (Warning: `market_separator` is not the fourth param of `merge_currencies` anymore, it shouldn't be a breaking change as it should be called with kwargs) + +## [1.9.92] - 2025-12-16 +### Added +[DSL] add operator docs + +## [1.9.91] - 2025-12-05 +### Added +[AsyncTools] add gather_waiting_for_all_before_raising + +## [1.9.90] - 2025-12-03 +### Updated +[Cache] reduce symbol util cache to free RAM + +## [1.9.89] - 2025-12-02 +### Added +[OS] Add is_raspberry_pi_machine() + +## [1.9.88] - 2025-11-26 +### Added +[Requirements] fix full requirements installation + +## [1.9.87] - 2025-11-26 +### Added +[Requirements] [full] requirements + +## [1.9.86] - 2025-11-18 +### Fixed +[Constants] usd-like coins list + +## [1.9.85] - 2025-11-18 +### Added +[DSL] add DSL interpreter + +## [1.9.84] - 2025-11-07 +### Added +[Constants] add CONFIG_FORCE_AUTHENTICATION + +## [1.9.83] - 2025-10-17 +### Added +[Constants] add DEFAULT_REFERENCE_MARKET + +## [1.9.82] - 2025-08-24 +### Added +[Signals] add dependencies + +## [1.9.81] - 2025-08-16 +### Added +[ProfileData] add exchange_account_id + +## [1.9.80] - 2025-07-17 +### Updated +[Time] fix timezone deprecation + +## [1.9.79] - 2025-07-12 +### Added +[HistoricalConfig] add checker and multi config selector + +## [1.9.78] - 2025-06-16 +### Added +[Authentication] add kwargs to update_portfolio + +## [1.9.77] - 2025-05-31 +### Added +[DisplayTranslator] temporary fix field dependency issue + +## [1.9.76] - 2025-05-23 +### Added +[ProfileData] add nested_strategy_config_id +[ListUtil] deduplicate + +## [1.9.75] - 2025-05-21 +### Added +[Constants] add distribution + +## [1.9.74] - 2025-03-18 +### Updated +[ProfileData] add sub portfolio requirements + +## [1.9.73] - 2025-02-23 +### Updated +[DictUtil] add ignore_lists + +## [1.9.72] - 2025-02-16 +### Fixed +[ProfileData] fix future_exchange_data default value + +## [1.9.71] - 2025-02-10 +### Added +[Constants] add TRADING_SYMBOL_REGEX + +## [1.9.70] - 2024-12-11 +### Added +[Constants] add FIAT_NON_USD_LIKE_COINS + +## [1.9.69] - 2024-12-06 +### Added +[ProfileData] remove proxy_id + +## [1.9.68] - 2023-12-03 +### Added +[Authenticator] update positions +[ProfileData] leverage and exchange_type + +## [1.9.67] - 2023-12-02 +### Updated +[HTML] handle html exception args + +## [1.9.66] - 2023-12-02 +### Updated +[HTML] handle nested exception causes + +## [1.9.65] - 2023-11-24 +### Added +[Configuration] add get_oldest_historical_tentacle_config_time +[AbstractTentacle] add get_tentacle_config_traded_symbols + +## [1.9.64] - 2023-11-23 +### Added +[Tentacles] historical config + +## [1.9.63] - 2023-11-21 +### Added +[html] add html summarizer + +## [1.9.62] - 2023-11-11 +### Added +[aiohttp] add CounterClientSession + +## [1.9.61] - 2023-10-23 +### Added +[ProfileData] add proxy_id +[Constants] add CONFIG_EXCHANGE_ACCESS_TOKEN + +## [1.9.60] - 2023-10-12 +### Added +[ProfileData] add exchange_id + +## [1.9.59] - 2023-10-03 +### Added +[CommunityChannelTypes] add CONFIGURATION + +## [1.9.58] - 2023-08-28 +### Updated +[ExchangeAuthData] add exchange config when missing + +## [1.9.57] - 2023-08-28 +### Added +ExchangeAuthData +### Updated +[ExchangeAuthData] add exchange_type and sandboxed + +## [1.9.56] - 2023-08-28 +### Added +ExchangeAuthData +### Updated +[ProfileImport] add force_simulator param + +## [1.9.55] - 2023-08-25 +### Updated +[Config] remove custom restore file +[Config] allow restore file copy failure + +## [1.9.54] - 2023-08-21 +### Updated +[Authenticator] update update_orders args +## [1.9.53] - 2023-08-19 +### Added +[Constants] CONFIG_EXCHANGE_UID + +## [1.9.52] - 2023-08-18 +### Added +[ProfileData] TentaclesProfileDataTranslator + +## [1.9.51] - 2023-07-23 +### Added +[Authenticator] wait_and_check_has_open_source_package + +## [1.9.50] - 2023-07-15 +### Added +[OS] optional RAM watcher + +## [1.9.49] - 2023-07-04 +### Added +[Profiles] handle registered tentacles in import + +## [1.9.48] - 2023-07-03 +### Added +[Profiles] handle profile update + +## [1.9.47] - 2023-06-12 +### Added +[CommunityChannelTypes] add TRADINGVIEW + +## [1.9.46] - 2023-05-26 +### Updated +ProfileData: added enable field on traded pairs + +## [1.9.45] - 2023-04-21 +### Added +certify_aiohttp_client_session + +## [1.9.44] - 2023-04-18 +### Added +ssl_fallback_aiohttp_client_session + +## [1.9.43] - 2023-03-21 +### Added +PROFITABILITY to InitializationEventExchangeTopics + +## [1.9.42] - 2023-02-20 +### Removed +- usd_like_value from MinimalFund + +## [1.9.41] - 2023-02-20 +### Added +- usd_like_value to MinimalFund + +## [1.9.40] - 2023-02-15 +### Fixed +- ProfileData to profile dict portfolio + +## [1.9.39] - 2023-01-18 +### Fixed +- File download: typo + +## [1.9.38] - 2023-01-18 +### Updated +- File download: add error text when possible + +## [1.9.37] - 2023-01-10 +### Updated +- File download: return last_modified + +## [1.9.36] - 2023-01-09 +### Updated +- dependencies + +## [1.9.35] - 2023-12-18 +### Added +- logging callback + +## [1.9.34] - 2023-12-11 +### Added +- Profiles: extra_backtesting_time_frames + +## [1.9.33] - 2023-12-08 +### Added +- Enums: TRIGGER_HEALTH_CHECK + +## [1.9.32] - 2023-12-04 +### Added +- Profiles: ProfileData import + +## [1.9.31] - 2023-11-16 +### Added +- [Authenticator] use_as_singleton param + +## [1.9.30] - 2023-10-29 +### Fixed +- [Config] handle malformed pairs + +## [1.9.29] - 2023-10-27 +### Added +[Tree] clear +[TimeFrames] get_last_timeframe_time + +## [1.9.28] - 2023-10-11 +### Added +[TradingData] MinimalFund add from_value_dict + +## [1.9.27] - 2023-10-11 +### Updated +[TradingData] MinimalFund format + +## [1.9.26] - 2023-10-11 +### Added +[enums] INITIAL_PORTFOLIO_OPTIMIZATION + +## [1.9.25] - 2023-10-04 +### Added +[ProfileData] minimal_funds + +## [1.9.24] - 2023-10-04 +### Added +[Signals] add sort_signals to builder + +## [1.9.23] - 2023-09-25 +### Added +- [Authentication] update_orders + +## [1.9.22] - 2023-09-24 +### Added +- [Constants] USD_LIKE_COINS + +## [1.9.21] - 2023-09-15 +### Fixed +- [Config] save files issues + +## [1.9.20] - 2023-09-05 +### Fixed +- [Config] exchange keys format error + +## [1.9.19] - 2023-09-05 +### Updated +- [FlexibleDataclass] add get_field_names + +## [1.9.18] - 2023-09-01 +### Updated +- [FlexibleDataclass] handle any type of field + +## [1.9.17] - 2023-08-25 +### Added +- [Dataclasses] FlexibleDataclass + +## [1.9.16] - 2023-08-22 +### Added +- [Logging] extra data to exceptions + +## [1.9.15] - 2023-08-17 +### Added +- [Databases] is_hard_reset_error + +## [1.9.14] - 2023-08-16 +### Added +- [Logs] set_enable_web_interface_logs + +## [1.9.13] - 2023-08-14 +### Added +- [ProfileData] BacktestingContext + +## [1.9.12] - 2023-08-07 +### Updated +- [Authenticator] update_portfolio params + +## [1.9.11] - 2023-08-07 +### Updated +- ProfileData simplify content + +## [1.9.10] - 2023-08-07 +### Updated +- ProfileData format + +## [1.9.9] - 2023-08-03 +### Added +- UpdatableDataclass + +## [1.9.8] - 2023-07-25 +### Added +- ProfileData default values + +## [1.9.7] - 2023-07-23 +### Added +- ProfileData config_name + +## [1.9.6] - 2023-07-22 +### Added +- ProfileData +- Singletons: add remove methods + +## [1.9.5] - 2023-05-17 +### Added +- DEPENDENCIES to UserInputOtherSchemaValuesTypes +### Fixed +- sqlite close error +- threadpool stop + +## [1.9.4] - 2023-05-10 +### Updated +- [SignalBundleBuilder] add logger + +## [1.9.3] - 2023-05-05 +### Fixed +- make archive path + +## [1.9.2] - 2023-05-02 +### Updated +- setup.py + +## [1.9.1] - 2023-05-02 +### Updated +- setup.py + +## [1.9.0] - 2023-05-02 +### Updated +- Supported python versions + +## [1.8.28] - 2023-04-29 +### Updated +- [PrettyPrinter] decimal adapter +- [Enums] BacktestingMetadata + +## [1.8.27] - 2023-04-17 +### Fixed +- [Databases] SQL: delete statement + +## [1.8.26] - 2023-04-15 +### Updated +- [PrettyPrinter] Adapt decimals to number +### Fixed +- [Databases] Auto-repair error + +## [1.8.25] - 2023-04-15 +### Added +- [DisplayTranslator] config_by_tentacles +- [Databases] Auto-repair when necessary + +## [1.8.24] - 2023-04-05 +### Added +- [TimeFrames] is_time_frame + +## [1.8.23] - 2023-03-30 +### Added +- [Orders] historical orders update + +## [1.8.22] - 2023-03-28 +### Updated +- [AbstractTentacle] fix CLASS_UI + +## [1.8.21] - 2023-03-27 +### Updated +- [AbstractTentacle] add cython class and support for generalized user inputs + +## [1.8.20] - 2023-03-25 +### Updated +- [Profiles] add risk and complexity + +## [1.8.19] - 2023-03-23 +### Updated +- [Portfolio] improve portfolio pretty print + +## [1.8.18] - 2023-03-22 +### Added +- [User Inputs] UserInputEditorOptionsTypes +### Updated +- [Portfolio] add reference market value in pretty print + +## [1.8.17] - 2023-03-21 +### Updated +- [Logging] add handler-scope level update + +## [1.8.16] - 2023-03-19 +### Updated +- [Profiles] handle profiles sync error + +## [1.8.15] - 2023-03-18 +### Updated +- [PrettyPrinter] telegram lib import + +## [1.8.14] - 2023-03-13 +### Updated +- [Enums] Storage related enums + +## [1.8.13] - 2023-03-08 +### Added +- [Profiles] Validate imported profiles + +## [1.8.12] - 2023-03-01 +### Added +- [Databases] Debug logs + +## [1.8.11] - 2023-02-16 +### Updated +- [Config] Improve config files errors management + +## [1.8.10] - 2023-02-12 +### Updated +- [SystemResourcesWatcher] Enable resources dump in csv file + +## [1.8.9] - 2023-02-11 +### Updated +- [Databases] Raise FileNotFoundError on missing path + +## [1.8.8] - 2023-02-10 +### Added +- [Enums] TriggerSource + +## [1.8.7] - 2023-02-09 +### Updated +- [Databases] Add account type +- [Databases] Create file only when necessary +- [ClockSynchronizer] Disable unnecessary warning + +## [1.8.6] - 2023-01-28 +### Added +- [Authenticator] AccountUpdateError + +## [1.8.5] - 2023-01-18 +### Added +- [Authenticator] register + +## [1.8.4] - 2023-01-18 +### Added +- [Authenticator] update_trades and update_portfolio + +## [1.8.3] - 2023-01-15 +### Fixed +- [DataUtil] typing issue + +## [1.8.2] - 2022-12-27 +### Fixed +- [Profiles] handle invalid downloaded profiles + +## [1.8.1] - 2022-12-26 +### Added +- [Profiles] load_profile + +## [1.8.0] - 2022-12-23 +### Updated +Numpy and Cython versions + +## [1.7.37] - 2022-12-22 +### Added +- [Symbols] use cache for repetitive operations +- [Profiles] quite mode in install +- [DisplayTranslator] add_parts_from_other +- [ChronologicalReadDatabaseCache] reset_cached_indexes +### Updated +- [Constants] increase MAX_BACKTESTING_RUNS + +## [1.7.36] 2022-12-10 +### Fixed +- Profile rename + +## [1.7.35] 2022-11-29 +### Added +- Profiles origin url + +## [1.7.34] 2022-11-24 +### Fixed +- OSUtil get_cpu_and_ram_usage on different platforms + +## [1.7.33] 2022-11-23 +### Added +- SystemResourcesWatcher +### Updated +- [AsyncJob] add log on success after multiple failures + +## [1.7.32] 2022-11-23 +### Updated +- [Profiles] allow exchange removal on multiple profiles at once +### Fixed +- [ClockSynchronizer] NotImplementedError in update loop + +## [1.7.31] 2022-11-11 +### Fixed +- [ClockSynchronizer] on docker + +## [1.7.30] 2022-11-06 +### Updated +- [Profiles] install and update + +## [1.7.29] 2022-11-01 +### Added +- [SignalPublisher] octobot_commons.errors.MissingSignalBuilder + +## [1.7.28] 2022-11-01 +### Added +- [ClockSynchronizer] info log + +## [1.7.27] 2022-10-27 +### Fixed +- [ClockSynchronizer] Stop + +## [1.7.26] 2022-10-26 +### Added +- [Clock] ClockSynchronizer + +## [1.7.25] 2022-10-13 +### Updated +- [UserInputFactory] add update_parent_value + +## [1.7.24] 2022-10-13 +### Updated +- [FileSystemRunDatabasesPruner] handle no data error + +## [1.7.23] 2022-10-12 +### Updated +- Symbol methods + +## [1.7.22] 2022-10-10 +### Fixed +- Cython Database + +## [1.7.21] - 2022-10-09 +### Added +- User inputs +- Database pruner + +## [1.7.20] - 2022-09-20 +### Added +- EventTree +### Updated +- [EventTree] rename into BaseTree + +## [1.7.19] - 2022-09-11 +### Updated +- [Profiles] add imported attribute + +## [1.7.18] - 2022-09-11 +### Updated +- [Profiles] parse imported profiles + +## [1.7.17] - 2022-09-08 +### Added +- [Databases] live id +- [AsyncTools] timeout param + +## [1.7.16] - 2022-09-02 +### Added +- [Signals] Prevent double emit + +## [1.7.15] - 2022-08-20 +### Added +- [Signals] Signals definition and publisher + +## [1.7.14] - 2022-08-19 +### Updated +- [Profiles] Do not share disabled exchanges + +## [1.7.13] - 2022-08-11 +### Updated +- [Authenticator] API +- [AsyncTool] API + +## [1.7.12] - 2022-08-10 +### Updated +- [Authenticator] API + +## [1.7.11] - 2022-07-16 +### Added +- [Databases] add GlobalSharedMemoryStorage + +## [1.7.10] - 2022-06-01 +### Added +- [Symbols] Symbol object + +## [1.7.9] - 2022-05-25 +### Added +- [Config] exchange types +- +## [1.7.8] - 2022-05-18 +### Fixed +- [Databases] ChronologicalReadDatabaseCache set +### Updated +- [PrettyPrinter] Orders and trades print + +## [1.7.7] - 2022-05-04 +### Fixed +- [PrettyPrinter] Portfolio print + +## [1.7.6] - 2022-05-03 +### Updated +- [Enums] Remove clear cache command + +## [1.7.5] - 2022-05-02 +### Added +- [Enums] Add databases enums +### Updated +- [Enums] rename ActivationTopics#EVALUATORS into ActivationTopics#EVALUATION_CYCLE + +## [1.7.4] - 2022-04-23 +### Updated +- [Databases] Add filter to get_single_sub_identifier call + +## [1.7.3] - 2022-04-16 +### Added +- [TradingSignals] feed bases +### Updated +- [Authenticator] Use singleton + +## [1.7.2] - 2022-03-31 +### Fixed +- [Databases] Cython imports + +## [1.7.1] - 2022-03-30 +### Added +- [Databases] CacheClient + +## [1.7.0] - 2022-03-26 +### Added +- [Databases] Document and relational databases +- DisplayTranslator, logical operators, multiprocessing_utils, optimization_campaigns + +## [1.6.20] - 2021-01-08 +### Updated +- Bump requirements + +## [1.6.19] - 2021-12-19 +### Added +- [PrettyPrinter] Assets support + +## [1.6.18] - 2021-11-01 +### Update +- [Configuration] Add merge_sub_array option in merge_dictionaries_by_appending_keys + +## [1.6.17] - 2021-10-30 +### Updated +- Bump requirements + +## [1.6.16] - 2021-10-30 +### Added +- [Configuration] add dev_mode_enabled + +## [1.6.15] - 2021-10-11 +### Updated +- Bump requirements + +## [1.6.14] - 2021-09-25 +### Added +- OS util: parse_boolean_environment_var method + +## [1.6.13] - 2021-09-14 +### Added +- Singleton: get_instance_if_exists method + +## [1.6.12] - 2021-09-08 +### Added +- ErrorContainer: print traceback + +## [1.6.11] - 2021-08-28 +### Added +- Exchange CONFIG_EXCHANGE_SUB_ACCOUNT constant + +## [1.6.10] - 2021-08-24 +### Fixed +- Error callback registration + +## [1.6.9] - 2021-08-21 +### Added +- Error callback + +## [1.6.8] - 2021-08-01 +### Added +- Async RLock + +## [1.6.7] - 2021-07-21 +### Updated +- authenticator + +## [1.6.6] - 2021-07-19 +### Updated +- bump requirements + +## [1.6.5] - 2021-07-19 +### Updated +- bump requirements + +## [1.6.4] - 2021-04-06 +### Updated +- loggers signatures + +## [1.6.3] - 2021-05-24 +### Added +- authentication abstract class + +## [1.6.2] - 2021-05-05 +### Updated +- bump requirements + +## [1.6.1] - 2021-04-29 +### Added +- [Profile import] Add replace current profile if exists parameter + +## [1.6.0] - 2021-04-28 +### Updated +- [Profile loading] Don't use 'default' as default profile +- [Profile loading] Load profiles only if possible and necessary + +## [1.5.19] - 2021-04-06 +### Removed +- Sentry usage until performance impact is measured + +## [1.5.18] - 2021-04-05 +### Fixed +- Sentry disable + +## [1.5.17] - 2021-04-02 +### Added +- Sentry error tracking + +## [1.5.16] - 2021-04-01 +### Added +- aiohttp util : download_stream_file + +## [1.5.15] - 2021-04-01 +### Added +- is_machine_64bit and is_arm_machine to os_util + +## [1.5.14] - 2021-03-31 +### Added +- Github constants + +## [1.5.13] - 2021-03-28 +### Fixed +- METRICS_URL constant (issue created on 1.5.12) + +## [1.5.12] - 2021-03-27 +### Fixed +- METRICS_URL constant + +## [1.5.11] - 2021-03-23 +### Added +- Symbols wildcard constant + +## [1.5.10] - 2021-03-22 +### Updated +- metrics url + +## [1.5.9] - 2021-03-15 +### Added +- Tentacles user commands + +## [1.5.8] - 2021-03-04 +### Added +- PriceStrings in enums + +## [1.5.7] - 2021-03-03 +### Added +- Python 3.9 support + +## [1.5.6] - 2020-02-27 +### Removed +- Aarch64 build on DroneCI, now build with github actions + +## [1.5.5] - 2020-02-25 +### Updated +- cython requirement + +## [1.5.4] - 2020-02-08 +### Updated +- numpy requirement + +## [1.5.3] - 2020-02-03 +### Updated +- numpy requirement + +## [1.5.2] - 2020-01-30 +### Updated +- Profiles duplication path + +## [1.5.1] - 2020-12-23 +### Updated +- Profiles import + +## [1.5.0] - 2020-12-23 +### Added +- Profiles management + +## [1.4.15] - 2020-12-10 +### Fixed +- trading configuration keys import + +## [1.4.14] - 2020-12-08 +### Updated +- migrate trading config keys into octobot-commons + +## [1.4.13] - 2020-12-06 +### Updated +- requirements: removed telegram requirement + +## [1.4.12] - 2020-12-06 +### Updated +- config.json test file + +## [1.4.11] - 2020-11-26 +### Added +- Thread util module + +## [1.4.10] - 2020-11-25 +### Updated +- Remove multi-session-profitability from default config + +## [1.4.9] - 2020-11-20 +### Fixed +- Number pretty printer + +## [1.4.8] - 2020-11-08 +### Updated +- Metrics url + +## [1.4.7] - 2020-11-06 +### Updated +- CI to github actions + +## [1.4.6] - 2020-10-29 +### Updated +- Numpy requirements + +## [1.4.5] - 2020-10-24 +### Updated +- Requirements + +## [1.4.4] - 2020-10-23 +### Added +- disable method on BotLogger + +## [1.4.3] - 2020-10-23 +### Updated +- Release process + +## [1.4.2] - 2020-10-23 +### Updated +- Python 3.8 + +## [1.4.1] - 2020-10-04 +### Updated +- Requirements + +## [1.4.0] - 2020-10-04 +### Changed +- Imports + +## [1.3.46] - 2020-09-02 +### Updated +- AsyncJob exception handling + +## [1.3.45] - 2020-08-27 +### Fixed +- AsyncJob timers + +## [1.3.44] - 2020-08-27 +### Added +- AsyncJob + +## [1.3.43] - 2020-08-15 +### Updated +- Requirements + +## [1.3.42] - 2020-08-13 +### Removed +- Fix pretty printer typing issue + +## [1.3.41] - 2020-07-25 +### Removed +- search_class_name_in_class_list from tentacles manager + +## [1.3.40] - 2020-06-28 +### Updated +- Requirements + +## [1.3.39] - 2020-06-27 +### Fixed +- Errors counter + +## [1.3.38] - 2020-06-19 +### Updated +- Requirements + +## [1.3.37] - 2020-06-09 +### Updated +- Asyncio tools ErrorContainer + +## [1.3.36] - 2020-06-02 +### Added +- Asyncio tool wait_for_task_to_perform + +## [1.3.35] - 2020-06-02 +### Added +- get_password_hash + +## [1.3.34] - 2020-05-27 +### Update +- Cython version + +## [1.3.33] - 2020-05-20 +### Update +- Take config schema as argument in config management + +## [1.3.32] - 2020-05-19 +### Fixed +- Cython header + +## [1.3.31] - 2020-05-16 +### Updated +- Requirements + +## [1.3.30] - 2020-05-14 +### Added +- [Enums] ChannelConsumerPriorityLevels + +## [1.3.29] - 2020-05-13 +### Fixed +- [PrettyPrinter] Fix trade_pretty_printer cython header + +## [1.3.28] - 2020-05-12 +### Fixed +- [Logging] Fix get_backtesting_errors_count cython header + +## [1.3.27] - 2020-05-11 +### Added +- [ConfigUtil] Decrypt util function + +## [1.3.26] - 2020-05-11 +### Added +- [CI] Azure pipeline + +### Removed +- [CI] macOs build on travis +- [CI] Appveyor builds + +## [1.3.25] - 2020-05-10 +### Updated +- Telegram requirements + +## [1.3.24] - 2020-05-09 +### Added +- OctoBotChannel subjects enum + +## [1.3.23] - 2020-05-09 +### Fixed +- Evaluators channels name + +## [1.3.22] - 2020-05-09 +### Added +- OctoBot channel name + +## [1.3.21] - 2020-05-08 +### Update +- improve asyncio ErrorContainer + +## [1.3.20] - 2020-05-08 +### Fixed +- asyncio ErrorContainer + +## [1.3.19] - 2020-05-07 +### Added +- asyncio ErrorContainer + +## [1.3.18] - 2020-05-06 +### Fixed +- Logging_util compiled errors + +## [1.3.17] - 2020-05-05 +### Fixed +- Logging_util cython headers + +## [1.3.16] - 2020-05-03 +### Added +- time_frame_manager cythonization and tests +- symbol_util cythonization + +## [1.3.15] - 2020-05-03 +### Removed +- [EventTree] Events management + +## [1.3.14] - 2020-05-02 +### Added +- list_util file with flatten_list method + +## [1.3.13] - 2020-04-30 +### Added +- Pylint and Black code style checkers + +### Fixed +- Code style issues + +### Removed +- Singleton annotation +- get_value_or_default replaced by native dict.get + +## [1.3.12] - 2020-04-27 +### Updated +- Cython requirement + +## [1.3.11] - 2020-04-23 +### Updated +- [DataUtil] Improve shift implementation + +## [1.3.10] - 2020-04-16 +### Added +- Evaluators channel name +- [EventTree] node value time + +### Fixed +- [EventTree] event clearing too early +- [EventTree] syntax + +### Removed +- AbtractEvaluator default description + +## [1.3.9] - 2020-04-10 +### Fixed +- Missing constant + +## [1.3.8] - 2020-04-08 +### Removed +- AbstractTentacle cythonization + +## [1.3.7] - 2020-04-07 +### Fixed +- Wildcard imports + +## [1.3.6] - 2020-03-25 +### Updated +- Tentacles management to include OctoBot-tentacles-manager + +## [1.3.5] - 2020-03-25 +### Updated +- [Requirement] cython to 0.29.16 +- [Requirement] numpy to 0.18.2 +- [Requirement] jsonschema to 3.2.0 +- [Requirement] python-telegram-bot to 12.4.2 + +## [1.3.4] - 2020-03-22 +### Added +- Liquidations, Mini ticker and Book ticker Channels name + +## [1.3.3] - 2020-03-15 +### Added +- Datetime to timestamp conversion + +## [1.3.2] - 2020-03-14 +### Added +- Funding Channel name + +## [1.3.1] - 2020-03-07 +### Added +- Margin Portfolio key + +## [1.3.0] - 2020-03-05 +### Added +- Error message to exception logger + +### Fixed +- Trade prettyprinter format + +## [1.2.3] - 2020-02-16 +### Added +- shift_value_array function to shift a numpy array +- Cythonized numpy array functions +- Error notifier callback + +### Changed +- Minimal time frame is now 1 min +- Update pretty_printer for the new Trade attributes + +## [1.2.2] - 2020-01-04 +### Changed +- Pretty printer cryptocurrencies alert refresh + +### Fixed +- MarkdownFormat comparison error + +## [1.2.1] - 2020-01-02 +### Added +- Asyncio run_coroutine_in_asyncio_loop method +- External resources management +- Tentacle and classes management utility methods +- Configuration file management + +### Changed +- Pretty printer typo fix + +## [1.2.0] - 2019-12-18 +### Added +- Tests from OctoBot < 0.4.0 +- Number Util float rounding method +- Evaluators_util cython compilation + +### Changed +- TimeFrameManager static methods to function only +- DataUtil static methods to function only +- Evaluator_util check_eval_note returns only boolean + +### Removed +- Travis build stage + +## [1.1.53] - 2019-12-17 +### Added +- Makefile + +### Fixed +- SegFault : Temporary disable abstract_tentacle cython compilation + +## [1.1.52] - 2019-12-14 +### Added +- EventTree NodeExistsError exception + +## [1.1.51] - 2019-12-14 +### Added +- EventTree methods relative node param +- EventTree get without creation method + +## [1.1.50] - 2019-12-11 +### Added +- EventTree with EventNode classes +- tests EventTree methods + +## [1.1.49] - 2019-11-07 +## Updated +- Cython version to 0.29.14 + +## [1.1.48] - 2019-10-21 +### Added +- OSX support + +## [1.1.47] - 2019-10-19 +### Added +- OS tools + +## [1.1.46] - 2019-10-09 +### Changed +- Code cleanup + +## [1.1.45] - 2019-10-09 +### Added +- Appveyor CI + +## [1.1.44] - 2019-10-09 +### Added +- PyPi manylinux deployment + +## [1.1.43] - 2019-10-08 +### Fixed +- Install with setup + +## [1.1.42] - 2019-10-03 +### Added +- Advanced Manager new search methods + +## [1.1.41] - 2019-10-02 +### Added +- Time constants + +## [1.1.40] - 2019-09-26 +### Added +- Inspector deep method by subclasses + +## [1.1.39] - 2019-09-26 +### Added +- Inspector method by subclasses + +## [1.1.38] - 2019-09-25 +### Fixed +- Setup installation + +## [1.1.37] - 2019-09-21 +### Added +- class_inspector default_parents_inspection method + +## [1.1.36] - 2019-09-18 +### Added +- class_inspector cython compilation + +### Changed +- 'default_parent_inspection' to public + +## [1.1.35] - 2019-09-17 +### Changed +- TIME_CHANNEL to backtesting names + +## [1.1.34] - 2019-09-12 +### Fixed +- is_valid_timestamp method exception + +## [1.1.33] - 2019-09-01 +### Fixed +- Adapted config manager from OctoBot core + +## [1.1.32] - 2019-08-27 +### Added +- Tentacle config manager + +## [1.1.31] - 2019-08-18 +### Removed +- Abstract tentacle pxd file + +## [1.1.30] - 2019-08-17 +### Removed +- Advanced manager class + +## [1.1.29] - 2019-08-16 +### Changed +- Generify & cythonize advanced_manager + +## [1.1.28] - 2019-08-16 +### Added +- Evaluator util + +## [1.1.27] - 2019-08-15 +### Added +- Future tentacles constants declaration + +## [1.1.26] - 2019-08-15 +### Added +- Abstract tentacle cython declaration + +## [1.1.25] - 2019-08-15 +### Added +- OctoBot custom errors (can be used to except elsewhere) + +## [1.1.24] - 2019-08-15 +### Added +- Tentacles commons constants + +## [1.1.23] - 2019-08-15 +### Added +- Common channels name + +## [1.1.22] - 2019-08-14 +### Fixed +- Singleton Class instances attribute declaration + +## [1.1.21] - 2019-08-14 +### Changed +- Singleton Class implementation + +## [1.1.20] - 2019-08-14 +### Added +- Singleton Class +- Cython compilation + +### Changed +- Moved singleton.py to singleton/singleton_annotation.py + +## [1.1.19] - 2019-08-14 +### Changed +- AdvancedManager fully split Evaluators and Trading tentacles classes list initialization + +## [1.1.18] - 2019-08-07 +### Added +- ConfigManager from OctoBot main repository + +### Changed +- AdvancedManager tentacle initialization is now splitted between Evaluators and Trading + +## [1.1.17] - 2019-08-06 +### Added +- Constants from OctoBot-Tentacles-Manager + +## [1.1.16] - 2019-08-05 +### Changed +- Tentacles management imports to prepare OctoBot-Tentacles-Manager migration to commons + +## [1.1.15] - 2019-08-05 +### Added +- Config load methods +- 6h time frame in TimeFrames enums + +## [1.1.14] - 2019-08-01 +### Changed +- Adapt pretty printer to OctoBot-Trading callbacks (exchange name) +- Updated order and trade instance getters/property compatibilities + +## [1.1.13] - 2019-06-23 +### Changed +- Catch split_symbol index error exception + +## [1.1.12] - 2019-06-09 +### Added +- Encrypt and decrypt functions + +## [1.1.11] - 2019-06-08 +### Added +- Config util + +## [1.1.10] - 2019-06-08 +### Added +- Data util +- Numpy requirement + +## [1.1.9] - 2019-06-06 +### Added +- Trading constants from OctoBot constants + +## [1.1.8] - 2019-06-05 +### Added +- TimeFrames enums +- TimeFrame manager + +## [1.1.7] - 2019-06-05 +### Added +- dict util methods + +### Removed +- Initializable class + +## [1.1.6] - 2019-06-05 +### Added +- pretty printer + +## [1.1.5] - 2019-06-02 +### Changed +- convert_symbol new optionnal parameter should_lowercase with False as default value + +## [1.1.4] - 2019-06-01 +### Added +- convert_symbol method to manage separator between symbol formats +### Changed +- merge_currencies with a new additional parameter "separator" with MARKET_SEPARATOR as default value + +## [1.1.3] - 2019-05-27 +### Added +- Manifest + +## [1.1.2] - 2019-05-27 +### Added +- Symbol utils +- Initializable class diff --git a/packages/commons/LICENSE b/packages/commons/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/commons/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/commons/MANIFEST.in b/packages/commons/MANIFEST.in new file mode 100644 index 0000000000..2615016250 --- /dev/null +++ b/packages/commons/MANIFEST.in @@ -0,0 +1,9 @@ +recursive-include octobot_commons *.pxd + +include README.md +include LICENSE +include CHANGELOG.md +include requirements.txt +include full_requirements.txt + +global-exclude *.c diff --git a/packages/commons/README.md b/packages/commons/README.md new file mode 100644 index 0000000000..75cd72edf1 --- /dev/null +++ b/packages/commons/README.md @@ -0,0 +1,8 @@ +# OctoBot-Commons +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/b31f3ab3511744a5a5ca6b9bb48e77bb)](https://app.codacy.com/gh/Drakkar-Software/OctoBot-Commons?utm_source=github.com&utm_medium=referral&utm_content=Drakkar-Software/OctoBot-Commons&utm_campaign=Badge_Grade_Dashboard) +[![Coverage Status](https://coveralls.io/repos/github/Drakkar-Software/OctoBot-Commons/badge.svg?branch=master)](https://coveralls.io/github/Drakkar-Software/OctoBot-Commons?branch=master) +[![Github-Action-CI](https://github.com/Drakkar-Software/OctoBot-Commons/workflows/Github-Action-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Commons/actions) +[![Build Status](https://cloud.drone.io/api/badges/Drakkar-Software/OctoBot-Commons/status.svg)](https://cloud.drone.io/Drakkar-Software/OctoBot-Commons) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +[OctoBot](https://github.com/Drakkar-Software/OctoBot) project common modules. diff --git a/packages/commons/full_requirements.txt b/packages/commons/full_requirements.txt new file mode 100644 index 0000000000..70329ea2e7 --- /dev/null +++ b/packages/commons/full_requirements.txt @@ -0,0 +1,12 @@ +# Commons requirements +jsonschema==4.26.0 +psutil==7.2.2 + +certifi==2026.1.4 + +tinydb==4.8.2 + +# async sqlite connection +# update asap (0.19 working) +# but required <= 0.17.0 from asyncpraw https://github.com/praw-dev/asyncpraw/blob/master/pyproject.toml +aiosqlite==0.17.0 diff --git a/packages/commons/octobot_commons/__init__.py b/packages/commons/octobot_commons/__init__.py new file mode 100644 index 0000000000..d63b946582 --- /dev/null +++ b/packages/commons/octobot_commons/__init__.py @@ -0,0 +1,27 @@ +# pylint: disable=W0511 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +PROJECT_NAME = "OctoBot-Commons" +VERSION = "1.10.6" # major.minor.revision + +MARKET_SEPARATOR = "/" +SETTLEMENT_ASSET_SEPARATOR = ":" +OPTION_SEPARATOR = "-" +DICT_BULLET_TOKEN_STR = "\n " + +# DEPRECATED: will be removed in future versions +OCTOBOT_KEY = b"uVEw_JJe7uiXepaU_DR4T-ThkjZlDn8Pzl8hYPIv7w0=" diff --git a/packages/commons/octobot_commons/aiohttp_util.py b/packages/commons/octobot_commons/aiohttp_util.py new file mode 100644 index 0000000000..fea8152869 --- /dev/null +++ b/packages/commons/octobot_commons/aiohttp_util.py @@ -0,0 +1,245 @@ +# pylint: disable=W0718 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ssl +import time +import urllib.parse +import dataclasses + +import contextlib +import aiohttp +import aiohttp.typedefs + +import octobot_commons.logging +import octobot_commons.constants + +try: + import certifi +except ImportError: + if octobot_commons.constants.USE_MINIMAL_LIBS: + # mock certifi imports + class CertifiImportMock: + def where(self): # pylint: disable=missing-function-docstring + raise ImportError("certifi not installed") + + certifi = CertifiImportMock() + else: + raise + + +async def download_stream_file( + output_file, + file_url, + aiohttp_session, + data_chunk_size=5 * 2**20, + is_aiofiles_output_file=False, +) -> str: + """ + Download a big file with an aiohttp session + :param output_file: the output file + :param file_url: the file to be downloaded url + :param aiohttp_session: the aiohttp session + :param data_chunk_size: default value is 5*2**20 (5MB) + :param is_aiofiles_output_file: When True, output_file.write will be awaited (when it's an aiofiles instance) + :return downloaded file last_modified if given as response header + """ + last_modified = None + async with aiohttp_session.get(file_url) as resp: + if resp.status != 200: + try: + text = await resp.text() + except BaseException as err: + text = f"error when reading resp text: {err}" + raise RuntimeError( + f"Failed to download file at url : {file_url} (status: {resp.status}, text: {text})" + ) + while True: + last_modified = resp.headers.get("Last-Modified", "unknown") + chunk = await resp.content.read(data_chunk_size) + if not chunk: + # resp.content.read returns an empty chunk when completed + break + if is_aiofiles_output_file: + await output_file.write(chunk) + else: + output_file.write(chunk) + return last_modified + + +async def _check_local_certificates_availability( + session: aiohttp.ClientSession, test_url: str +): + try: + # try fetching https://tentacles.octobot.online/ using local certificates + async with session.get(test_url) as resp: + if resp.status >= 400: + octobot_commons.logging.get_logger(__name__).error( + f"Error when checking ssl certificates: fetching {test_url} returned {resp.status}. " + f"Considering certificates as valid." + ) + return True + except aiohttp.ClientConnectorCertificateError: + return False + except Exception as err: + octobot_commons.logging.get_logger(__name__).info( + f"Impossible to check ssl certificate: {err}. Consider valid" + ) + return True + + +def _get_certify_aiohttp_client_session() -> aiohttp.ClientSession: + # from https://docs.aiohttp.org/en/stable/client_advanced.html#example-use-certifi + ssl_context = ssl.create_default_context(cafile=certifi.where()) + return aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=ssl_context)) + + +async def get_ssl_fallback_aiohttp_client_session( + test_url: str, +) -> aiohttp.ClientSession: + """ + :return: an aiohttp.ClientSession using certifi ssl certificates if necessary + """ + base_session = aiohttp.ClientSession() + if ( + not octobot_commons.constants.ENABLE_CERTIFI_SSL_CERTIFICATES + or await _check_local_certificates_availability(base_session, test_url) + ): + return base_session + try: + await base_session.close() + except Exception as err: + octobot_commons.logging.get_logger(__name__).exception( + err, True, f"Error when closing test session: {err}" + ) + # use custom SSL certificates session + fallback_session = _get_certify_aiohttp_client_session() + octobot_commons.logging.get_logger(__name__).info( + "Falling back to certifi configured aiohttp connector." + ) + return fallback_session + + +@contextlib.asynccontextmanager +async def ssl_fallback_aiohttp_client_session(test_url: str): + """ + yields an aiohttp.ClientSession using certifi ssl certificates if necessary + """ + session = None + try: + session = await get_ssl_fallback_aiohttp_client_session(test_url) + yield session + finally: + if session is not None: + await session.close() + + +@contextlib.asynccontextmanager +async def certify_aiohttp_client_session(): + """ + yields an aiohttp.ClientSession always using certifi ssl certificates + """ + session = None + try: + octobot_commons.logging.get_logger(__name__).debug( + "Using certifi configured aiohttp connector." + ) + session = _get_certify_aiohttp_client_session() + yield session + finally: + if session is not None: + await session.close() + + +@dataclasses.dataclass +class RequestCounter: + """ + A counter with an identifier: counts requests and logs when its period is over + """ + + name: str + period: float + last_period_start: float = 0 + paths: dict[str, float] = dataclasses.field(default_factory=dict) + + def account_for( + self, method: str, str_or_url: aiohttp.typedefs.StrOrURL, timestamp: float + ): + """ + Account for a request, log if period is over + """ + if timestamp - self.last_period_start > self.period: + if self.last_period_start != 0: + self._log_stats() + self._clear() + self.last_period_start = timestamp - timestamp % self.period + url = str_or_url if isinstance(str_or_url, str) else str_or_url.human_repr() + path = f"[{method}] { urllib.parse.urlparse(url).path}" + self.paths[path] = self.paths.get(path, 0) + 1 + + def _clear(self): + self.paths.clear() + + def _log_stats(self): + total_req = sum(value for value in self.paths.values()) + hours = self.period / octobot_commons.constants.HOURS_TO_SECONDS + days = self.period / octobot_commons.constants.DAYS_TO_SECONDS + identifier = ( + f"{days} days" + if days > 1 + else f"{hours} hours" if hours > 1 else f"{self.period} seconds" + ) + octobot_commons.logging.get_logger(self.__class__.__name__).info( + f"[{self.name}]: {total_req} requests over {identifier}: {self.paths}" + ) + + +class CounterClientSession(aiohttp.ClientSession): + """ + Extends aiohttp.ClientSession to periodically log requests statistics + """ + + def __init__(self, identifier: str, *args, **kwargs): + super().__init__(*args, **kwargs) + self.per_min: RequestCounter = RequestCounter( + identifier, octobot_commons.constants.MINUTE_TO_SECONDS + ) + self.per_hour: RequestCounter = RequestCounter( + identifier, octobot_commons.constants.HOURS_TO_SECONDS + ) + self.per_day: RequestCounter = RequestCounter( + identifier, octobot_commons.constants.DAYS_TO_SECONDS + ) + + async def _request( + self, method: str, str_or_url: aiohttp.typedefs.StrOrURL, *args, **kwargs + ) -> aiohttp.ClientResponse: + self._account_for(method, str_or_url, time.time()) + if kwargs: + return await super()._request(method, str_or_url, *args, **kwargs) + return await super()._request(method, str_or_url) + + def _account_for( + self, method: str, str_or_url: aiohttp.typedefs.StrOrURL, timestamp: float + ): + try: + self.per_min.account_for(method, str_or_url, timestamp) + self.per_hour.account_for(method, str_or_url, timestamp) + self.per_day.account_for(method, str_or_url, timestamp) + except BaseException as err: + # never raise or the subsequent request is blocked + octobot_commons.logging.get_logger(__name__).exception( + err, True, "Error when accounting for request: {err}" + ) diff --git a/packages/commons/octobot_commons/async_job.py b/packages/commons/octobot_commons/async_job.py new file mode 100644 index 0000000000..bd84514772 --- /dev/null +++ b/packages/commons/octobot_commons/async_job.py @@ -0,0 +1,289 @@ +# pylint: disable=W0703,R0902,R0913,R1729 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import time + +import octobot_commons.logging as logging_util +import octobot_commons.html_util as html_util + + +class AsyncJob: + """ + Async job management + """ + + NO_DELAY = 0.1 + DEPENDENCIES_WAIT_TIMEOUT = 300 + SELF_RUNNING_WAIT_TIMEOUT = 30 + MAXIMUM_ALLOWED_SUCCESSIVE_FAILURES = 1 + + def __init__( + self, + callback, + execution_interval_delay=NO_DELAY, + min_execution_delay=NO_DELAY, + first_execution_delay=NO_DELAY, + is_periodic=True, + enable_multiple_runs=False, + max_successive_failures=MAXIMUM_ALLOWED_SUCCESSIVE_FAILURES, + ): + self.logger = logging_util.get_logger( + f"{self.__class__.__name__}-{callback.__name__}" + ) + self.callback = callback + self.is_started = False + self.should_stop = False + self.is_periodic = is_periodic + self.enable_multiple_runs = enable_multiple_runs + self.simultaneous_calls = 0 + self.successive_failures = 0 + + # Set this attribute to 0 to log on any periodic refresh exception. + self.max_successive_failures = max_successive_failures + + self.last_execution_time = 0 + self.execution_interval_delay = execution_interval_delay + self.min_execution_delay = min_execution_delay + self.first_execution_delay = first_execution_delay + + self.job_dependencies = [] + self.idle_task_event = asyncio.Event() + self.idle_task_event.set() + + self.job_task = None + self.job_periodic_task = None + + async def run( + self, + force=False, + wait_for_task_execution=False, + ignore_dependencies_check=False, + **kwargs, + ): + """ + Run the job if possible + Reschedule the job in the end + :param force: When True, force the execution of the job + :param wait_for_task_execution: When True, await idle_task_event + :param ignore_dependencies_check: When True, ignore dependencies wait + """ + if not self.is_started and self.is_periodic: + self.should_stop = False + self.job_periodic_task = asyncio.create_task( + self._run_periodic_task(**kwargs) + ) + else: + if self._should_run_job(force=force, ignore_dependencies=True): + if wait_for_task_execution: + await self._run_task_as_soon_as_possible( + force=force, + ignore_dependencies_check=ignore_dependencies_check, + **kwargs, + ) + else: + self.job_task = asyncio.create_task( + self._run_task_as_soon_as_possible( + force=force, + ignore_dependencies_check=ignore_dependencies_check, + **kwargs, + ) + ) + + async def _run_periodic_task(self, **kwargs): + """ + Calls _run() periodically until self.should_stop == True or cancellation + """ + while not self.should_stop: + self.is_started = True + if self.last_execution_time == 0: + # first execution + sleep_time = ( + 0 + if self.first_execution_delay == self.NO_DELAY + else self.first_execution_delay + ) + else: + # other executions + sleep_time = ( + 0 + if time.time() - self.last_execution_time + >= self.execution_interval_delay + else self.execution_interval_delay + ) + await asyncio.sleep(sleep_time) + await self._run_task_as_soon_as_possible( + error_on_single_failure=False, **kwargs + ) + self.is_started = False + + async def _run_task_as_soon_as_possible( + self, + force=False, + ignore_dependencies_check=False, + error_on_single_failure=True, + **kwargs, + ): + """ + Wait until job _run() can be called + :param force: if True, force job task execution + """ + if self._should_run_job(force=force): + await self._run(error_on_single_failure=error_on_single_failure, **kwargs) + else: + # wait for job dependencies to stop running + # and also this job to stop running + try: + events_to_wait = [] + + # add dependencies event wait_for coroutines + if not ignore_dependencies_check: + events_to_wait += [ + asyncio.wait_for( + dependency.idle_task_event.wait(), + self.DEPENDENCIES_WAIT_TIMEOUT, + ) + for dependency in self.job_dependencies + ] + + # add self idle event wait_for coroutine + if not self.enable_multiple_runs: + events_to_wait.append( + asyncio.wait_for( + self.idle_task_event.wait(), + self.SELF_RUNNING_WAIT_TIMEOUT, + ) + ) + + if events_to_wait: + await asyncio.gather( + *events_to_wait, + ) + except asyncio.TimeoutError: + self.logger.warning("Job has been timed out") + finally: + await self._run( + error_on_single_failure=error_on_single_failure, **kwargs + ) + + def is_job_idle(self): + """ + :return: publicly is_running attribute value + """ + return self.idle_task_event.is_set() + + def add_job_dependency(self, job): + """ + Add a new job dependency + :param job: the new job dependency + """ + self.job_dependencies.append(job) + + async def _run(self, error_on_single_failure=True, **kwargs): + """ + Execute the job callback + Reset the last_execution_time + """ + # Clear to be able to await the event + if self.simultaneous_calls == 0: + self.idle_task_event.clear() + self.simultaneous_calls += 1 + try: + await self.callback(**kwargs) + if self.successive_failures > self.max_successive_failures: + self.logger.info( + f"Job successfully run after {self.successive_failures} failures." + ) + self.successive_failures = 0 + except Exception as exception: + self._handle_run_exception(exception, error_on_single_failure) + finally: + self.last_execution_time = time.time() + self.simultaneous_calls -= 1 + if self.simultaneous_calls == 0: + # Set the event to trigger event waiters + self.idle_task_event.set() + + def _handle_run_exception(self, exception, error_on_single_failure): + self.successive_failures += 1 + str_error = html_util.get_html_summary_if_relevant(exception) + error_message = f"Failed to run job action, exception: {exception.__class__.__name__}: {str_error}" + if error_on_single_failure: + self.logger.exception(exception, True, error_message) + else: + if self.successive_failures > self.max_successive_failures: + self.logger.exception( + exception, + True, + f"{error_message} ({self.successive_failures} failures in a row)", + ) + else: + self.logger.debug(error_message) + # always at least print stacktrace in logs + self.logger.exception(exception, False) + + def _should_run_job(self, force=False, ignore_dependencies=False): + """ + :param force: If True, enabled job execution even if min_execution_delay > last_execution_time + :param ignore_dependencies: If True, ignore _are_job_dependencies_running() result + :return: True if the job is not already running and if _should_run is True + """ + return (self.is_job_idle() or self.enable_multiple_runs) and ( + (self._are_job_dependencies_idle() or ignore_dependencies) + and (self._has_enough_time_elapsed() or force) + ) + + def _has_enough_time_elapsed(self): + """ + :return: True if min_execution_delay < last_execution_time + """ + return ( + time.time() - self.min_execution_delay > self.last_execution_time + or self.min_execution_delay == AsyncJob.NO_DELAY + or self.last_execution_time == 0 + ) + + def _are_job_dependencies_idle(self): + """ + :return: True if a dependent jobs is idle + """ + return all([job.is_job_idle() for job in self.job_dependencies]) + + def is_stopped(self): + """ + Return True when the AsyncJob has stopped + """ + return self.should_stop + + def stop(self): + """ + Stop the job by cancelling the execution task + """ + self.should_stop = True + if self.job_task is not None: + self.job_task.cancel() + self.job_task = None + if self.job_periodic_task is not None: + self.job_periodic_task.cancel() + self.job_periodic_task = None + self.is_started = False + + def clear(self): + """ + Clear job object references and stop it + """ + self.job_dependencies = [] + self.stop() diff --git a/packages/commons/octobot_commons/asyncio_tools.py b/packages/commons/octobot_commons/asyncio_tools.py new file mode 100644 index 0000000000..abd45b243f --- /dev/null +++ b/packages/commons/octobot_commons/asyncio_tools.py @@ -0,0 +1,230 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import contextlib +import time +import traceback +import concurrent.futures +import typing + +import octobot_commons.constants as constants +import octobot_commons.logging as logging_util + + +_BACKGROUND_FINGERPRINT_ASYNC_EXECUTOR = None + + +def run_background_fingerprint_async_executor(fingerprint: str, coroutine) -> bool: + """ + Run a coroutine in the background async executor + :param fingerprint: the fingerprint of the coroutine + :param coroutine: the coroutine to run + :return: True if the coroutine was submitted, False if it was already processing + """ + global _BACKGROUND_FINGERPRINT_ASYNC_EXECUTOR # pylint: disable=global-statement + if _BACKGROUND_FINGERPRINT_ASYNC_EXECUTOR is None: + _BACKGROUND_FINGERPRINT_ASYNC_EXECUTOR = BackgroundFingerprintAsyncExecutor() + return _BACKGROUND_FINGERPRINT_ASYNC_EXECUTOR.submit(fingerprint, coroutine) + + +def run_coroutine_in_asyncio_loop( + coroutine, async_loop, log_exceptions=True, timeout=constants.DEFAULT_FUTURE_TIMEOUT +): + """ + Run a coroutine in the specified asyncio loop + :param coroutine: the coroutine to run + :param async_loop: the asyncio loop + :param log_exceptions: logs exceptions when True + :param timeout: number of seconds to wait for the future before raising a asyncio.TimeoutError + :return: the execution result + """ + logger = logging_util.get_logger("asyncio_tools") + current_task_before_start = asyncio.current_task(async_loop) + future = asyncio.run_coroutine_threadsafe(coroutine, async_loop) + try: + return future.result(timeout) + except (asyncio.TimeoutError, concurrent.futures.TimeoutError) as timeout_error: + logger.error( + f"{coroutine.__name__} coroutine took too long to execute, cancelling the task. " + f"(current task before starting this one: {current_task_before_start}, actual current " + f"task before cancel: {asyncio.current_task(async_loop)})" + ) + future.cancel() + raise asyncio.TimeoutError from timeout_error + except Exception as global_exception: + if log_exceptions: + logger.exception( + global_exception, + True, + f"{coroutine.__name__} coroutine raised an exception: {global_exception}", + ) + raise global_exception + + +class ErrorContainer: + """ + ErrorContainer is used to catch exceptions in as asyncio loop context + """ + + def __init__(self): + self.errors = [] + # set to True when investigating post loop closing exceptions (ex: on task __del__) + self.print_received_exceptions = True + + def exception_handler(self, _, context) -> None: + """ + To be set in the watched asyncio loop via loop.set_exception_handler() + :param _: the loop argument, not used + :param context: the context dict of the exception + :return: None + """ + self.errors.append(context) + if self.print_received_exceptions: + print(context) + error = context.get("exception") + if error: + traceback.print_exception( + type(error), value=error, tb=error.__traceback__ + ) + + async def check(self) -> None: + """ + Will raise AssertionError if an exception has been raised in the registered loop(s) + :return: None + """ + if self.errors: + raise AssertionError("\n".join(f"{e}" for e in self.errors)) + + +async def wait_asyncio_next_cycle(): + """ + Wait for next asyncio next loop cycle + """ + + async def do_nothing(): + pass + + await asyncio.create_task(do_nothing()) + + +async def gather_waiting_for_all_before_raising(*coros): + """ + Gather coros waiting for all futures to be done before raising an exception if any. + Note: given coros should never return an exception, otherwise it will be raised immediately. + :param coros: the coros to gather + :return: same as asyncio.gather with return_exceptions=False + """ + maybe_exceptions = await asyncio.gather(*coros, return_exceptions=True) + for maybe_exception in maybe_exceptions: + if isinstance(maybe_exception, Exception): + # an exception was returned (raised by a future): raise it immediately + raise maybe_exception + # no exception returned: all futures completed successfully: return their values + return maybe_exceptions + + +@contextlib.contextmanager +def logged_waiter(self, name: str, sleep_time: float = 30) -> typing.Generator[None, None, None]: + """ + Periodically log the time elapsed since the start of the waiter + """ + async def _waiter() -> None: + t0 = time.time() + try: + while True: + await asyncio.sleep(sleep_time) + self.logger.info(f"{name} is still processing [{time.time() - t0:.2f} seconds] ...") + except asyncio.CancelledError: + pass + task = None + try: + task = asyncio.create_task(_waiter()) + yield + finally: + if task is not None and not task.done(): + task.cancel() + + +class RLock(asyncio.Lock): + """ + Async Lock implementing reentrancy + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._task = None + self._depth = 0 + + async def acquire(self): + if self._task is None or self._task is not asyncio.current_task(): + await super().acquire() + self._task = asyncio.current_task() + if self._depth != 0: + raise RuntimeError( + f"Async RLock acquired when depth !=0 (depth = {self._depth})." + ) + self._depth += 1 + return True + + def release(self): + if self._depth > 0: + self._depth -= 1 + if self._depth == 0: + super().release() + self._task = None + + +class BackgroundFingerprintAsyncExecutor: + """ + BackgroundFingerprintAsyncExecutor is used to run coroutines in a background thread + """ + def __init__(self): + self.executor = concurrent.futures.ThreadPoolExecutor( + thread_name_prefix="bg-async-executor" + ) # let pool decide how many workers + self.submitted: set[str] = set() + logging_util.get_logger(self.__class__.__name__).info( + f"Initialized pool using {self.executor._max_workers} workers" + ) + + def submit(self, fingerprint: str, coroutine): + """ + Submit a coroutine to the background executor + :param fingerprint: the fingerprint of the coroutine + :param coroutine: the coroutine to run + :return: True if the coroutine was submitted, False if it was already processing + """ + if fingerprint in self.submitted: + # already processing + return False + self.submitted.add(fingerprint) + + def coro_wrapper(): + t_start = time.time() + try: + logging_util.get_logger(self.__class__.__name__).info( + f"Submitting {fingerprint}" + ) + asyncio.run(coroutine) + finally: + logging_util.get_logger(self.__class__.__name__).info( + f"Completed {fingerprint} in {time.time() - t_start:.2f} seconds" + ) + if fingerprint in self.submitted: + self.submitted.remove(fingerprint) + + self.executor.submit(coro_wrapper) + return True diff --git a/packages/commons/octobot_commons/authentication.py b/packages/commons/octobot_commons/authentication.py new file mode 100644 index 0000000000..937b3f59e0 --- /dev/null +++ b/packages/commons/octobot_commons/authentication.py @@ -0,0 +1,237 @@ +# pylint: disable=R0913 +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import functools +import asyncio + +import octobot_commons.logging as bot_logging +import octobot_commons.singleton as singleton + + +class Authenticator(singleton.Singleton): + """ + Abstract class to be implemented when using authenticated requests + """ + + def __init__(self, use_as_singleton=True): + self.logger: bot_logging.BotLogger = bot_logging.get_logger( + self.__class__.__name__ + ) + self.initialized_event: asyncio.Event = None + self.supports: None + self.feed_callbacks = {} + if use_as_singleton: + # also register this instance for the base Authenticator class in singleton + self.use_as_singleton_instance() + + # pylint: disable=W0212 + def use_as_singleton_instance(self): + """ + Update the Authenticator Singleton to use self + """ + singleton.Singleton._instances[Authenticator] = self + singleton.Singleton._instances[self.__class__] = self + + @abc.abstractmethod + async def login(self, username, password, password_token=None): + """ + Used to trigger a login + :param username: authentication username + :param password: authentication password + :param password_token: (optional) authentication password token + :return: + """ + raise NotImplementedError + + @abc.abstractmethod + def logout(self): + """ + Used to clear a logged in session + :return: + """ + raise NotImplementedError + + @abc.abstractmethod + async def register(self, username, password): + """ + Used to create a user account + :param username: authentication username + :param password: authentication password + :return: + """ + raise NotImplementedError + + @abc.abstractmethod + def get_aiohttp_session(self): + """ + Get the aiohttp authenticated session + :return: + """ + raise NotImplementedError + + @abc.abstractmethod + def is_logged_in(self): + """ + :return: True when authenticated + """ + raise NotImplementedError + + @abc.abstractmethod + def must_be_authenticated_through_authenticator(self): + """ + :return: True when this authenticator has to be validated + """ + raise NotImplementedError + + @abc.abstractmethod + def ensure_token_validity(self): + """ + Called before @authenticated methods to ensure authentication + :return: + """ + raise NotImplementedError + + async def register_feed_callback(self, topic, callback): + """ + Registers a feed callback + """ + raise NotImplementedError + + async def send(self, message, channel_type, identifier=None): + """ + Sends a message + """ + raise NotImplementedError + + @abc.abstractmethod + def is_initialized(self) -> bool: + """ + Returns True when initialized + :return: + """ + raise NotImplementedError + + async def await_initialization(self, timeout): + """ + Returns when initialized + :return: + """ + await asyncio.wait_for(self.initialized_event.wait(), timeout) + + async def update_trades(self, trades: list, exchange_name: str, reset: bool): + """ + Updates authenticated account trades + """ + raise NotImplementedError + + async def update_orders(self, orders_by_exchange: dict[str, list]): + """ + Updates authenticated account orders + """ + raise NotImplementedError + + async def update_positions(self, positions_by_exchange: dict[str, list]): + """ + Updates authenticated account positions + """ + raise NotImplementedError + + async def update_portfolio( + self, + current_value: dict, + initial_value: dict, + profitability: float, + unit: str, + content: dict, + history: dict, + price_by_asset: dict, + reset: bool, + **kwargs + ): + """ + Updates authenticated account portfolio + """ + raise NotImplementedError + + async def wait_for_private_data_fetch_if_processing(self): + """ + Returns for private data to be fetched + """ + + def has_open_source_package(self) -> bool: + """ + :return: True when open source package is available + """ + raise NotImplementedError + + @staticmethod + async def wait_and_check_has_open_source_package(raise_on_timeout=False) -> bool: + """ + Returns for private data to be fetched and return True if package is available + """ + authenticator = Authenticator.instance() + try: + await authenticator.wait_for_private_data_fetch_if_processing() + except asyncio.TimeoutError: + if raise_on_timeout: + raise + return authenticator.has_open_source_package() + + +class FailedAuthentication(Exception): + """ + Raised upon authentication failure + """ + + +class UnavailableError(Exception): + """ + Raised upon website availability issues failure + """ + + +class AuthenticationError(Exception): + """ + Raised upon authentication technical error, not on login/password issues + """ + + +class AccountUpdateError(Exception): + """ + Raised upon account update error + """ + + +class AuthenticationRequired(Exception): + """ + Raised when an authentication is required + """ + + +def authenticated(func): + """ + Annotation to required authentication for a method call + :param func: + :return: + """ + + @functools.wraps(func) + def wrapped(self, *args, **kwargs): + self.ensure_token_validity() + return func(self, *args, **kwargs) + + return wrapped diff --git a/packages/commons/octobot_commons/cache_util.py b/packages/commons/octobot_commons/cache_util.py new file mode 100644 index 0000000000..836b56bde4 --- /dev/null +++ b/packages/commons/octobot_commons/cache_util.py @@ -0,0 +1,36 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time + + +def prevented_multiple_calls(f): + """ + Decorator to prevent multiple identical calls to a method within a given period. + """ + _calls_cache: dict[int, float] = {} + + async def _prevented_multiple_calls_wrapper(self, *args, max_period: float = 0, **kwargs): + if max_period: + # include self in the hash to avoid mixing calls from different instances + args_key = hash(f"{id(self)} {hash(args)} {hash(str(kwargs))}") + if args_key in _calls_cache: + if time.time() - _calls_cache[args_key] < max_period: + # period not reached: skip call + return + # register call + _calls_cache[args_key] = time.time() + return await f(self, *args, **kwargs) + return _prevented_multiple_calls_wrapper diff --git a/packages/commons/octobot_commons/channels_name.py b/packages/commons/octobot_commons/channels_name.py new file mode 100644 index 0000000000..0f22b82295 --- /dev/null +++ b/packages/commons/octobot_commons/channels_name.py @@ -0,0 +1,81 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + + +class OctoBotChannelsName(enum.Enum): + """ + OctoBot-Evaluators channel names + """ + + OCTOBOT_CHANNEL = "OctoBot" + + +class OctoBotUserChannelsName(enum.Enum): + """ + OctoBot-Backtesting channel names + """ + + USER_COMMANDS_CHANNEL = "UserCommands" + + +class OctoBotEvaluatorsChannelsName(enum.Enum): + """ + OctoBot-Evaluators channel names + """ + + MATRIX_CHANNEL = "Matrix" + EVALUATORS_CHANNEL = "Evaluators" + + +class OctoBotBacktestingChannelsName(enum.Enum): + """ + OctoBot-Backtesting channel names + """ + + TIME_CHANNEL = "Time" + + +class OctoBotCommunityChannelsName(enum.Enum): + """ + OctoBot community channel names + """ + + REMOTE_TRADING_SIGNALS_CHANNEL = "RemoteTradingSignals" + + +class OctoBotTradingChannelsName(enum.Enum): + """ + OctoBot-Trading channel names + """ + + OHLCV_CHANNEL = "OHLCV" + TICKER_CHANNEL = "Ticker" + MINI_TICKER_CHANNEL = "MiniTicker" + RECENT_TRADES_CHANNEL = "RecentTrade" + ORDER_BOOK_CHANNEL = "OrderBook" + ORDER_BOOK_TICKER_CHANNEL = "OrderBookTicker" + KLINE_CHANNEL = "Kline" + TRADES_CHANNEL = "Trades" + LIQUIDATIONS_CHANNEL = "Liquidations" + ORDERS_CHANNEL = "Orders" + BALANCE_CHANNEL = "Balance" + BALANCE_PROFITABILITY_CHANNEL = "BalanceProfitability" + POSITIONS_CHANNEL = "Positions" + MODE_CHANNEL = "Mode" + MARK_PRICE_CHANNEL = "MarkPrice" + FUNDING_CHANNEL = "Funding" + MARKETS_CHANNEL = "Markets" diff --git a/packages/commons/octobot_commons/configuration/__init__.py b/packages/commons/octobot_commons/configuration/__init__.py new file mode 100644 index 0000000000..f96d65c4fa --- /dev/null +++ b/packages/commons/octobot_commons/configuration/__init__.py @@ -0,0 +1,108 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.configuration import configuration +from octobot_commons.configuration import config_file_manager +from octobot_commons.configuration import config_operations +from octobot_commons.configuration import fields_utils +from octobot_commons.configuration import user_inputs +from octobot_commons.configuration import user_input_configuration +from octobot_commons.configuration import historical_configuration + + +from octobot_commons.configuration.configuration import ( + Configuration, +) +from octobot_commons.configuration.config_file_manager import ( + get_user_config, + load, + dump, + encrypt_values_if_necessary, + handle_encrypted_value, +) +from octobot_commons.configuration.config_operations import ( + filter_to_update_data, + parse_and_update, + merge_dictionaries_by_appending_keys, + clear_dictionaries_by_keys, +) +from octobot_commons.configuration.fields_utils import ( + has_invalid_default_config_value, + encrypt, + decrypt, + decrypt_element_if_possible, + get_password_hash, +) +from octobot_commons.configuration.user_inputs import ( + USER_INPUT_TYPE_TO_PYTHON_TYPE, + MAX_USER_INPUT_ORDER, + UserInput, + UserInputFactory, + sanitize_user_input_name, + save_user_input, + get_user_input_tentacle_type, + get_user_inputs, + clear_user_inputs, +) +from octobot_commons.configuration.user_input_configuration import ( + load_user_inputs_from_class, + get_raw_config_and_user_inputs_from_class, + get_raw_config_and_user_inputs, + load_and_save_user_inputs, +) +from octobot_commons.configuration.historical_configuration import ( + add_historical_tentacle_config, + has_any_historical_tentacle_config, + get_historical_tentacle_config, + get_historical_tentacle_configs, + get_oldest_historical_tentacle_config_time, +) + + +__all__ = [ + "Configuration", + "get_user_config", + "load", + "dump", + "encrypt_values_if_necessary", + "handle_encrypted_value", + "filter_to_update_data", + "parse_and_update", + "merge_dictionaries_by_appending_keys", + "clear_dictionaries_by_keys", + "has_invalid_default_config_value", + "encrypt", + "decrypt", + "decrypt_element_if_possible", + "get_password_hash", + "USER_INPUT_TYPE_TO_PYTHON_TYPE", + "MAX_USER_INPUT_ORDER", + "UserInput", + "UserInputFactory", + "sanitize_user_input_name", + "save_user_input", + "get_user_input_tentacle_type", + "get_user_inputs", + "clear_user_inputs", + "load_user_inputs_from_class", + "get_raw_config_and_user_inputs_from_class", + "get_raw_config_and_user_inputs", + "load_and_save_user_inputs", + "add_historical_tentacle_config", + "has_any_historical_tentacle_config", + "get_historical_tentacle_config", + "get_historical_tentacle_configs", + "get_oldest_historical_tentacle_config_time", +] diff --git a/packages/commons/octobot_commons/configuration/config_file_manager.py b/packages/commons/octobot_commons/configuration/config_file_manager.py new file mode 100644 index 0000000000..2f704b86ce --- /dev/null +++ b/packages/commons/octobot_commons/configuration/config_file_manager.py @@ -0,0 +1,142 @@ +# pylint: disable=W0613, W0703, W0719 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import octobot_commons.logging as logging +import octobot_commons.constants as commons_constants +import octobot_commons.configuration.fields_utils as fields_utils +import octobot_commons.json_util as json_util + + +LOGGER_NAME = "ConfigFileManager" + + +def get_user_config() -> str: + """ + Return user config path + :return: user config path + """ + return os.path.join(commons_constants.USER_FOLDER, commons_constants.CONFIG_FILE) + + +def load(config_file, should_raise=True, fill_missing_fields=False) -> dict: + """ + Load a config from a config_file + :param config_file: the config file path + :param should_raise: if error should be raised + :param fill_missing_fields: if missing fields should be filled + :return: the loaded config + """ + logger = logging.get_logger(LOGGER_NAME) + basic_error = "Error when load config file {0}".format(config_file) + try: + config = json_util.read_file(config_file) + return config + except ValueError as value_error: + error_str = f"{basic_error} : json decoding failed ({value_error})" + if should_raise: + raise Exception(error_str) + logger.error(error_str) + except IOError as io_error: + error_str = f"{basic_error} : file opening failed ({io_error})" + if should_raise: + raise Exception(error_str) + logger.error(error_str) + except Exception as global_exception: + error_str = f"{basic_error} : {global_exception}" + if should_raise: + raise Exception(error_str) + logger.error(error_str) + return None + + +def dump( + config_file, + config, + schema_file=None, +) -> None: + """ + Save a json config + :param config_file: the config file path + :param config: the json config + :param schema_file: path to the json schema to validate the updated config + """ + try: + encrypt_values_if_necessary(config) + if schema_file is not None: + # check if the new config file is correct + _check_config(config, schema_file) + except Exception as global_exception: + logging.get_logger(LOGGER_NAME).error( + f"Failed to validate configuration to save : {global_exception}" + ) + raise global_exception + + json_util.safe_dump(config, config_file) + + +def _check_config(content, schema_file) -> None: + """ + Check a config file + :param content: the config content + :param schema_file: path to the json schema to validate the updated config + """ + try: + json_util.validate(content, schema_file=schema_file) + except Exception as global_exception: + raise global_exception + + +def encrypt_values_if_necessary(config) -> None: + """ + check exchange keys encryption + """ + if commons_constants.CONFIG_EXCHANGES not in config: + return + # check exchange keys encryption + for exchange, exchange_config in config[commons_constants.CONFIG_EXCHANGES].items(): + try: + for key in commons_constants.CONFIG_EXCHANGE_ENCRYPTED_VALUES: + handle_encrypted_value(key, exchange_config) + except Exception: + config[commons_constants.CONFIG_EXCHANGES][exchange] = { + key: "" for key in commons_constants.CONFIG_EXCHANGE_ENCRYPTED_VALUES + } + + +def handle_encrypted_value(value_key, config_element, verbose=False) -> bool: + """ + Handle encrypted value + :param value_key: the value key + :param config_element: the config element + :param verbose: if verbosity is enabled + :return: True if the value can be decrypted + """ + if value_key in config_element: + key = config_element[value_key] + if not fields_utils.has_invalid_default_config_value(key): + try: + fields_utils.decrypt(key, silent_on_invalid_token=True) + return True + except Exception: + config_element[value_key] = fields_utils.encrypt(key).decode() + if verbose: + logging.get_logger(LOGGER_NAME).warning( + f"Non encrypted secret info found in config ({value_key}): replaced " + f"value with encrypted equivalent." + ) + return False + return True diff --git a/packages/commons/octobot_commons/configuration/config_operations.py b/packages/commons/octobot_commons/configuration/config_operations.py new file mode 100644 index 0000000000..264db71d4f --- /dev/null +++ b/packages/commons/octobot_commons/configuration/config_operations.py @@ -0,0 +1,132 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.logging as logging +import octobot_commons.constants as commons_constants + + +DELETE_ELEMENT_VALUE = "" + + +def filter_to_update_data(to_update_data, in_backtesting): + """ + Filter data to update + :param to_update_data: the data to be updated + :param in_backtesting: if backtesting is enabled + :return: the updated data + """ + if in_backtesting: + for key in set(to_update_data.keys()): + # remove changes to currency config when in backtesting + if commons_constants.CONFIG_CRYPTO_CURRENCIES in key: + to_update_data.pop(key) + + +def parse_and_update(key, new_data, config_separator): + """ + Parse and update key + :param key: the key to update + :param new_data: the new data + :param config_separator: the config separator + :return: the key updated + """ + parsed_data_array = key.split(config_separator) + new_config = {} + current_dict = new_config + + for i, _ in enumerate(parsed_data_array): + if i > 0: + if i == len(parsed_data_array) - 1: + current_dict[parsed_data_array[i]] = new_data + else: + current_dict[parsed_data_array[i]] = {} + else: + new_config[parsed_data_array[i]] = {} + + current_dict = current_dict[parsed_data_array[i]] + + return new_config + + +def merge_dictionaries_by_appending_keys( + dict_dest, dict_src, merge_sub_array=False +) -> dict: + """ + Merge dictionnaries by appending keys + :param dict_dest: the destination dictionnary + :param dict_src: the source dictionnary + :return: the merged dictionnary + """ + for key in dict_src: + src_val = dict_src[key] + if key in dict_dest: + dest_val = dict_dest[key] + if isinstance(dest_val, dict) and isinstance(src_val, dict): + dict_dest[key] = merge_dictionaries_by_appending_keys(dest_val, src_val) + elif dest_val == src_val: + pass # same leaf value + elif _are_of_compatible_type(dest_val, src_val): + # simple type: update value + dict_dest[key] = src_val + elif isinstance(dest_val, list) and isinstance(src_val, list): + if merge_sub_array: + dict_dest[key] += src_val + dict_dest[key] = list(set(dict_dest[key])) + else: + dict_dest[key] = src_val + else: + logging.get_logger("ConfigOperations").error( + f"Conflict when merging dict with key : {key}" + ) + else: + dict_dest[key] = src_val + + return dict_dest + + +def clear_dictionaries_by_keys(dict_dest, dict_src): + """ + Clear dictionnaries by keys + :param dict_dest: the destination dictionnary + :param dict_src: the source dictionnary + :return: the cleaned dictionnary + """ + for key in dict_src: + src_val = dict_src[key] + if key in dict_dest: + dest_val = dict_dest[key] + if src_val == DELETE_ELEMENT_VALUE: + dict_dest.pop(key) + elif isinstance(dest_val, dict) and isinstance(src_val, dict): + dict_dest[key] = clear_dictionaries_by_keys(dest_val, src_val) + else: + logging.get_logger("ConfigOperations").error( + f"Conflict when deleting dict element with key : {key}" + ) + + return dict_dest + + +def _are_of_compatible_type(val1, val2) -> bool: + """ + Check if types are compatibles + :param val1: the first value + :param val2: the second value + :return: True if types are compatible + """ + return ( + isinstance(val1, val2.__class__) + or (isinstance(val1, (float, int)) and isinstance(val2, (float, int))) + ) and isinstance(val1, (bool, str, float, int)) diff --git a/packages/commons/octobot_commons/configuration/configuration.py b/packages/commons/octobot_commons/configuration/configuration.py new file mode 100644 index 0000000000..5d0deb0f8a --- /dev/null +++ b/packages/commons/octobot_commons/configuration/configuration.py @@ -0,0 +1,333 @@ +# pylint: disable=R0913, R0902, W0703 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import functools +import copy +import shutil + +import octobot_commons.logging as logging +import octobot_commons.errors as errors +import octobot_commons.constants as commons_constants +import octobot_commons.profiles as profiles +import octobot_commons.json_util as json_util +import octobot_commons.configuration.config_file_manager as config_file_manager +import octobot_commons.configuration.config_operations as config_operations + + +class Configuration: + """ + Configuration is managing an OctoBot configuration regarding reading, writing and updating + """ + + def __init__( + self, + config_path: str, + profiles_path: str, + schema_path: str = None, + profile_schema_path: str = None, + ): + self.logger = logging.get_logger(self.__class__.__name__) + self.config_path: str = config_path + self.profiles_path: str = profiles_path + self.config: dict = None + self.config_schema_path: str = ( + schema_path or commons_constants.CONFIG_FILE_SCHEMA + ) + self.profile_schema_path: str = ( + profile_schema_path or commons_constants.PROFILE_FILE_SCHEMA + ) + + self._read_config: dict = None + self.profile: profiles.Profile = None + self.profile_by_id: dict = {} + + def validate(self) -> None: + """ + Validated self._read_config and self._profile against their json schema + :return: None + """ + json_util.validate(self._read_config, self.config_schema_path) + self.profile.validate() + + def read(self, should_raise=True, fill_missing_fields=False) -> None: + """ + Reads the configuration from self.config_path and load the current profile + Overall config is stored into self.config and consists of a merger from the user + config and activated profile + :param should_raise: will raise upon exception when True + :param fill_missing_fields: will try to fill in missing fields when true + :return: None + """ + self._read_config = config_file_manager.load( + self.config_path, + should_raise=should_raise, + fill_missing_fields=fill_missing_fields, + ) + self.config = copy.deepcopy(self._read_config) + self.load_profiles_if_possible_and_necessary() + + def load_profiles_if_possible_and_necessary(self) -> None: + """ + Loads profiles if profiles already exists and have not been already loaded + :return: None + """ + if not self.are_profiles_empty_or_missing() and not self.are_profile_loaded(): + self.load_profiles() + self.select_profile(self._get_selected_profile()) + + def select_profile(self, profile_id) -> None: + """ + Sets self.profile using its profile_id + :param profile_id: id of the profile to select + :return: None + """ + self.config[commons_constants.CONFIG_PROFILE] = profile_id + self.profile = self.profile_by_id[profile_id] + self.logger.info(f"Using {self.profile.name} profile.") + self._generate_config_from_user_config_and_profile() + + def remove_profile(self, profile_id: str) -> None: + """ + Removes the given profile and deletes its folder on disk + :param profile_id: the id of the profile to remove + :return: None + """ + profile = self.profile_by_id[profile_id] + if profile.read_only and not profile.imported: + raise errors.ProfileRemovalError(f"{profile.name} profile can't be removed") + try: + shutil.rmtree(profile.path) + self.profile_by_id.pop(profile_id, None) + except Exception as err: + raise errors.ProfileRemovalError() from err + + def _generate_config_from_user_config_and_profile(self): + for profile_managed_element in self.profile.FULLY_MANAGED_ELEMENTS: + self.config[profile_managed_element] = copy.deepcopy( + self.profile.config[profile_managed_element] + ) + for partially_managed_element in self.profile.PARTIALLY_MANAGED_ELEMENTS: + self.profile.merge_partially_managed_element_into_config( + self.config, partially_managed_element + ) + + def save( + self, + schema_file=None, + sync_all_profiles=False, + ) -> None: + """ + Save the current self.config and self.profile. + Synchronize all profiles if sync_all_profiles + :return: None + """ + config_to_save = self._get_config_without_profile_elements() + config_file_manager.dump( + self.config_path, + config_to_save, + schema_file=schema_file, + ) + if self.profile is not None: + self.profile.save_config(self.config) + if sync_all_profiles: + self._sync_other_profiles() + + def _sync_other_profiles(self): + """ + Update profile partially managed elements for all profiles except self.profile + with self.config + """ + for profile in self.profile_by_id.values(): + if profile is self.profile: + # do not synchronize self.profile + continue + try: + profile.remove_deleted_elements(self.config) + profile.validate_and_save_config() + except Exception as err: + self.logger.exception( + f"Error when synchronizing '{profile.name}' profile at '{profile.path}': {err}", + False, + err, + ) + + def is_loaded(self) -> bool: + """ + Checks if self has been loaded + :return: True when self has been loaded (read) + """ + return self.config is not None + + def is_config_file_empty_or_missing(self) -> bool: + """ + Checks if self.config_path existing and not empty + :return: True when self.config_path is existing and non empty + """ + return (not os.path.isfile(self.config_path)) or os.stat( + self.config_path + ).st_size == 0 + + def are_profile_loaded(self) -> bool: + """ + Checks if profiles have already been loaded + :return: True if profiles have been loaded + """ + return self.profile is not None + + def are_profiles_empty_or_missing(self) -> bool: + """ + Checks if self.profiles_path exists and contains folders + :return: True if profiles folder is not empty + """ + return not ( + os.path.isdir(self.profiles_path) and os.listdir(self.profiles_path) + ) + + def get_non_imported_profiles(self) -> list: + """ + :return: The list of loaded profiles in self that have not been imported into this OctoBot + """ + return [ + profile for profile in self.profile_by_id.values() if not profile.imported + ] + + def get_tentacles_config_path(self) -> str: + """ + :return: The tentacles configurations associated to the activated profile + """ + return self.profile.get_tentacles_config_path() + + def get_metrics_enabled(self) -> bool: + """ + Check if metrics are enabled + :return: True if metrics are enabled + """ + return bool( + self.config.get(commons_constants.CONFIG_METRICS, {}).get( + commons_constants.CONFIG_ENABLED_OPTION, True + ) + ) + + def get_metrics_id(self) -> str: + """ + :return: The current user's metrics id + """ + return self.config[commons_constants.CONFIG_METRICS][ + commons_constants.CONFIG_METRICS_BOT_ID + ] + + def accepted_terms(self) -> bool: + """ + Check if terms has been accepted + :return: the check result + """ + return self.config.get(commons_constants.CONFIG_ACCEPTED_TERMS, False) + + def accept_terms(self, accepted) -> None: + """ + Perform terms acceptation + :param accepted: accepted or not + """ + self.config[commons_constants.CONFIG_ACCEPTED_TERMS] = accepted + self.save() + + def update_config_fields( + self, + to_update_fields, + in_backtesting, + config_separator, + delete=False, + ) -> None: + """ + Partially update self.config using the fields found in to_update_fields + :param to_update_fields: the fields to update + :param in_backtesting: if backtesting is enabled + :param config_separator: the config separator + :param delete: if the data should be removed + """ + config_operations.filter_to_update_data(to_update_fields, in_backtesting) + removed_configs = [] + if delete: + removed_configs = [ + config_operations.parse_and_update( + data_key, config_operations.DELETE_ELEMENT_VALUE, config_separator + ) + for data_key in to_update_fields + ] + functools.reduce( + config_operations.clear_dictionaries_by_keys, + [self.config] + removed_configs, + ) + else: + updated_configs = [ + config_operations.parse_and_update( + data_key, data_value, config_separator + ) + for data_key, data_value in to_update_fields.items() + ] + # merge configs + functools.reduce( + config_operations.merge_dictionaries_by_appending_keys, + [self.config] + updated_configs, + ) + # ensure encrypted fields + config_file_manager.encrypt_values_if_necessary(self.config) + + # save config + self.save( + schema_file=self.config_schema_path, sync_all_profiles=bool(removed_configs) + ) + + def _get_selected_profile(self): + selected_profile_id = self._read_config.get( + commons_constants.CONFIG_PROFILE, commons_constants.DEFAULT_PROFILE + ) + if selected_profile_id in self.profile_by_id: + return selected_profile_id + if ( + selected_profile_id != commons_constants.DEFAULT_PROFILE + and commons_constants.DEFAULT_PROFILE in self.profile_by_id + ): + return commons_constants.DEFAULT_PROFILE + raise errors.NoProfileError + + def load_profiles(self) -> None: + """ + Loads the available profiles + :return: None + """ + for profile in profiles.Profile.get_all_profiles( + self.profiles_path, schema_path=self.profile_schema_path + ): + if profile.profile_id not in self.profile_by_id: + self.profile_by_id[profile.profile_id] = profile + + def _get_config_without_profile_elements(self) -> dict: + filtered_config = copy.deepcopy(self.config) + # do not include profile fully managed elements into filtered config + for profile_managed_element in profiles.Profile.FULLY_MANAGED_ELEMENTS: + filtered_config.pop(profile_managed_element, None) + return filtered_config + + def dev_mode_enabled(self) -> bool: + """ + Check if DEV_MODE is enabled + :return: bool + """ + return commons_constants.IS_DEV_MODE_ENABLED or self.config.get( + commons_constants.CONFIG_DEBUG_OPTION, False + ) diff --git a/packages/commons/octobot_commons/configuration/fields_utils.py b/packages/commons/octobot_commons/configuration/fields_utils.py new file mode 100644 index 0000000000..46ad259174 --- /dev/null +++ b/packages/commons/octobot_commons/configuration/fields_utils.py @@ -0,0 +1,91 @@ +# pylint: disable=W1203 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging +import hashlib +import cryptography.fernet as fernet + +import octobot_commons +import octobot_commons.constants as commons_constants + + +def has_invalid_default_config_value(*config_values): + """ + Check if config has invalid values + :param config_values: the config values to check + :return: the check result + """ + return any( + value in commons_constants.DEFAULT_CONFIG_VALUES for value in config_values + ) + + +def encrypt(data): + """ + Basic encryption + :param data: the data to encrypt + :return: the encrypted data + """ + try: + return fernet.Fernet(octobot_commons.OCTOBOT_KEY).encrypt(data.encode()) + except Exception as global_exception: + logging.getLogger("fields_utils").error(f"Failed to encrypt : {data}") + raise global_exception + + +def decrypt(data, silent_on_invalid_token=False): + """ + Basic decryption method + :param data: the data to decrypt + :param silent_on_invalid_token: if an error should be raised if a token is invalid + :return: the decrypted data + """ + try: + return ( + fernet.Fernet(octobot_commons.OCTOBOT_KEY).decrypt(data.encode()).decode() + ) + except fernet.InvalidToken as invalid_token_error: + if not silent_on_invalid_token: + logging.getLogger("fields_utils").error( + f"Failed to decrypt : {data} ({invalid_token_error})" + ) + raise invalid_token_error + except Exception as global_exception: + logging.getLogger("fields_utils").error(f"Failed to decrypt : {data} ({global_exception})") + raise global_exception + + +def decrypt_element_if_possible(value_key, config_element, default="") -> str: + """ + Return decrypted values, handles placeholder values + :param value_key: the value key + :param config_element: the config element + :param default: the default value if no decrypt possible + :return: True if the value can be decrypted + """ + element = config_element.get(value_key, "") + if element and not has_invalid_default_config_value(element): + return decrypt(element) + return default + + +def get_password_hash(password): + """ + Returns the password's hex digest + :param password: the password to hash + :return: the hash digest + """ + return hashlib.sha256(password.encode()).hexdigest() diff --git a/packages/commons/octobot_commons/configuration/historical_configuration.py b/packages/commons/octobot_commons/configuration/historical_configuration.py new file mode 100644 index 0000000000..ea2e6125f2 --- /dev/null +++ b/packages/commons/octobot_commons/configuration/historical_configuration.py @@ -0,0 +1,98 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as constants + + +def add_historical_tentacle_config( + master_config: dict, config_start_time: float, historical_config: dict +): + """ + Adds the given historical_config to the master_config historical configurations + """ + if constants.CONFIG_HISTORICAL_CONFIGURATION not in master_config: + master_config[constants.CONFIG_HISTORICAL_CONFIGURATION] = [] + # use list to ensure it still can be serialized + master_config[constants.CONFIG_HISTORICAL_CONFIGURATION].append( + [config_start_time, historical_config] + ) + # always keep the most recent first to be able to find the most up to date first when iterating + master_config[constants.CONFIG_HISTORICAL_CONFIGURATION].sort( + key=lambda x: x[0], reverse=True + ) + + +def has_any_historical_tentacle_config(master_config: dict) -> bool: + """ + :return: True if there is any historical configuration in the master_config + """ + return constants.CONFIG_HISTORICAL_CONFIGURATION in master_config + + +def get_historical_tentacle_config(master_config: dict, current_time: float) -> dict: + """ + :return: the historical configuration associated to the given time + """ + try: + for config_start_time_and_config in master_config[ + constants.CONFIG_HISTORICAL_CONFIGURATION + ]: + if config_start_time_and_config[0] <= current_time: + return config_start_time_and_config[1] + # no suitable config found: fallback to the oldest config + return master_config[constants.CONFIG_HISTORICAL_CONFIGURATION][-1][1] + except KeyError: + raise KeyError( + f"{constants.CONFIG_HISTORICAL_CONFIGURATION} not found in master_config." + ) + + +def get_historical_tentacle_configs( + master_config: dict, from_time: float, to_time: float +) -> list[dict]: + """ + :return: the historical configurations corresponding to the given time interval, + ordered by the most recent config first + """ + try: + return [ + config_start_time_and_config[1] + for config_start_time_and_config in master_config[ + constants.CONFIG_HISTORICAL_CONFIGURATION + ] + if ( + config_start_time_and_config[0] >= from_time + and config_start_time_and_config[0] <= to_time + ) + ] + except KeyError: + raise KeyError( + f"{constants.CONFIG_HISTORICAL_CONFIGURATION} not found in master_config." + ) + + +def get_oldest_historical_tentacle_config_time(master_config: dict) -> float: + """ + :return: the oldest historical configuration timestamp + """ + try: + return min( + historical_config[0] + for historical_config in master_config.get( + constants.CONFIG_HISTORICAL_CONFIGURATION, [] + ) + ) + except ValueError: + raise ValueError("No historical configuration found") diff --git a/packages/commons/octobot_commons/configuration/user_input_configuration.py b/packages/commons/octobot_commons/configuration/user_input_configuration.py new file mode 100644 index 0000000000..214fe74fff --- /dev/null +++ b/packages/commons/octobot_commons/configuration/user_input_configuration.py @@ -0,0 +1,130 @@ +# pylint: disable=W0718 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.logging as logging +import octobot_commons.configuration.user_inputs as user_inputs +import octobot_commons.databases.run_databases.run_databases_provider as run_databases_provider + +try: + import octobot_tentacles_manager.api +except ImportError: + pass + + +def load_user_inputs_from_class( + configured_class, tentacles_setup_config, to_fill_config +) -> dict: + """ + Apply the given tentacles_setup_config configuration to the given to_fill_config using configured_class user inputs + Requires octobot_tentacles_manager import, configured_class.CLASS_UI, configured_class.init_user_inputs + and configured_class.get_name + :return: the filled user input configuration + """ + inputs = {} + try: + to_fill_config.update( + octobot_tentacles_manager.api.get_tentacle_config( + tentacles_setup_config, configured_class + ) + ) + except NotImplementedError: + # get_name not implemented, no tentacle config + return inputs + logger = logging.get_logger(configured_class.get_name()) + try: + with configured_class.CLASS_UI.local_factory( + configured_class, lambda: to_fill_config + ): + configured_class.init_user_inputs_from_class(inputs) + except Exception as err: + logger.exception(err, True, f"Error when initializing user inputs: {err}") + if to_fill_config: + logger.debug(f"Using config: {to_fill_config}") + return inputs + + +async def load_and_save_user_inputs(tentacle_instance, bot_id: str) -> dict: + """ + Requires an instance of the tentacle and the init_user_inputs method + Initialize and save the user inputs of the tentacle in run data + :return: the filled user input configuration + """ + inputs = {} + try: + tentacle_instance.init_user_inputs(inputs) + if run_databases_provider.RunDatabasesProvider.instance().is_storage_enabled( + bot_id + ): + run_db = run_databases_provider.RunDatabasesProvider.instance().get_run_db( + bot_id + ) + await user_inputs.clear_user_inputs(run_db, tentacle_instance.get_name()) + for user_input in inputs.values(): + await user_inputs.save_user_input(user_input, run_db) + await run_db.flush() + except Exception as err: + tentacle_instance.logger.exception( + err, True, f"Error when initializing user inputs: {err}" + ) + return inputs + + +def get_raw_config_and_user_inputs_from_class( + configured_class, tentacles_setup_config +) -> (dict, list): + """ + Requires octobot_tentacles_manager import and configured_class.load_user_inputs + :return: the filled user input configuration of configured_class according to the given tentacles_setup_config + """ + loaded_config = octobot_tentacles_manager.api.get_tentacle_config( + tentacles_setup_config, configured_class + ) + created_user_inputs = configured_class.load_user_inputs_from_class( + tentacles_setup_config, loaded_config + ) + return loaded_config, list( + user_input.to_dict() for user_input in created_user_inputs.values() + ) + + +async def get_raw_config_and_user_inputs( + configured_class, config, tentacles_setup_config, bot_id +) -> (dict, list): + """ + Requires octobot_tentacles_manager import and configured_class.create_local_instance + Uses run data to load user input values when available + :return: the tentacle configuration and its list of user inputs + """ + loaded_config = octobot_tentacles_manager.api.get_tentacle_config( + tentacles_setup_config, configured_class + ) + if saved_user_inputs := await user_inputs.get_user_inputs( + run_databases_provider.RunDatabasesProvider.instance().get_run_db(bot_id), + configured_class.get_name(), + ): + # user inputs have been saved in run database, use those as they might contain additional + # (nested) user inputs + return loaded_config, saved_user_inputs + # use user inputs from init_user_inputs + tentacle_instance = configured_class.create_local_instance( + config, tentacles_setup_config, loaded_config + ) + created_user_inputs = {} + tentacle_instance.init_user_inputs(created_user_inputs) + return loaded_config, list( + user_input.to_dict() for user_input in created_user_inputs.values() + ) diff --git a/packages/commons/octobot_commons/configuration/user_inputs.py b/packages/commons/octobot_commons/configuration/user_inputs.py new file mode 100644 index 0000000000..21f87a4d8a --- /dev/null +++ b/packages/commons/octobot_commons/configuration/user_inputs.py @@ -0,0 +1,339 @@ +# pylint: disable=W1203,R0902,R0913,R0914 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib + +import octobot_commons.enums as enums +import octobot_commons.dict_util as dict_util + + +USER_INPUT_TYPE_TO_PYTHON_TYPE = { + enums.UserInputTypes.INT.value: int, + enums.UserInputTypes.FLOAT.value: float, + enums.UserInputTypes.BOOLEAN.value: bool, + enums.UserInputTypes.TEXT.value: str, + enums.UserInputTypes.OBJECT.value: dict, + enums.UserInputTypes.OBJECT_ARRAY.value: list, + enums.UserInputTypes.STRING_ARRAY.value: list, + enums.UserInputTypes.OPTIONS.value: str, + enums.UserInputTypes.MULTIPLE_OPTIONS.value: list, +} + + +MAX_USER_INPUT_ORDER = 9999 + + +class UserInput: + + def __init__( + self, + name, + input_type, + value, + def_val, + tentacle_type, + tentacle_name, + min_val=None, + max_val=None, + options=None, + title=None, + item_title=None, + other_schema_values=None, + editor_options=None, + read_only=False, + is_nested_config=None, + nested_tentacle=None, + parent_input_name=None, + show_in_summary=True, + show_in_optimizer=True, + path=None, + order=None, + ): + self.name = name + self.input_type = input_type + self.value = value + self.def_val = def_val + self.tentacle_type = tentacle_type + self.tentacle_name = tentacle_name + self.min_val = min_val + self.max_val = max_val + self.options = options + self.title = title + self.item_title = item_title + self.other_schema_values = other_schema_values + self.editor_options = editor_options + self.read_only = read_only + self.is_nested_config = is_nested_config + self.nested_tentacle = nested_tentacle + self.parent_input_name = parent_input_name + self.show_in_summary = show_in_summary + self.show_in_optimizer = show_in_optimizer + self.path = path + self.order = order + + def to_dict(self): + """ + :return: the dict representation of the UserInput + """ + return { + "name": self.name, + "input_type": ( + self.input_type + if isinstance(self.input_type, str) + else self.input_type.value + ), + "value": self.value, + "def_val": self.def_val, + "min_val": self.min_val, + "max_val": self.max_val, + "options": self.options, + "title": self.title, + "item_title": self.item_title, + "other_schema_values": self.other_schema_values, + "editor_options": self.editor_options, + "read_only": self.read_only, + "tentacle_type": self.tentacle_type, + "tentacle": self.tentacle_name, + "nested_tentacle": self.nested_tentacle, + "parent_input_name": self.parent_input_name, + "is_nested_config": self.is_nested_config, + "in_summary": self.show_in_summary, + "in_optimizer": self.show_in_optimizer, + "path": self.path, + "order": self.order, + } + + +class UserInputFactory: + def __init__(self, user_input_tentacle_type: enums.UserInputTentacleTypes): + self.user_input_tentacle_type: enums.UserInputTentacleTypes = ( + user_input_tentacle_type + ) + self.tentacle_class = None + self.tentacle_config_proxy = None + + def set_tentacle_class(self, tentacle_class): + """ + set the associated tentacle class + :return: self + """ + self.tentacle_class = tentacle_class + return self + + def set_tentacle_config_proxy(self, tentacle_config_proxy): + """ + set the associated tentacle configuration proxy function + :return: self + """ + self.tentacle_config_proxy = tentacle_config_proxy + return self + + def user_input( + self, + name: str, + input_type, + def_val, + registered_inputs: dict, + value=None, + min_val=None, + max_val=None, + options=None, + title=None, + item_title=None, + other_schema_values=None, + editor_options=None, + read_only=False, + is_nested_config=None, + nested_tentacle=None, + parent_input_name=None, + show_in_summary=True, + show_in_optimizer=True, + path=None, + order=None, + array_indexes=None, + return_value_only=True, + update_parent_value=True, + ): + """ + Set and return a user input value. + The returned value is set as an attribute named as the "name" param with " " replaced by "_" + in self.specific_config. + Types are any UserInputTypes + :return: the saved_config value if any, def_val otherwise + """ + sanitized_name = sanitize_user_input_name(name) + parent = _find_parent_config_node( + self.tentacle_config_proxy(), parent_input_name, array_indexes + ) + used_value = value + if value is None: + # value is not provided, use def_val + used_value = def_val + if parent is not None: + # parent found, try to find saved value + try: + used_value = parent[sanitized_name] + except KeyError: + # use default value + pass + input_key = f"{parent_input_name}{name}" + created_input = UserInput( + name, + input_type, + used_value, + def_val, + self.user_input_tentacle_type.value, + self.tentacle_class.get_name(), + min_val=min_val, + max_val=max_val, + options=options, + title=title, + item_title=item_title, + other_schema_values=other_schema_values, + editor_options=editor_options, + read_only=read_only, + is_nested_config=is_nested_config, + nested_tentacle=nested_tentacle, + parent_input_name=parent_input_name, + show_in_summary=show_in_summary, + show_in_optimizer=show_in_optimizer, + path=path, + order=order, + ) + if input_key not in registered_inputs: + # do not register user input multiple times + registered_inputs[input_key] = created_input + if parent is not None and update_parent_value: + parent[sanitized_name] = used_value + return used_value if return_value_only else created_input + + @contextlib.contextmanager + def local_factory(self, tentacle_class, tentacle_config_proxy): + """ + temporarily set the associated tentacle class and tentacle config proxy + """ + previous_tentacle_class = self.tentacle_class + previous_tentacle_config_proxy = self.tentacle_config_proxy + try: + self.set_tentacle_class(tentacle_class).set_tentacle_config_proxy( + tentacle_config_proxy + ) + yield + finally: + self.set_tentacle_class(previous_tentacle_class).set_tentacle_config_proxy( + previous_tentacle_config_proxy + ) + + +def sanitize_user_input_name(name): + """ + :return: the sanitized user input name + """ + return name.replace(" ", "_") + + +async def save_user_input( + u_input: UserInput, + run_data_writer, + flush_if_necessary=False, + skip_flush=False, +): + """ + Save the user input in the given run_data_writer. First checks if the user input is not already present. + Does not update a user input if it is already saved in run_data_writer. + """ + if not run_data_writer.enable_storage: + return + if not await run_data_writer.contains_row( + enums.DBTables.INPUTS.value, + { + "name": u_input.name, + "tentacle": u_input.tentacle_name, + "nested_tentacle": u_input.nested_tentacle, + "parent_input_name": u_input.parent_input_name, + "is_nested_config": u_input.is_nested_config, + }, + ): + await run_data_writer.log( + enums.DBTables.INPUTS.value, + u_input.to_dict(), + ) + if not skip_flush and ( + flush_if_necessary or run_data_writer.are_data_initialized + ): + # in some cases, user inputs might be setup after the 1st trading mode cycle: flush + # writer in live mode to ensure writing + await run_data_writer.flush() + + +def get_user_input_tentacle_type(tentacle) -> str: + """ + :return: the tentacle associated UserInputTentacleTypes + """ + return ( + enums.UserInputTentacleTypes.TRADING_MODE.value + if hasattr(tentacle, "trading_config") + else ( + enums.UserInputTentacleTypes.EVALUATOR.value + if hasattr(tentacle, "specific_config") + else enums.UserInputTentacleTypes.EXCHANGE.value + ) + ) + + +async def get_user_inputs(reader, tentacle_name=None): + """ + :return: all user inputs. Only user inputs associated to the given tentacle_name that have been saved into + the given reader if tentacle_name is given + """ + all_inputs = await reader.all(enums.DBTables.INPUTS.value) + if tentacle_name is None: + return all_inputs + return [ + selected_input + for selected_input in all_inputs + if selected_input["tentacle"] == tentacle_name + ] + + +async def clear_user_inputs(writer, tentacle_name=None): + """ + Delete all user inputs. Only delete user inputs associated to tentacle_name if tentacle_name is given + """ + if tentacle_name is None: + await writer.delete_all(enums.DBTables.INPUTS.value) + else: + query = {"tentacle": tentacle_name} + await writer.delete(enums.DBTables.INPUTS.value, query) + + +def _find_parent_config_node(tentacle_config, parent_input_name, array_indexes): + """ + :return: the found parent node from tentacles_config + """ + if parent_input_name is not None: + found, nested_parent = dict_util.find_nested_value( + tentacle_config, + sanitize_user_input_name(parent_input_name), + list_indexes=array_indexes, + ) + if found and isinstance(nested_parent, dict): + return nested_parent + if found and isinstance(nested_parent, list) and array_indexes: + return nested_parent[array_indexes[-1]] + # non dict or list with array_indexes nested parents are not supported + return None + return tentacle_config diff --git a/packages/commons/octobot_commons/constants.py b/packages/commons/octobot_commons/constants.py new file mode 100644 index 0000000000..47182e86b0 --- /dev/null +++ b/packages/commons/octobot_commons/constants.py @@ -0,0 +1,304 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import octobot_commons.enums as enums + + +def parse_boolean_environment_var(env_key: str, default_value: str) -> bool: + """ + :param env_key: the environment var key + :param default_value: the default value + :return: True when the var value is "True" or "true" else false + """ + return bool(os.getenv(env_key, default_value).lower() == "true") + + +# time +MSECONDS_TO_SECONDS = 1000 +MINUTE_TO_SECONDS = 60 +MSECONDS_TO_MINUTE = MSECONDS_TO_SECONDS * MINUTE_TO_SECONDS +HOURS_TO_SECONDS = MINUTE_TO_SECONDS * 60 +HOURS_TO_MSECONDS = MSECONDS_TO_SECONDS * MINUTE_TO_SECONDS * MINUTE_TO_SECONDS +DAYS_TO_SECONDS = HOURS_TO_SECONDS * 24 + +# Strings +CONFIG_WILDCARD = "*" +CONFIG_SYMBOLS_WILDCARD = ["*"] +PORTFOLIO_AVAILABLE = "available" +MARGIN_PORTFOLIO = "margin" +PORTFOLIO_TOTAL = "total" + +# config +CONFIG_ENABLED_OPTION = "enabled" +CONFIG_DEBUG_OPTION = "DEV-MODE" +CONFIG_TIME_FRAME = "time_frame" +USER_FOLDER = "user" +CONFIG_FOLDER = "config" +CONFIG_FILE = "config.json" +SAFE_DUMP_SUFFIX = ".back" +DEFAULT_CONFIG_FILE = "default_config.json" +DEFAULT_CONFIG_FILE_PATH = f"{CONFIG_FOLDER}/{DEFAULT_CONFIG_FILE}" +SCHEMA = "schema" +CONFIG_FILE_EXT = ".json" +CONFIG_FILE_SCHEMA = f"{CONFIG_FOLDER}/config_{SCHEMA}.json" +CONFIG_REFRESH_RATE = "refresh_rate_seconds" +CONFIG_SAVED_HISTORICAL_TIMEFRAMES = "saved_historical_timeframes" +CONFIG_OPTIMIZER_ID = "optimizer_id" +CONFIG_BACKTESTING_ID = "backtesting_id" +CONFIG_CURRENT_LIVE_ID = "current-live-id" +DEFAULT_CURRENT_LIVE_ID = 1 + +# profiles +PROFILES_FOLDER = "profiles" +USER_PROFILES_FOLDER = f"{USER_FOLDER}/{PROFILES_FOLDER}" +PROFILE_CONFIG_FILE = "profile.json" +CONFIG_PROFILE = "profile" +CONFIG_BACKTESTING_PROFILE = "backtesting_profile" +DEFAULT_PROFILE = "default" +DEFAULT_PROFILE_FILE = f"{CONFIG_PROFILE}.json" +CONFIG_NAME = "name" +CONFIG_SLUG = "slug" +CONFIG_DESCRIPTION = "description" +CONFIG_AVATAR = "avatar" +CONFIG_ORIGIN_URL = "origin_url" +CONFIG_AUTO_UPDATE = "auto_update" +CONFIG_READ_ONLY = "read_only" +CONFIG_HIDDEN = "hidden" +CONFIG_IMPORTED = "imported" +CONFIG_EXTRA_BACKTESTING_TIME_FRAMES = "extra_backtesting_time_frames" +CONFIG_COMPLEXITY = "complexity" +CONFIG_RISK = "risk" +CONFIG_TYPE = "type" +PROFILE_CONFIG = "config" +CONFIG_ID = "id" +PROFILE_FILE_SCHEMA = f"{CONFIG_FOLDER}/profile_{SCHEMA}.json" +PROFILE_EXPORT_FORMAT = "zip" +IMPORTED_PROFILE_PREFIX = "imported" +USE_CURRENT_PROFILE = "use_current_profile" +PROFILE_REFRESH_HOURS_INTERVAL = int(os.getenv("PROFILE_REFRESH_HOURS_INTERVAL", "24")) + +# Config currencies +CONFIG_CRYPTO_CURRENCIES = "crypto-currencies" +CONFIG_CRYPTO_CURRENCY = "crypto-currency" +CONFIG_CRYPTO_PAIRS = "pairs" +CONFIG_CRYPTO_QUOTE = "quote" +CONFIG_CRYPTO_ADD = "add" +TRADING_SYMBOL_REGEX = "([a-zA-Z]|\\d){1,}\\/([a-zA-Z]|\\d){1,}" + +# Exchange +CONFIG_EXCHANGES = "exchanges" +CONFIG_EXCHANGE_KEY = "api-key" +CONFIG_EXCHANGE_SECRET = "api-secret" +CONFIG_EXCHANGE_PASSWORD = "api-password" +CONFIG_EXCHANGE_UID = "api-uid" +CONFIG_EXCHANGE_ACCESS_TOKEN = "access_token" +CONFIG_EXCHANGE_TYPE = "exchange-type" +CONFIG_FORCE_AUTHENTICATION = "force-authentication" +CONFIG_CONTRACT_TYPE = "contract-type" +CONFIG_REQUIRED_EXTRA_TIMEFRAMES = "required_extra_timeframes" +CONFIG_EXCHANGE_SANDBOXED = "sandboxed" +CONFIG_EXCHANGE_FUTURE = "future" +CONFIG_EXCHANGE_MARGIN = "margin" +CONFIG_EXCHANGE_OPTION = "option" +CONFIG_EXCHANGE_SPOT = "spot" +CONFIG_EXCHANGE_REST_ONLY = "rest_only" +CONFIG_EXCHANGE_WEB_SOCKET = "web-socket" +CONFIG_EXCHANGE_SUB_ACCOUNT = "sub_account" +CONFIG_EXCHANGE_ENCRYPTED_VALUES = [ + CONFIG_EXCHANGE_KEY, + CONFIG_EXCHANGE_SECRET, + CONFIG_EXCHANGE_PASSWORD, +] + +# Trader +CONFIG_TRADING = "trading" +CONFIG_TRADER = "trader" +CONFIG_LOAD_TRADE_HISTORY = "load-trade-history" +CONFIG_TRADER_RISK = "risk" +CONFIG_TRADER_PAUSED = "paused" +CONFIG_TRADER_ALLOW_ARTIFICIAL_ORDERS = "allow-artificial-orders" +CONFIG_TRADER_RISK_MIN = 0.05 +CONFIG_TRADER_RISK_MAX = 1 +CONFIG_TRADER_REFERENCE_MARKET = "reference-market" +DEFAULT_STORAGE_TRADING_MODE = "default" + +# Simulator +CONFIG_SIMULATOR = "trader-simulator" +CONFIG_STARTING_PORTFOLIO = "starting-portfolio" +SIMULATOR_CURRENT_PORTFOLIO = "simulator_current_portfolio" +CONFIG_SIMULATOR_FEES = "fees" +CONFIG_SIMULATOR_FEES_MAKER = "maker" +CONFIG_SIMULATOR_FEES_TAKER = "taker" +CONFIG_SIMULATOR_FEES_WITHDRAW = "withdraw" + +# Optimization campaigns +DEFAULT_CAMPAIGN = "default_campaign" + +# Optimizer +OPTIMIZER_RUNS_FOLDER = "optimizer" + +# OS +PLATFORM_DATA_SEPARATOR = ":" +CLOCK_REFRESH_HOURS_INTERVAL = int(os.getenv("CLOCK_REFRESH_HOURS_INTERVAL", "4")) +RESOURCES_WATCHER_MINUTES_INTERVAL = float( + os.getenv("RESOURCES_WATCHER_MINUTES_INTERVAL", "10") +) +BYTES_BY_GB = 1000000000 + +# Evaluators +MIN_EVAL_TIME_FRAME = enums.TimeFrames.ONE_MINUTE +INIT_EVAL_NOTE = 0 +START_PENDING_EVAL_NOTE = "0" + +# tentacles +TENTACLE_DEFAULT_CONFIG = "default_config" +CONFIG_TENTACLES_FILE = "tentacles_config.json" +EVALUATOR_PRIORITY = "priority" +DEFAULT_EVALUATOR_PRIORITY = 0 +CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT = "required_candles_count" +CONFIG_HISTORICAL_CONFIGURATION = "_historical_configuration" +NESTED_TENTACLE_CONFIG = "nested_tentacle_configuration" +CONFIG_ACTIVATION_TOPICS = "activation method" +CONFIG_TRIGGER_TIMEFRAMES = "Trigger_timeframes" +CONFIG_EMIT_TRADING_SIGNALS = "emit_trading_signals" +CONFIG_TRADING_SIGNALS_STRATEGY = "trading_strategy" +ALLOW_DEFAULT_CONFIG = "allow_default_config" + +# terms of service +CONFIG_ACCEPTED_TERMS = "accepted_terms" + +# distribution +DEFAULT_DISTRIBUTION = "default" +CONFIG_DISTRIBUTION = "distribution" + +# metrics +CONFIG_METRICS = "metrics" +CONFIG_METRICS_BOT_ID = "metrics-bot-id" +TIMER_BEFORE_METRICS_REGISTRATION_SECONDS = 600 +TIMER_BETWEEN_METRICS_UPTIME_UPDATE = float( + os.getenv("TIMER_BETWEEN_METRICS_UPTIME_UPDATE", str(3600 * 4)) +) +METRICS_URL = os.getenv("METRICS_OCTOBOT_ONLINE_URL", "https://metrics.octobot.online/") +METRICS_ROUTE_GEN_BOT_ID = "gen-bot-id" +METRICS_ROUTE = "metrics" +METRICS_ROUTE_COMMUNITY = f"{METRICS_ROUTE}/community" +METRICS_ROUTE_UPTIME = f"{METRICS_ROUTE}/uptime" +METRICS_ROUTE_REGISTER = f"{METRICS_ROUTE}/register" +COMMUNITY_TOPS_COUNT = 1000 + +# default values in config files and interfaces +DEFAULT_API_KEY = "your-api-key-here" +DEFAULT_API_SECRET = "your-api-secret-here" +DEFAULT_API_PASSWORD = "your-api-password-here" +DEFAULT_EXCHANGE_TYPE = CONFIG_EXCHANGE_SPOT +NO_KEY_VALUE = "NO KEY" +DEFAULT_CONFIG_VALUES = { + DEFAULT_API_KEY, + DEFAULT_API_SECRET, + DEFAULT_API_PASSWORD, + "NOKEY", + NO_KEY_VALUE, + "Empty", +} + +# cache +CACHE_FOLDER = "cache" +CACHE_FILE = "cache.json" +CACHE_HASH_SIZE = 15 +CACHE_RELATED_DATA_SEPARATOR = "##" +LOCAL_BOT_DATA = "local_bot_data" +DO_NOT_CACHE = "do_not_cache" +DO_NOT_OVERRIDE_CACHE = "do_not_override_cache" +DEFAULT_IGNORED_VALUE = -1 +UNPROVIDED_CACHE_IDENTIFIER = "_unprovided" + +# Async settings +DEFAULT_FUTURE_TIMEOUT = 120 + +# Github urls +GITHUB_RAW_CONTENT_URL = "https://raw.githubusercontent.com" +GITHUB_API_CONTENT_URL = "https://api.github.com" +GITHUB_BASE_URL = "https://github.com" +GITHUB_ORGANISATION = "Drakkar-Software" + +# External resources +EXTERNAL_RESOURCE_URL = "https://raw.githubusercontent.com/Drakkar-Software/OctoBot/assets/external_resources.json" + +# Run databases +DATA_FOLDER = "data" +DB_SEPARATOR = "_" +TINYDB_EXT = ".json" +MAX_BACKTESTING_RUNS = 500000 +MAX_OPTIMIZER_RUNS = 50000 +FORCE_BACKTESTING_LOGS = parse_boolean_environment_var( + "FORCE_BACKTESTING_LOGS", "false" +) + +# DSL interpreter +BASE_OPERATORS_LIBRARY = "base" +CONTEXTUAL_OPERATORS_LIBRARY = "contextual" +UNRESOLVED_PARAMETER_PLACEHOLDER = "UNRESOLVED_PARAMETER" +LOCAL_VALUE_PLACEHOLDER = "LOCAL_VALUE_PLACEHOLDER" + +# Logging +EXCEPTION_DESC = "exception_desc" +IS_EXCEPTION_DESC = "is_exception_desc" +ALLOW_PRIVATE_DATA_LOGS = parse_boolean_environment_var("ALLOW_PRIVATE_DATA_LOGS", "true") +PRIVATE_MESSAGE_PLACEHOLDER = "***" +PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT = 3 + + +# from https://www.coingecko.com/en/categories/stablecoins +USD_LIKE_COINS = [ + "USDT", + "USDC", + "TUSD", + "USDE", + "USDS", + "BUSD", + "DAI", + "USD", + "USDD", + "USDP", + "GUSD", + "LUSD", + "FDUSD", +] +DEFAULT_REFERENCE_MARKET = "USDT" +USUAL_REFERENCE_MARKET_USD_LIKE_COINS = [ + DEFAULT_REFERENCE_MARKET, + "USDC", +] + +# from coinbase and binance fiat pairs +FIAT_NON_USD_LIKE_COINS = [ + "EUR", + "GBP", + "TRY", + "BRL", + "ARS", +] + +USD_LIKE_AND_FIAT_COINS = USD_LIKE_COINS + FIAT_NON_USD_LIKE_COINS + +ENABLE_CERTIFI_SSL_CERTIFICATES = parse_boolean_environment_var( + "ENABLE_CERTIFI_SSL_CERTIFICATES", "true" +) +KNOWN_POTENTIALLY_SSL_FAILED_REQUIRED_URL = ( + "https://tentacles.octobot.online/officials/packages/full/base/1.0.9/metadata.yaml" +) +IS_DEV_MODE_ENABLED = parse_boolean_environment_var(CONFIG_DEBUG_OPTION, "False") +USE_MINIMAL_LIBS = parse_boolean_environment_var("USE_MINIMAL_LIBS", "false") diff --git a/packages/commons/octobot_commons/context_util.py b/packages/commons/octobot_commons/context_util.py new file mode 100644 index 0000000000..45edc0a111 --- /dev/null +++ b/packages/commons/octobot_commons/context_util.py @@ -0,0 +1,29 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class EmptyContextManager: + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + pass diff --git a/packages/commons/octobot_commons/cryptography/__init__.py b/packages/commons/octobot_commons/cryptography/__init__.py new file mode 100644 index 0000000000..e11e4dc668 --- /dev/null +++ b/packages/commons/octobot_commons/cryptography/__init__.py @@ -0,0 +1,54 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.cryptography import encryption +from octobot_commons.cryptography.encryption import ( + generate_rsa_key_pair, + generate_aes_key, + generate_iv, + rsa_decrypt_aes_key, + rsa_encrypt_aes_key, + aes_gcm_decrypt, + aes_gcm_encrypt, + pbkdf2_derive_key_from_pin, + pbkdf2_encrypt_aes_key, + pbkdf2_decrypt_aes_key, +) +from octobot_commons.cryptography import signing +from octobot_commons.cryptography.signing import ( + generate_ecdsa_key_pair, + parse_private_key_pem, + sign_data, + verify_signature, +) + + +__all__ = [ + "generate_rsa_key_pair", + "generate_aes_key", + "generate_iv", + "rsa_decrypt_aes_key", + "rsa_encrypt_aes_key", + "aes_gcm_decrypt", + "aes_gcm_encrypt", + "pbkdf2_derive_key_from_pin", + "pbkdf2_encrypt_aes_key", + "pbkdf2_decrypt_aes_key", + "generate_ecdsa_key_pair", + "parse_private_key_pem", + "sign_data", + "verify_signature", +] diff --git a/packages/commons/octobot_commons/cryptography/encryption.py b/packages/commons/octobot_commons/cryptography/encryption.py new file mode 100644 index 0000000000..0d05d76ce4 --- /dev/null +++ b/packages/commons/octobot_commons/cryptography/encryption.py @@ -0,0 +1,361 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import secrets +from typing import Optional, Tuple + +from cryptography.hazmat.primitives.asymmetric import padding, rsa +from cryptography.hazmat.primitives.ciphers.aead import AESGCM +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC +from cryptography.hazmat.backends import default_backend + + +def generate_rsa_key_pair(key_size: int = 4096) -> Tuple[bytes, bytes]: + """Generate an RSA key pair + + This function generates a private and public RSA key pair equivalent to: + - openssl genrsa -out user_private_key.pem 4096 + - openssl rsa -in user_private_key.pem -pubout -out user_public_key.pem + + :param key_size: The size of the RSA key in bits. Defaults to 4096. + :type key_size: int + :return: A tuple containing (private_key_pem, public_key_pem) as bytes. + :rtype: Tuple[bytes, bytes] + :raises ValueError: If key_size is less than 2048 bits (security best practice). + """ + if key_size < 2048: + raise ValueError("RSA key size must be at least 2048 bits for security.") + + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=key_size, + ) + + # Serialize private key to PEM format + private_key_pem = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + ) + + # Extract and serialize public key to PEM format + public_key = private_key.public_key() + public_key_pem = public_key.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) + + return private_key_pem, public_key_pem + + +def generate_aes_key(key_size: int = 32) -> bytes: + """Generate a random AES key + + This function generates random bytes equivalent to: + - openssl rand -out aes_key_s1.bin 32 + + :param key_size: The size of the AES key in bytes. Defaults to 32 (256 bits). + :type key_size: int + :return: Random bytes suitable for use as an AES key. + :rtype: bytes + :raises ValueError: If key_size is less than 1. + """ + if key_size < 1: + raise ValueError("Key size must be at least 1 byte.") + + return secrets.token_bytes(key_size) + + +def generate_iv(iv_size: int = 12) -> bytes: + """Generate a random initialization vector (IV) + + This function generates random bytes equivalent to: + - openssl rand -out iv_s1.bin 12 + + :param iv_size: The size of the IV in bytes. Defaults to 12 (standard for AES-GCM). + :type iv_size: int + :return: Random bytes suitable for use as an IV. + :rtype: bytes + :raises ValueError: If iv_size is less than 1. + """ + if iv_size < 1: + raise ValueError("IV size must be at least 1 byte.") + + return secrets.token_bytes(iv_size) + + +def rsa_decrypt_aes_key(encrypted_aes_key: bytes, private_key_pem: bytes) -> bytes: + """Decrypt an RSA-encrypted AES key + + This function decrypts an AES key equivalent to: + - openssl rsautl -decrypt -in aes_key.enc -oaep -inkey private_key.pem -out aes_key.bin + + :param encrypted_aes_key: The RSA-encrypted AES key. + :type encrypted_aes_key: bytes + :param private_key_pem: The RSA private key in PEM format. + :type private_key_pem: bytes + :return: The decrypted AES key as bytes. + :rtype: bytes + :raises ValueError: If the private key cannot be loaded or is invalid. + """ + # Load the private key from PEM format + private_key = serialization.load_pem_private_key( + private_key_pem, + password=None, + ) + + if not isinstance(private_key, rsa.RSAPrivateKey): + raise ValueError("Private key must be an RSA key.") + + # Decrypt using OAEP padding + decrypted_key = private_key.decrypt( + encrypted_aes_key, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ) + + return decrypted_key + + +def rsa_encrypt_aes_key(aes_key: bytes, public_key_pem: bytes) -> bytes: + """Encrypt an AES key using RSA public key + + This function encrypts an AES key equivalent to: + - openssl rsautl -encrypt -in aes_key.bin -oaep -pubin -inkey public_key.pem -out aes_key.enc + + :param aes_key: The AES key to encrypt. + :type aes_key: bytes + :param public_key_pem: The RSA public key in PEM format. + :type public_key_pem: bytes + :return: The encrypted AES key as bytes. + :rtype: bytes + :raises ValueError: If the public key cannot be loaded or is invalid. + """ + # Load the public key from PEM format + public_key = serialization.load_pem_public_key(public_key_pem) + + if not isinstance(public_key, rsa.RSAPublicKey): + raise ValueError("Public key must be an RSA key.") + + # Encrypt using OAEP padding + encrypted_key = public_key.encrypt( + aes_key, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ) + + return encrypted_key + + +def aes_gcm_decrypt( + encrypted_data: bytes, + aes_key: bytes, + iv: bytes, + associated_data: Optional[bytes] = None, +) -> bytes: + """Decrypt data using AES-256-GCM + + This function decrypts data equivalent to: + - openssl enc -aes-256-gcm -d -in data.enc -out data.bin -K $AES_KEY -iv $IV + + :param encrypted_data: The encrypted data (ciphertext + authentication tag). + :type encrypted_data: bytes + :param aes_key: The AES key (32 bytes for AES-256). + :type aes_key: bytes + :param iv: The initialization vector (12 bytes for AES-GCM). + :type iv: bytes + :param associated_data: Optional associated data (AAD) for authenticated encryption. + :type associated_data: Optional[bytes] + :return: The decrypted plaintext data. + :rtype: bytes + :raises ValueError: If key or IV sizes are invalid. + """ + if len(aes_key) != 32: + raise ValueError("AES key must be 32 bytes (256 bits) for AES-256-GCM.") + + if len(iv) != 12: + raise ValueError("IV must be 12 bytes for AES-GCM.") + + # AESGCM expects the authentication tag to be appended to the ciphertext + # OpenSSL GCM format: ciphertext + 16-byte tag + if len(encrypted_data) < 16: + raise ValueError( + "Encrypted data too short (must include 16-byte authentication tag)." + ) + + # Create AESGCM cipher + aesgcm = AESGCM(aes_key) + + # Decrypt with authentication (encrypted_data should be ciphertext + 16-byte tag) + plaintext = aesgcm.decrypt(iv, encrypted_data, associated_data) + + return plaintext + + +def aes_gcm_encrypt( + plaintext: bytes, aes_key: bytes, iv: bytes, associated_data: Optional[bytes] = None +) -> bytes: + """Encrypt data using AES-256-GCM + + This function encrypts data equivalent to: + - openssl enc -aes-256-gcm -in data.bin -out data.enc -K $AES_KEY -iv $IV + + :param plaintext: The plaintext data to encrypt. + :type plaintext: bytes + :param aes_key: The AES key (32 bytes for AES-256). + :type aes_key: bytes + :param iv: The initialization vector (12 bytes for AES-GCM). + :type iv: bytes + :param associated_data: Optional associated data (AAD) for authenticated encryption. + :type associated_data: Optional[bytes] + :return: The encrypted data (ciphertext + 16-byte authentication tag). + :rtype: bytes + :raises ValueError: If key or IV sizes are invalid. + """ + if len(aes_key) != 32: + raise ValueError("AES key must be 32 bytes (256 bits) for AES-256-GCM.") + + if len(iv) != 12: + raise ValueError("IV must be 12 bytes for AES-GCM.") + + # Create AESGCM cipher + aesgcm = AESGCM(aes_key) + + # Encrypt with authentication (returns ciphertext + tag) + ciphertext_with_tag = aesgcm.encrypt(iv, plaintext, associated_data) + + return ciphertext_with_tag + + +def pbkdf2_derive_key_from_pin( + pin: str, salt: bytes, iterations: int = 200000, key_size: int = 32 +) -> bytes: + """Derive a cryptographic key from a PIN using PBKDF2 + + This function derives a key from a user PIN using PBKDF2-HMAC-SHA256. + High iteration count is recommended for PINs due to their low entropy. + + :param pin: The user PIN (4-6 digits recommended). + :type pin: str + :param salt: Random salt bytes (should be unique per encryption, 16 bytes recommended). + :type salt: bytes + :param iterations: Number of PBKDF2 iterations. Defaults to 200000 (high for PIN security). + :type iterations: int + :param key_size: Size of the derived key in bytes. Defaults to 32 (256 bits for AES-256). + :type key_size: int + :return: The derived key as bytes. + :rtype: bytes + :raises ValueError: If salt is empty or iterations/key_size are invalid. + """ + if len(salt) < 1: + raise ValueError("Salt must be at least 1 byte.") + if iterations < 1: + raise ValueError("Iterations must be at least 1.") + if key_size < 1: + raise ValueError("Key size must be at least 1 byte.") + + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=key_size, + salt=salt, + iterations=iterations, + backend=default_backend(), + ) + return kdf.derive(pin.encode("utf-8")) + + +def pbkdf2_encrypt_aes_key( + aes_key: bytes, + pin: str, + salt: Optional[bytes] = None, + iterations: int = 200000, +) -> Tuple[bytes, bytes, bytes]: + """Encrypt an AES key using a PIN-derived key via PBKDF2 + + This function encrypts an AES key using a key derived from a user PIN. + The salt and IV are generated randomly and must be stored for decryption. + + :param aes_key: The AES key to encrypt (32 bytes for AES-256). + :type aes_key: bytes + :param pin: The user PIN (4-6 digits recommended). + :type pin: str + :param salt: Optional salt bytes. If None, a random 16-byte salt is generated. + :type salt: Optional[bytes] + :param iterations: Number of PBKDF2 iterations. Defaults to 200000. + :type iterations: int + :return: A tuple containing (encrypted_key, salt, iv). + :rtype: Tuple[bytes, bytes, bytes] + :raises ValueError: If AES key size is invalid. + """ + if len(aes_key) != 32: + raise ValueError("AES key must be 32 bytes (256 bits) for AES-256.") + + if salt is None: + salt = secrets.token_bytes(16) + + # Derive key from PIN + derived_key = pbkdf2_derive_key_from_pin(pin, salt, iterations, key_size=32) + + # Generate IV for AES-GCM + iv = generate_iv() + + # Encrypt AES key using derived key + encrypted_key = aes_gcm_encrypt(aes_key, derived_key, iv) + + return encrypted_key, salt, iv + + +def pbkdf2_decrypt_aes_key( + encrypted_aes_key: bytes, pin: str, salt: bytes, iv: bytes, iterations: int = 200000 +) -> bytes: + """Decrypt an AES key using a PIN-derived key via PBKDF2 + + This function decrypts an AES key that was encrypted with pbkdf2_encrypt_aes_key. + + :param encrypted_aes_key: The encrypted AES key. + :type encrypted_aes_key: bytes + :param pin: The user PIN used for encryption. + :type pin: str + :param salt: The salt used during encryption. + :type salt: bytes + :param iv: The IV used during encryption. + :type iv: bytes + :param iterations: Number of PBKDF2 iterations (must match encryption). Defaults to 200000. + :type iterations: int + :return: The decrypted AES key as bytes. + :rtype: bytes + :raises ValueError: If decryption fails (wrong PIN, corrupted data, etc.). + """ + # Derive key from PIN (same parameters as encryption) + derived_key = pbkdf2_derive_key_from_pin(pin, salt, iterations, key_size=32) + + # Decrypt AES key using derived key + try: + decrypted_key = aes_gcm_decrypt(encrypted_aes_key, derived_key, iv) + except Exception as e: + raise ValueError( + "Failed to decrypt AES key. Wrong PIN or corrupted data." + ) from e + + return decrypted_key diff --git a/packages/commons/octobot_commons/cryptography/signing.py b/packages/commons/octobot_commons/cryptography/signing.py new file mode 100644 index 0000000000..5b66eeb397 --- /dev/null +++ b/packages/commons/octobot_commons/cryptography/signing.py @@ -0,0 +1,140 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import base64 +import re + +from typing import Tuple, Optional +from cryptography.exceptions import InvalidSignature + +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives import hashes, serialization + + +def generate_ecdsa_key_pair( + curve: Optional[ec.EllipticCurve] = None, +) -> Tuple[bytes, bytes]: + """Generate an ECDSA key pair + + This function generates a private and public ECDSA key pair equivalent to: + - openssl ecparam -name secp256r1 -genkey -out user_ecdsa_private_key.pem + - openssl ec -in user_ecdsa_private_key.pem -pubout -out user_ecdsa_public_key.pem + + :param curve: The elliptic curve to use. Defaults to SECP256R1 (secp256r1/prime256v1). + :type curve: Optional[ec.EllipticCurve] + :return: A tuple containing (private_key_pem, public_key_pem) as bytes. + :rtype: Tuple[bytes, bytes] + """ + if curve is None: + curve = ec.SECP256R1() + + # Generate private key + private_key = ec.generate_private_key(curve) + + # Serialize private key to PEM format + private_key_pem = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + + # Extract and serialize public key to PEM format + public_key = private_key.public_key() + public_key_pem = public_key.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) + + return private_key_pem, public_key_pem + + +def parse_private_key_pem(private_key_input: str) -> bytes: + """Parse a private key from either raw PEM text or a base64-encoded PEM string. + + Handles two formats: + - Raw PEM: starts with "-----BEGIN" (e.g. from a GitHub secret storing PEM directly) + - Base64-encoded PEM: the PEM content encoded as base64 (whitespace is stripped before decoding) + + :param private_key_input: The private key as a raw PEM string or base64-encoded PEM. + :type private_key_input: str + :return: The private key PEM as bytes. + :rtype: bytes + """ + stripped = private_key_input.strip() + if stripped.startswith("-----BEGIN"): + return stripped.encode("utf-8") + cleaned = re.sub(r'\s', '', private_key_input) + return base64.b64decode(cleaned, validate=True) + + +def sign_data(data: bytes, private_key_pem: bytes) -> bytes: + """Generate an ECDSA signature for data + + This function generates a signature equivalent to: + - openssl dgst -sha256 -sign ecdsa_private_key.pem -out signature.bin data.bin + + :param data: The data to sign. + :type data: bytes + :param private_key_pem: The ECDSA private key in PEM format. + :type private_key_pem: bytes + :return: The signature as bytes. + :rtype: bytes + :raises ValueError: If the private key cannot be loaded or is invalid. + """ + # Load the private key from PEM format + private_key = serialization.load_pem_private_key( + private_key_pem, + password=None, + ) + + if not isinstance(private_key, ec.EllipticCurvePrivateKey): + raise ValueError("Private key must be an ECDSA key.") + + # Hash the data with SHA-256 and sign it + signature = private_key.sign(data, ec.ECDSA(hashes.SHA256())) + + return signature + + +def verify_signature(data: bytes, public_key_pem: bytes, signature: bytes) -> bool: + """Verify an ECDSA signature for data + + This function verifies a signature equivalent to: + - openssl dgst -sha256 -verify ecdsa_public_key.pem -signature signature.bin data.bin + + :param data: The original data that was signed. + :type data: bytes + :param public_key_pem: The ECDSA public key in PEM format. + :type public_key_pem: bytes + :param signature: The signature to verify. + :type signature: bytes + :return: True if the signature is valid, False otherwise. + :rtype: bool + :raises ValueError: If the public key cannot be loaded or is invalid. + """ + # Load the public key from PEM format + public_key = serialization.load_pem_public_key(public_key_pem) + + if not isinstance(public_key, ec.EllipticCurvePublicKey): + raise ValueError("Public key must be an ECDSA key.") + + try: + # Hash the data with SHA-256 and verify the signature + public_key.verify(signature, data, ec.ECDSA(hashes.SHA256())) + return True + except InvalidSignature: + return False diff --git a/packages/commons/octobot_commons/data_util.py b/packages/commons/octobot_commons/data_util.py new file mode 100644 index 0000000000..a7ade74b3c --- /dev/null +++ b/packages/commons/octobot_commons/data_util.py @@ -0,0 +1,67 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import numpy as np + + +def normalize_data(data): + """ + Normalize the specified data + :param data: the data to normalize + :return: normalized data + """ + if data.size > 1: + return (data - np.mean(data)) / (data.max() - data.min()) + return data + + +def drop_nan(data): + """ + Drop nan of a numpy array + :param data: the numpy array + :return: the numpy array without nan value + """ + return data[~np.isnan(data)] + + +def mean(number_list): + """ + Return the list average + :param number_list: the list to use + :return: the list average + """ + return sum(number_list) / len(number_list) if number_list else 0 + + +def shift_value_array(array, shift_count=-1, fill_value=np.nan, dtype=np.float64): + """ + Shift a numpy array + :param array: the numpy array + :param shift_count: the shift direction (- / +) and counter + :param fill_value: the new value of the shifted indexes + :param dtype: the type of the numpy array (and also the :fill_value:) + :return: the shifted array + """ + new_array = np.empty_like(array, dtype=dtype) + if shift_count > 0: + new_array[:shift_count] = fill_value + new_array[shift_count:] = array[:-shift_count] + elif shift_count < 0: + new_array[shift_count:] = fill_value + new_array[:shift_count] = array[-shift_count:] + else: + new_array[:] = array + return new_array diff --git a/packages/commons/octobot_commons/databases/__init__.py b/packages/commons/octobot_commons/databases/__init__.py new file mode 100644 index 0000000000..22b0191ace --- /dev/null +++ b/packages/commons/octobot_commons/databases/__init__.py @@ -0,0 +1,113 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases import global_storage +from octobot_commons.databases import database_caches +from octobot_commons.databases import document_database_adaptors +from octobot_commons.databases import bases +from octobot_commons.databases import implementations +from octobot_commons.databases import relational_databases + +from octobot_commons.databases import cache_manager +from octobot_commons.databases import databases_util +from octobot_commons.databases import cache_client +from octobot_commons.databases import run_databases + +from octobot_commons.databases.global_storage import ( + GlobalSharedMemoryStorage, +) + +from octobot_commons.databases.database_caches import ( + GenericDatabaseCache, + ChronologicalReadDatabaseCache, +) + +from octobot_commons.databases.document_database_adaptors import ( + AbstractDocumentDatabaseAdaptor, + TinyDBAdaptor, +) + +from octobot_commons.databases.bases import ( + DocumentDatabase, + BaseDatabase, +) + +from octobot_commons.databases.implementations import ( + DBReader, + DBWriter, + DBWriterReader, + MetaDatabase, + CacheDatabase, + CacheTimestampDatabase, +) + +from octobot_commons.databases.relational_databases import ( + SQLiteDatabase, + new_sqlite_database, +) + +from octobot_commons.databases.run_databases import ( + RunDatabasesIdentifier, + RunDatabasesProvider, + init_bot_storage, + close_bot_storage, + AbstractRunDatabasesPruner, + FileSystemRunDatabasesPruner, + run_databases_pruner_factory, +) + +from octobot_commons.databases.cache_manager import ( + CacheManager, +) + +from octobot_commons.databases.databases_util import ( + CacheWrapper, +) + +from octobot_commons.databases.cache_client import ( + CacheClient, +) + + +__all__ = [ + "GlobalSharedMemoryStorage", + "GenericDatabaseCache", + "ChronologicalReadDatabaseCache", + "AbstractDocumentDatabaseAdaptor", + "TinyDBAdaptor", + "DocumentDatabase", + "BaseDatabase", + "MetaDatabase", + "DBReader", + "DBWriter", + "DBWriterReader", + "CacheDatabase", + "CacheTimestampDatabase", + "SQLiteDatabase", + "new_sqlite_database", + "RunDatabasesIdentifier", + "RunDatabasesProvider", + "init_bot_storage", + "close_bot_storage", + "AbstractRunDatabasesPruner", + "FileSystemRunDatabasesPruner", + "run_databases_pruner_factory", + "CacheManager", + "CacheWrapper", + "CacheClient", +] diff --git a/packages/commons/octobot_commons/databases/bases/__init__.py b/packages/commons/octobot_commons/databases/bases/__init__.py new file mode 100644 index 0000000000..579c13e7b7 --- /dev/null +++ b/packages/commons/octobot_commons/databases/bases/__init__.py @@ -0,0 +1,32 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.bases import document_database +from octobot_commons.databases.bases import base_database + +from octobot_commons.databases.bases.document_database import ( + DocumentDatabase, +) +from octobot_commons.databases.bases.base_database import ( + BaseDatabase, +) + + +__all__ = [ + "DocumentDatabase", + "BaseDatabase", +] diff --git a/packages/commons/octobot_commons/databases/bases/base_database.py b/packages/commons/octobot_commons/databases/bases/base_database.py new file mode 100644 index 0000000000..3e0799080e --- /dev/null +++ b/packages/commons/octobot_commons/databases/bases/base_database.py @@ -0,0 +1,187 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import numpy + +import octobot_commons.databases.document_database_adaptors as adaptors +import octobot_commons.databases.bases.document_database as document_database +import octobot_commons.databases.database_caches as database_cache + + +class BaseDatabase: + def __init__( + self, + file_path: str, + database_adaptor=adaptors.TinyDBAdaptor, + cache_size=None, + enable_storage=True, + **kwargs, + ): + self.enable_storage = enable_storage + self._database = None + if self.enable_storage and database_adaptor is not None: + self._database = document_database.DocumentDatabase( + database_adaptor(file_path, cache_size=cache_size, **kwargs) + ) + self._database.initialize() + self.are_data_initialized = False + self.are_data_initialized_by_key = {} + self.cache = database_cache.GenericDatabaseCache() + + def set_initialized_flags(self, value, keys=None): + """ + Updates the initialized values of the given keys + :param value: the updated initialized value + :param keys: keys to update. Updated every key if left to None + """ + self.are_data_initialized = value + for key in keys or self.are_data_initialized_by_key.keys(): + self.are_data_initialized_by_key[key] = value + + def get_db_path(self): + """ + :return: the path to the current database's path + """ + return self._database.get_db_path() + + async def search(self, dict_query: dict = None): + """ + :param dict_query: initialization dict for the query + :return: a search query + """ + if dict_query is None: + return await self._database.query_factory() + return (await self._database.query_factory()).fragment(dict_query) + + async def count(self, table_name: str, query) -> int: + """ + :param table_name: the table to count data from + :param query: the query to count results from + :return: the number of elements that match the given query + """ + return await self._database.count(table_name, query) + + async def flush(self): + """ + Flushes the database, "committing" operations into the database + """ + await self._database.flush() + + async def hard_reset(self): + """ + Completely resets the database as if it just was created + """ + return await self._database.hard_reset() + + def is_hard_reset_error(self, err: Exception) -> bool: + """ + returns True if the given error should trigger + a hard reset of the database + """ + return self._database.is_hard_reset_error(err) + + async def close(self): + """ + Closes the database, flushes it first + """ + if self.enable_storage: + await self.flush() + await self._database.close() + + async def clear(self): + """ + Clears the database, removing everything from it + """ + self.cache.clear() + + async def contains_row(self, table: str, row: dict): + """ + Returns true if the given rows are included in the given table, also looking into internal cache + :param table: the table to look into + :param row: the row to find + """ + if self.cache.contains_row(table, row): + return True + return await self.count(table, await self.search(row)) > 0 + + def __str__(self): + return f"{self.__class__.__name__}, database: {self._database}" + + @classmethod + def _create_database( + cls, + *args, + required_adaptor=False, + cache_size=None, + database_adaptor=None, + **kwargs, + ): + if required_adaptor: + adaptor = kwargs.pop("database_adaptor", adaptors.TinyDBAdaptor) + if adaptor is None: + raise RuntimeError("database_adaptor parameter required") + adaptor_instance = adaptor(*args, cache_size=cache_size, **kwargs) + return ( + cls( + *args, + database_adaptor=database_adaptor, + cache_size=cache_size, + **kwargs, + ), + adaptor_instance, + ) + return cls(*args, cache_size=cache_size, **kwargs), None + + @classmethod + @contextlib.asynccontextmanager + async def database( + cls, *args, with_lock=False, cache_size=None, database_adaptor=None, **kwargs + ): + """ + Yields a database and closes it when exiting the context manager + :param args: arguments to pass to the database constructor + :param with_lock: When True, creating a lock synchronized database + :param cache_size: size of the internal database cache + :param database_adaptor: Database class to use + :param kwargs: keyword arguments to pass to the database constructor + """ + database, adaptor_instance = cls._create_database( + *args, + required_adaptor=with_lock, + cache_size=cache_size, + database_adaptor=database_adaptor, + **kwargs, + ) + if with_lock: + async with document_database.DocumentDatabase.locked_database( + adaptor_instance + ) as locked_db: + database._database = locked_db + yield database + # context manager is taking care of closing the database + return + try: + yield database + finally: + await database.close() + + @staticmethod + def get_serializable_value(value): + """ + Returns a json serializable value of the given element. Mostly used to serialize numpy types + :param value: the element + """ + return value.item() if isinstance(value, numpy.generic) else value diff --git a/packages/commons/octobot_commons/databases/bases/document_database.py b/packages/commons/octobot_commons/databases/bases/document_database.py new file mode 100644 index 0000000000..60bcc01d2c --- /dev/null +++ b/packages/commons/octobot_commons/databases/bases/document_database.py @@ -0,0 +1,194 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import octobot_commons.logging + + +class DocumentDatabase: + """ + DocumentDatabase is used to communicate with an underlying database + """ + + def __init__(self, database_adaptor): + """ + DocumentDatabase constructor + :param database_adaptor: database adaptor + """ + self.adaptor = database_adaptor + + def initialize(self): + """ + Initialize the database adaptor. + """ + self.adaptor.initialize() + + def get_uuid(self, document) -> int: + """ + Returns the uuid of the document + :param document: the document + """ + return self.adaptor.get_uuid(document) + + def get_db_path(self): + """ + Select database path + """ + return self.adaptor.db_path + + async def select(self, table_name: str, query, uuid=None) -> list: + """ + Select data from the table_name table + :param table_name: name of the table + :param query: select query + :param uuid: id of the document + """ + return await self.adaptor.select(table_name, query, uuid=uuid) + + async def tables(self) -> list: + """ + Select tables + """ + return await self.adaptor.tables() + + async def insert(self, table_name: str, row: dict) -> int: + """ + Insert dict data into the table_name table + :param table_name: name of the table + :param row: data to insert + """ + return await self.adaptor.insert(table_name, row) + + async def upsert(self, table_name: str, row: dict, query, uuid=None) -> int: + """ + Insert or update dict data into the table_name table + :param table_name: name of the table + :param row: data to insert + :param query: select query + :param uuid: id of the document + """ + return await self.adaptor.upsert(table_name, row, query, uuid=uuid) + + async def insert_many(self, table_name: str, rows: list) -> list: + """ + Insert multiple dict data into the table_name table + :param table_name: name of the table + :param rows: data to insert + """ + return await self.adaptor.insert_many(table_name, rows) + + async def update(self, table_name: str, row: dict, query: dict, uuid=None) -> list: + """ + Insert dict data into the table_name table + :param table_name: name of the table + :param row: data to update + :param query: select statement + :param uuid: id of the document + """ + return await self.adaptor.update(table_name, row, query, uuid=uuid) + + async def update_many(self, table_name: str, update_values: list) -> list: + """ + Update multiple values from the table_name table + :param table_name: name of the table + :param update_values: values to update + """ + return await self.adaptor.update(table_name, update_values) + + async def delete(self, table_name: str, query, uuid=None) -> list: + """ + Delete data from the table_name table + :param table_name: name of the table + :param query: select query + :param uuid: id of the document + """ + return await self.adaptor.delete(table_name, query, uuid=uuid) + + async def count(self, table_name: str, query) -> int: + """ + Counts documents in the table_name table + :param table_name: name of the table + :param query: select query + """ + return await self.adaptor.count(table_name, query) + + async def query_factory(self): + """ + Creates a new empty select query + """ + return await self.adaptor.query_factory() + + async def hard_reset(self): + """ + Completely reset the database + """ + self.get_logger().debug("hard resetting database") + return await self.adaptor.hard_reset() + + def is_hard_reset_error(self, err: Exception) -> bool: + """ + returns True if the given error should trigger + a hard reset of the database + """ + return self.adaptor.is_hard_reset_error(err) + + async def flush(self): + """ + Flushes the database cache + """ + self.get_logger().debug("flushing database") + return await self.adaptor.flush() + + async def close(self): + """ + Closes the database + """ + self.get_logger().debug("closing database") + return await self.adaptor.close() + + def get_logger(self): + """ + :return: the database logger + """ + return octobot_commons.logging.get_logger(str(self)) + + def __str__(self): + return f"{self.__class__.__name__} with adaptor: {self.adaptor}" + + @classmethod + @contextlib.asynccontextmanager + async def locked_database(cls, *args, **kwargs): + """ + Instantiate and then ensure lock is acquired before initializing the database. + Closes the database and then releases the lock when exiting + :param args: args to pass to the database constructor + :param kwargs: kwargs to pass to the database constructor + """ + instance = None + lock_acquired = False + try: + instance = cls(*args, **kwargs) + if instance.adaptor.is_multiprocessing(): + await instance.adaptor.acquire() + lock_acquired = True + instance.initialize() + yield instance + finally: + if instance is not None: + try: + await instance.close() + finally: + if lock_acquired and instance.adaptor.is_multiprocessing(): + await instance.adaptor.release() diff --git a/packages/commons/octobot_commons/databases/cache_client.py b/packages/commons/octobot_commons/databases/cache_client.py new file mode 100644 index 0000000000..ee5efd9a68 --- /dev/null +++ b/packages/commons/octobot_commons/databases/cache_client.py @@ -0,0 +1,243 @@ +# pylint: disable=C0301,R0902,R0913,C0415,R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as constants +import octobot_commons.enums as enums +import octobot_commons.errors as errors +import octobot_commons.databases.cache_manager as cache_manager +import octobot_commons.databases.implementations as implementations +import octobot_commons.databases.document_database_adaptors as document_database_adaptors + + +class CacheClient: + def __init__( + self, + tentacle, + exchange_name, + symbol, + time_frame, + tentacles_setup_config, + flush_cache_when_necessary, + config_name=None, + ): + self._flush_cache_when_necessary = flush_cache_when_necessary + self.cache_manager = cache_manager.CacheManager( + database_adaptor=document_database_adaptors.TinyDBAdaptor + ) + self.config_name = config_name or self.cache_manager.DEFAULT_CONFIG_IDENTIFIER + self.tentacle = tentacle + self.exchange_name = exchange_name + self.symbol = symbol + self.time_frame = time_frame + self.tentacles_setup_config = tentacles_setup_config + try: + import octobot_tentacles_manager.models as tentacles_manager_models + + self.tentacles_requirements = ( + tentacles_manager_models.TentacleRequirementsTree( + self.tentacle, self.config_name + ) + ) + except ImportError as err: + raise ImportError( + "OctoBot-Tentacles-Manager is required to use cache clients" + ) from err + + def has_cache(self, pair, time_frame, tentacle_name=None, config_name=None): + """ + Returns True when a cache as specified in arguments is currently open + :param pair: pair/symbol to build a cache path from + :param pair: time_frame to build a cache path from + :param tentacle_name: tentacle to build a cache path from + :param config_name: name of the configuration + """ + return self.cache_manager.has_cache( + tentacle_name or self.tentacle.get_name(), + self.exchange_name, + pair, + time_frame, + config_name or self.config_name, + ) + + def get_cache_path(self, tentacle, config_name=None): + """ + Returns the path to the cache associated to the given tentacle + :param tentacle: tentacle to build a cache path from + :param config_name: name of the configuration + """ + config_name = config_name or self.config_name + return self.cache_manager.get_cache_or_build_path( + tentacle, + self.exchange_name, + self.symbol, + self.time_frame, + tentacle.get_name(), + config_name, + self.tentacles_setup_config, + self.tentacles_requirements, + ) + + def get_cache( + self, + tentacle_name=None, + cache_type=implementations.CacheTimestampDatabase, + config_name=None, + ): + """ + Returns the cache associated to the given tentacle_name + :param tentacle_name: name of the tentacle to get cache from + :param cache_type: type of the cache + :param config_name: name of the configuration + """ + tentacle = self.tentacle if tentacle_name is None else None + tentacle_name = tentacle_name or self.tentacle.get_name() + config_name = config_name or self.config_name + cache, just_created = self.cache_manager.get_cache( + tentacle, + tentacle_name, + self.exchange_name, + self.symbol, + self.time_frame, + config_name, + self.tentacles_setup_config, + self.tentacles_requirements, + cache_type=cache_type, + ) + if just_created and cache_type is implementations.CacheTimestampDatabase: + if tentacle is None: + metadata = self.cache_manager.get_cache_previous_db_metadata( + tentacle_name, + self.exchange_name, + self.symbol, + self.time_frame, + config_name, + ) + else: + metadata = { + enums.CacheDatabaseColumns.TRIGGERED_AFTER_CANDLES_CLOSE.value: tentacle.is_triggered_after_candle_close + } + if metadata is None: + raise RuntimeError( + "Missing db metadata. Please provide the tentacle parameter to this method" + ) + cache.add_metadata(metadata) + return cache + + async def get_cached_value( + self, + value_key: str = enums.CacheDatabaseColumns.VALUE.value, + cache_key=None, + tentacle_name=None, + config_name=None, + ) -> tuple: + """ + Get a value for the current cache + :param value_key: identifier of the value + :param cache_key: timestamp to use in order to look for a value + :param tentacle_name: name of the tentacle to get cache from + :param config_name: name of the tentacle configuration as used in nested tentacle calls + :return: the cached value and a boolean (True if cached value is missing from cache) + """ + try: + return ( + await self.get_cache( + tentacle_name=tentacle_name, config_name=config_name + ).get(cache_key, name=value_key), + False, + ) + except errors.NoCacheValue: + return None, True + + async def set_cached_value( + self, + value, + value_key: str = enums.CacheDatabaseColumns.VALUE.value, + cache_key=None, + flush_if_necessary=False, + tentacle_name=None, + config_name=None, + **kwargs, + ): + """ + Set a value into the current cache + :param value: value to set + :param value_key: identifier of the value + :param cache_key: timestamp to associate the value to + :param flush_if_necessary: flush the cache after set (write into database) + :param tentacle_name: name of the tentacle to get cache from + :param config_name: name of the tentacle configuration as used in nested tentacle calls + :param kwargs: other related value_key / value couples to set at this timestamp. Use for plotted data + :return: None + """ + cache = None + try: + cache = self.get_cache(tentacle_name=tentacle_name, config_name=config_name) + await cache.set(cache_key, value, name=value_key) + if kwargs: + for key, val in kwargs.items(): + await cache.set( + cache_key, + val, + name=f"{value_key}{constants.CACHE_RELATED_DATA_SEPARATOR}{key}", + ) + finally: + if flush_if_necessary and self._flush_cache_when_necessary and cache: + await cache.flush() + + async def set_cached_values( + self, + values, + value_key, + cache_keys, + flush_if_necessary=False, + tentacle_name=None, + config_name=None, + additional_values_by_key=None, + ): + """ + Set a value into the current cache + :param values: values to set + :param value_key: identifier of the value + :param cache_keys: timestamps to associate the values to + :param flush_if_necessary: flush the cache after set (write into database) + :param tentacle_name: name of the tentacle to get cache from + :param config_name: name of the tentacle configuration as used in nested tentacle calls + :param additional_values_by_key: other values to set in a dict of cache_keys + :return: None + """ + cache = None + try: + cache = self.get_cache(tentacle_name=tentacle_name, config_name=config_name) + await cache.set_values( + cache_keys, + values, + name=value_key, + additional_values_by_key=additional_values_by_key, + ) + finally: + if flush_if_necessary and self._flush_cache_when_necessary and cache: + await cache.flush() + + def ensure_no_missing_cached_value(self, is_missing): + """ + Raises NoCacheValue when is_missing is True + :param is_missing: True when a value is missing + """ + if is_missing: + raise errors.NoCacheValue( + f"No cache value with cache key: {enums.CacheDatabaseColumns.VALUE.value}. " + f"Impossible process {constants.DO_NOT_OVERRIDE_CACHE} return value." + ) diff --git a/packages/commons/octobot_commons/databases/cache_manager.py b/packages/commons/octobot_commons/databases/cache_manager.py new file mode 100644 index 0000000000..d4ad60b56b --- /dev/null +++ b/packages/commons/octobot_commons/databases/cache_manager.py @@ -0,0 +1,380 @@ +# pylint: disable=R0913,R0914,C0415 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +import octobot_commons.databases.document_database_adaptors as adaptors +import octobot_commons.databases.implementations.cache_timestamp_database as cache_timestamp_database +import octobot_commons.databases.databases_util as databases_util +import octobot_commons.constants as common_constants +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_commons.errors as common_errors +import octobot_commons.tree as tree + + +class CacheManager: + """ + Manages cache as a global dict since caches can be accessed from live, backtesting and optimizers concurrently + """ + + CACHES = tree.BaseTree() + DEFAULT_CONFIG_IDENTIFIER = "default" + + def __init__(self, database_adaptor=adaptors.TinyDBAdaptor): + self.database_adaptor = database_adaptor + + def get_cache( + self, + tentacle, + tentacle_name, + exchange_name, + symbol, + time_frame, + config_name, + tentacles_setup_config, + tentacles_requirements, + cache_type=cache_timestamp_database.CacheTimestampDatabase, + open_if_missing=True, + ) -> tuple: + """ + Returns the cache database associated to the given arguments. Creates/opens it if missing + :param tentacle: tentacle to read configuration from to when creating/opening the associated database. + Can be None when the database is open + :param tentacle_name: name of the tentacle. Used to identify the database + :param exchange_name: name of the exchange. Used to identify the database + :param symbol: symbol. Used to identify the database + :param time_frame: name of the timeframe. Used to identify the database + :param config_name: name of the configuration (used in nested contexts). Used to identify the database + :param tentacles_setup_config: the used tentacles_setup_config. Used to read configuration from to when + creating/opening the associated database if the given tentacle instance doesn't already have its configuration + :param tentacles_requirements: TentacleRequirementsTree associated to the given tentacle. Used to take + nested calls into account when creating/opening databases + :param cache_type: Type of the cache database. Used when creating/opening databases + :param open_if_missing: When True, if the database is missing when asked for then it is created. + :return: the cache database + """ + identifier = config_name or self.DEFAULT_CONFIG_IDENTIFIER + cache_path = [tentacle_name, exchange_name, symbol, time_frame, identifier] + try: + return self.__class__.CACHES.get_node(cache_path).node_value.get_database() + except tree.NodeExistsError: + if open_if_missing: + if tentacle is None: + config_names = self.__class__.CACHES.get_children_keys( + cache_path[:-1] + ) + available_config_names = ( + f"Available configuration names: {config_names}. " + if config_names + else "" + ) + raise common_errors.UninitializedCache( + f"No initialized cache for {tentacle_name} tentacle with config name: {config_name}. " + f"{available_config_names}" + f"The tentacle parameter must be set to get the associated cache database path" + ) + cache = self._open_or_create_cache_database( + tentacle, + exchange_name, + symbol, + time_frame, + tentacle_name, + identifier, + tentacles_setup_config, + cache_type, + tentacles_requirements, + ) + self.__class__.CACHES.set_node_at_path(cache, None, cache_path) + return cache.get_database() + raise common_errors.NoCacheValue( + f"Cache is initialized for {tentacle_name} on {exchange_name} " + f"{symbol} {time_frame}" + ) + + def has_cache( + self, tentacle_name, exchange_name, symbol, time_frame, config_name=None + ): + """ + Returns True if a cache database is open according to the given parameters + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange + :param symbol: associated symbol + :param time_frame: name of the time frame + :param config_name: name of the configuration + """ + identifier = config_name or self.DEFAULT_CONFIG_IDENTIFIER + try: + return bool( + self.__class__.CACHES.get_node( + [tentacle_name, exchange_name, symbol, time_frame, identifier] + ) + ) + except tree.NodeExistsError: + return False + + def get_cache_registered_requirements( + self, tentacle_name, exchange_name, symbol, time_frame, config_name=None + ): + """ + Returns the TentacleRequirementsTree associated to the found cache database + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange + :param symbol: associated symbol + :param time_frame: name of the time frame + :param config_name: name of the configuration + """ + identifier = config_name or self.DEFAULT_CONFIG_IDENTIFIER + return self.__class__.CACHES.get_node( + [tentacle_name, exchange_name, symbol, time_frame, identifier] + ).node_value.tentacles_requirements + + def get_cache_previous_db_metadata( + self, tentacle_name, exchange_name, symbol, time_frame, config_name=None + ): + """ + Returns the metadata associated of the previous cache database. Mostly used when including + new dependencies to ensure metadata are preserved throughout databases + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange + :param symbol: associated symbol + :param time_frame: name of the time frame + :param config_name: name of the configuration + """ + identifier = config_name or self.DEFAULT_CONFIG_IDENTIFIER + return self.__class__.CACHES.get_node( + [tentacle_name, exchange_name, symbol, time_frame, identifier] + ).node_value.previous_db_metadata + + async def clear_cache( + self, + tentacle_name, + exchange_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + symbol=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + time_frame=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + config_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + ): + """ + Delete all the content of cache databases + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange, all of them if left unspecified + :param symbol: associated symbol, all of them if left unspecified + :param time_frame: name of the time frame, all of them if left unspecified + :param config_name: name of the configuration, all of them if left unspecified + """ + try: + for cache, _ in self._caches( + tentacle_name, exchange_name, symbol, time_frame, config_name + ): + await cache.node_value.clear() + return True + except tree.NodeExistsError: + return False + + async def reset_cache( + self, tentacle_name, exchange_name, symbol, time_frame, config_name + ): + """ + Removes a cache database from cache manager. Closes but does not clear the database + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange + :param symbol: associated symbol + :param time_frame: name of the time frame + :param config_name: name of the configuration + """ + identifier = config_name or self.DEFAULT_CONFIG_IDENTIFIER + cache = self.__class__.CACHES.delete_node( + [tentacle_name, exchange_name, symbol, time_frame, identifier] + ) + await cache.node_value.close() + + async def close_cache( + self, + tentacle_name, + exchange_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + symbol=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + time_frame=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + config_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + reset_cache_db_ids=False, + ): + """ + Closes but does not clear the associated databases. Removes cache databases from cache manager if + reset_cache_db_ids + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange, all of them if left unspecified + :param symbol: associated symbol, all of them if left unspecified + :param time_frame: name of the time frame, all of them if left unspecified + :param config_name: name of the configuration, all of them if left unspecified + :param reset_cache_db_ids: When True, removes the database from cache manager + """ + try: + to_remove_caches = [] + for cache, identifiers in self._caches( + tentacle_name, exchange_name, symbol, time_frame, config_name + ): + await cache.node_value.close() + to_remove_caches.append(identifiers) + if reset_cache_db_ids: + # remove cache from caches to force complete reopen of the cache db + # (might be at a different place) + for identifier in to_remove_caches: + self.__class__.CACHES.delete_node(identifier) + return True + except tree.NodeExistsError: + return False + + async def reset(self): + """ + Completely resets the cache manager closing and unregistering every cache + """ + for cache, _ in self._caches(): + if cache.node_value.is_open(): + await cache.node_value.close() + self.__class__.CACHES = tree.BaseTree() + + def _caches( + self, + tentacle_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + exchange_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + symbol=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + time_frame=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + config_name=common_constants.UNPROVIDED_CACHE_IDENTIFIER, + ): + path = [] + for element in (tentacle_name, exchange_name, symbol, time_frame, config_name): + if element == common_constants.UNPROVIDED_CACHE_IDENTIFIER: + break + path.append(element) + if self.__class__.CACHES.get_children_keys(path): + for ( + cache, + identifiers, + ) in self.__class__.CACHES.get_nested_children_with_path(path): + # avoid caches that have no node value (might be remains of already cleared caches) + if cache.node_value is not None: + yield cache, identifiers + # no cache value + return [] + + def _open_or_create_cache_database( + self, + tentacle, + exchange, + symbol, + time_frame, + tentacle_name, + identifier, + tentacles_setup_config, + cache_type, + tentacles_requirements, + ): + cache_full_path = self.get_cache_or_build_path( + tentacle, + exchange, + symbol, + time_frame, + tentacle_name, + identifier, + tentacles_setup_config, + tentacles_requirements, + ) + cache_dir = os.path.split(cache_full_path)[0] + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + return self._open_cache_database( + cache_full_path, cache_type, tentacles_requirements + ) + + def _open_cache_database(self, file_path, cache_type, tentacles_requirements): + """ + Override to use another cache database or adaptor + :return: the cache database class + """ + return databases_util.CacheWrapper( + file_path, cache_type, self.database_adaptor, tentacles_requirements + ) + + def get_cache_or_build_path( + self, + tentacle, + exchange_name, + symbol, + time_frame, + tentacle_name, + config_name, + tentacles_setup_config, + tentacles_requirements, + ): + """ + Returns the cache path associated to the given arguments. Use local cache when available otherwise + recompute the whole path (time consuming) + :param tentacle: tentacle use to build the path. Required if path is not in cache already + :param tentacle_name: name of the tentacle + :param exchange_name: name of the exchange + :param symbol: associated symbol, all of them if left unspecified + :param time_frame: name of the time frame, all of them if left unspecified + :param config_name: name of the configuration, all of them if left unspecified + :param tentacles_setup_config: the used tentacles_setup_config. Used to read configuration from to when + creating/opening the associated database if the given tentacle instance doesn't already have its configuration + :param tentacles_requirements: TentacleRequirementsTree associated to the given tentacle. Used to take + nested calls into account when creating/opening databases + """ + identifier = config_name or self.DEFAULT_CONFIG_IDENTIFIER + try: + return self.__class__.CACHES.get_node( + [tentacle_name, exchange_name, symbol, time_frame, identifier] + ).node_value.get_path() + except tree.NodeExistsError: + sanitized_pair = symbol_util.merge_symbol(symbol) if symbol else symbol + required_tentacles = tentacles_requirements.get_all_required_tentacles( + False + ) + # ensure tentacles requirements are snapshotting the configuration that was used to build the + # cache identifier + tentacles_requirements.synchronize_tentacles_config() + identifying_tentacles = [tentacle] + required_tentacles + # warning: very slow, should be called as rarely as possible + code_hash, config_hash = self._tentacles_hashes( + identifying_tentacles, tentacles_setup_config + ) + return os.path.join( + common_constants.USER_FOLDER, + common_constants.CACHE_FOLDER, + tentacle_name, + exchange_name, + sanitized_pair, + time_frame, + code_hash, + config_hash, + common_constants.CACHE_FILE, + ) + + @staticmethod + def _tentacles_hashes(identifying_tentacles, tentacles_setup_config) -> (str, str): + try: + import octobot_tentacles_manager.api + + return ( + octobot_tentacles_manager.api.get_code_hash(identifying_tentacles)[ + : common_constants.CACHE_HASH_SIZE + ], + octobot_tentacles_manager.api.get_config_hash( + identifying_tentacles, tentacles_setup_config + )[: common_constants.CACHE_HASH_SIZE], + ) + except ImportError as err: + raise ImportError( + "octobot_tentacles_manager is required to use cache" + ) from err diff --git a/packages/commons/octobot_commons/databases/database_caches/__init__.py b/packages/commons/octobot_commons/databases/database_caches/__init__.py new file mode 100644 index 0000000000..d3469c313a --- /dev/null +++ b/packages/commons/octobot_commons/databases/database_caches/__init__.py @@ -0,0 +1,32 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.database_caches import generic_database_cache +from octobot_commons.databases.database_caches import chronological_read_database_cache + +from octobot_commons.databases.database_caches.generic_database_cache import ( + GenericDatabaseCache, +) +from octobot_commons.databases.database_caches.chronological_read_database_cache import ( + ChronologicalReadDatabaseCache, +) + + +__all__ = [ + "GenericDatabaseCache", + "ChronologicalReadDatabaseCache", +] diff --git a/packages/commons/octobot_commons/databases/database_caches/chronological_read_database_cache.py b/packages/commons/octobot_commons/databases/database_caches/chronological_read_database_cache.py new file mode 100644 index 0000000000..6c831f0139 --- /dev/null +++ b/packages/commons/octobot_commons/databases/database_caches/chronological_read_database_cache.py @@ -0,0 +1,126 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as constants + + +class ChronologicalReadDatabaseCache: + DATA_KEY = "data" + DATA_SORT_KEY = "data_sort_key" + CHRONO_INDEX_KEY = "chrono_index" + + def __init__(self): + self.timestamped_sorted_data = {} + + def set(self, values, sort_key, identifiers): + """ + Set whole cache to later be able to efficiently select it + :param values: cache values to set + :param sort_key: key in the values dict to use to chronologically order data + :param identifiers: identifiers of the given cache. Used to store multiple cache sets + """ + nested_cache = self.timestamped_sorted_data + for identifier in identifiers: + if identifier not in nested_cache: + nested_cache[identifier] = {} + nested_cache = nested_cache[identifier] + data = self._get_cache_data(identifiers) + data[self.DATA_SORT_KEY] = sort_key + data[self.DATA_KEY] = sorted(values, key=lambda x: x[sort_key]) + data[self.CHRONO_INDEX_KEY] = 0 + + def reset_cached_indexes(self, parent=None): + """ + Set the cache index of each cached element at 0 + :param parent: current cached element + """ + for cached_data in (parent or self.timestamped_sorted_data).values(): + if isinstance(cached_data, dict): + if self.CHRONO_INDEX_KEY in cached_data: + cached_data[self.CHRONO_INDEX_KEY] = 0 + else: + self.reset_cached_indexes(cached_data) + + def get(self, inferior_timestamp, superior_timestamp, identifiers): + """ + Returns a cache values + :param inferior_timestamp: timestamp to start selecting from. Use constants.DEFAULT_IGNORED_VALUE to select all + :param superior_timestamp: timestamp to stop selecting at. Use constants.DEFAULT_IGNORED_VALUE to select all + :param identifiers: identifiers of the cache to look into. Used to store multiple cache sets + """ + cache_data = self._get_cache_data(identifiers) + # if one timestamp is constants.DEFAULT_IGNORED_VALUE, return every available data from/up to this timestamp + if inferior_timestamp == constants.DEFAULT_IGNORED_VALUE: + if superior_timestamp == constants.DEFAULT_IGNORED_VALUE: + return cache_data[self.DATA_KEY] + return [ + element + for element in cache_data[self.DATA_KEY] + if element[cache_data[self.DATA_SORT_KEY]] <= superior_timestamp + ] + if superior_timestamp == constants.DEFAULT_IGNORED_VALUE: + return [ + element + for element in cache_data[self.DATA_KEY] + if element[cache_data[self.DATA_SORT_KEY]] >= inferior_timestamp + ] + return self._get_from_time_window( + cache_data, inferior_timestamp, superior_timestamp + ) + + def _get_from_time_window(self, cache_data, inferior_timestamp, superior_timestamp): + data = cache_data[self.DATA_KEY] + data_sort_key = cache_data[self.DATA_SORT_KEY] + # a specific time window is requested: use the local cache index to generate it faster + min_index = max_index = None + # identify the select window considering data are time sorted + start_index = cache_data[self.CHRONO_INDEX_KEY] + for index in range(start_index, len(data)): + element = data[index] + if element[data_sort_key] >= inferior_timestamp and min_index is None: + min_index = index + if element[data_sort_key] > superior_timestamp and max_index is None: + max_index = index + # consider that since inferior_timestamp got requested, as this is a chronological database cache, + # is going only following requests will only be in future times. Therefore previous data can be + # ignored to avoid iterating over them over and over + cache_data[self.CHRONO_INDEX_KEY] = min_index + return data[min_index:max_index] + if min_index is None: + return [] + return data[min_index:] + + def has(self, identifiers): + """ + :param identifiers: identifiers of the cache to look for. + :return: True if the current identifiers are related to a registered cache + """ + try: + self._get_cache_data(identifiers) + return True + except KeyError: + return False + + def _get_cache_data(self, identifiers): + found_data = self.timestamped_sorted_data + for identifier in identifiers: + found_data = found_data[identifier] + return found_data + + def clear(self): + """ + Resets the cache database + """ + self.timestamped_sorted_data = {} diff --git a/packages/commons/octobot_commons/databases/database_caches/generic_database_cache.py b/packages/commons/octobot_commons/databases/database_caches/generic_database_cache.py new file mode 100644 index 0000000000..a8a616d98e --- /dev/null +++ b/packages/commons/octobot_commons/databases/database_caches/generic_database_cache.py @@ -0,0 +1,140 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.errors as errors +import octobot_commons.dict_util as dict_util + + +class GenericDatabaseCache: + MAX_CACHE_SIZE = 512 + + def __init__(self): + # used for select / contains methods + self.rows_cache = {} + # used for cached_query() (used in upsert) + self.query_cache = {} + # used for cached_uuid() (used in upsert) + self.uuid_cache = {} + + def register(self, table, row, result=None, uuid=None): + """ + Saves the given row/query in local cache + :param table: the associated table + :param row: the row/query to save + :param result: select result to save + :param uuid: uuid to save + """ + cached = False + try: + if uuid is not None: + cached = True + try: + self.uuid_cache[table][row] = uuid + except KeyError: + self.uuid_cache[table] = {row: uuid} + elif result is not None: + cached = True + try: + self.query_cache[table][row] = result + except KeyError: + self.query_cache[table] = {row: result} + except TypeError as err: + # might happen when row can't be hashed: impossible to cache it in this case + raise errors.UncachableValue(f"Unhashable row: {row}") from err + if not cached: + self._add_to_rows_cache(table, row) + + def _add_to_rows_cache(self, table, row): + try: + if len(self.rows_cache[table]) >= self.MAX_CACHE_SIZE: + self.rows_cache[table] = self.rows_cache[table][ + self.MAX_CACHE_SIZE // 2 : + ] + self.rows_cache[table].append(row) + except KeyError: + self.rows_cache[table] = [row] + + def has(self, table): + """ + :param table: table name + :return: True if the given table is in rows_cache + """ + return table in self.rows_cache + + def cached_uuid(self, table, identifier): + """ + :param table: table name + :param identifier: identifier of to look for + :return: the cached uuid of the given identifier + """ + try: + return self.uuid_cache[table][identifier] + except KeyError: + return None + + def cached_query(self, table, identifier): + """ + :param table: table name + :param identifier: identifier of to look for + :return: the cached query of the given identifier + """ + try: + return self.query_cache[table][identifier] + except KeyError: + return None + + def contains_row(self, table, val_by_keys): + """ + :param table: table name + :param val_by_keys: dict to look for + :return: True if a row of the local cache contains every value of the given dict + """ + # Should check the real database in case this returns false + try: + for element in self.rows_cache[table]: + if dict_util.contains_each_element(element, val_by_keys): + return True + except KeyError: + pass + return False + + def delete_from_rows_cache(self, table, val_by_keys): + """ + :param table: table name + :param val_by_keys: dict to look for + :return: True if a row of the local cache contains every value of the given dict + """ + # Should check the real database in case this returns false + try: + self.rows_cache[table] = [ + element + for element in self.rows_cache[table] + if not dict_util.contains_each_element(element, val_by_keys) + ] + except KeyError: + pass + + def clear(self, table=None): + """ + Resets the current cache + """ + if table: + self.rows_cache.pop(table, None) + self.query_cache.pop(table, None) + self.uuid_cache.pop(table, None) + else: + self.rows_cache = {} + self.query_cache = {} + self.uuid_cache = {} diff --git a/packages/commons/octobot_commons/databases/databases_util/__init__.py b/packages/commons/octobot_commons/databases/databases_util/__init__.py new file mode 100644 index 0000000000..d2d6045042 --- /dev/null +++ b/packages/commons/octobot_commons/databases/databases_util/__init__.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.databases_util import cache_wrapper +from octobot_commons.databases.databases_util.cache_wrapper import ( + CacheWrapper, +) diff --git a/packages/commons/octobot_commons/databases/databases_util/cache_wrapper.py b/packages/commons/octobot_commons/databases/databases_util/cache_wrapper.py new file mode 100644 index 0000000000..bc0325175d --- /dev/null +++ b/packages/commons/octobot_commons/databases/databases_util/cache_wrapper.py @@ -0,0 +1,72 @@ +# pylint: disable=R0902 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class CacheWrapper: + def __init__( + self, file_path, cache_type, database_adaptor, tentacles_requirements, **kwargs + ): + self.file_path = file_path + self.cache_type = cache_type + self.database_adaptor = database_adaptor + self.db_kwargs = kwargs + self._cache_database = None + self._db_path = None + self.previous_db_metadata = None + self.tentacles_requirements = tentacles_requirements.summary() + + def get_database(self) -> tuple: + """ + Returns the database, creates it if messing + """ + if self._cache_database is None: + self._cache_database = self.cache_type( + self.file_path, database_adaptor=self.database_adaptor, **self.db_kwargs + ) + self._db_path = self._cache_database.get_db_path() + return self._cache_database, True + return self._cache_database, False + + def is_open(self): + """ + :return: True if a database is open + """ + return self._cache_database is not None + + async def close(self): + """ + Closes the current database. Stores its metadata into self.previous_db_metadata + """ + if self.is_open(): + self.previous_db_metadata = self._cache_database.get_non_default_metadata() + await self._cache_database.close() + self._cache_database = None + return True + return False + + async def clear(self): + """ + Clears the database, deleting its data + """ + if self._cache_database is not None: + await self._cache_database.clear() + + def get_path(self): + """ + :return: the database path + """ + return self._db_path diff --git a/packages/commons/octobot_commons/databases/document_database_adaptors/__init__.py b/packages/commons/octobot_commons/databases/document_database_adaptors/__init__.py new file mode 100644 index 0000000000..be6cf1c983 --- /dev/null +++ b/packages/commons/octobot_commons/databases/document_database_adaptors/__init__.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.document_database_adaptors import ( + abstract_document_database_adaptor, +) +from octobot_commons.databases.document_database_adaptors import tinydb_adaptor + + +from octobot_commons.databases.document_database_adaptors.abstract_document_database_adaptor import ( + AbstractDocumentDatabaseAdaptor, +) +from octobot_commons.databases.document_database_adaptors.tinydb_adaptor import ( + TinyDBAdaptor, +) + + +__all__ = [ + "AbstractDocumentDatabaseAdaptor", + "TinyDBAdaptor", +] diff --git a/packages/commons/octobot_commons/databases/document_database_adaptors/abstract_document_database_adaptor.py b/packages/commons/octobot_commons/databases/document_database_adaptors/abstract_document_database_adaptor.py new file mode 100644 index 0000000000..761f210fca --- /dev/null +++ b/packages/commons/octobot_commons/databases/document_database_adaptors/abstract_document_database_adaptor.py @@ -0,0 +1,240 @@ +# pylint: disable=W0613, R0904 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.multiprocessing_util as multiprocessing_util +import octobot_commons.enums as commons_enums + + +class AbstractDocumentDatabaseAdaptor: + """ + AbstractDatabaseAdaptor is an interface listing document databases public methods + """ + + HARD_RESET_ERRORS = [] # errors that should trigger a hard reset + + def __init__(self, db_path: str, **kwargs): + """ + TinyDBAdaptor constructor + :param db_path: database path + :param kwargs: kwargs to pass to the underlying db driver constructor + """ + self.db_path = db_path + + def initialize(self): + """ + Initialize the database. + """ + raise NotImplementedError("initialize is not implemented") + + @staticmethod + def is_file_system_based() -> bool: + """ + Returns True when this database is identified as a file in the current file system, + False when it's managed by a database server + """ + raise NotImplementedError("is_file_system_based is not implemented") + + @staticmethod + def get_db_file_ext() -> str: + """ + Returns the database file extension. Implemented in file system based databases + """ + raise NotImplementedError("get_db_file_ext") + + @staticmethod + async def create_identifier(identifier): + """ + Initialize the identifier by creating it in the database + """ + raise NotImplementedError("identifier") + + @staticmethod + async def identifier_exists(identifier, is_full_identifier) -> bool: + """ + Returns True when the given identifier is part of an existing database identifier + :param identifier: the identifier to look into + :param is_full_identifier: when True, only check identifiers that don't have sub identifiers. + When False, only check identifiers that have sub identifiers + """ + raise NotImplementedError("identifier_exists") + + @staticmethod + async def get_sub_identifiers(identifier, ignored_identifiers): + """ + Returns an iterable over the existing sub-identifiers under the given identifier + """ + raise NotImplementedError("get_sub_identifiers") + + @staticmethod + async def get_single_sub_identifier(identifier, ignored_identifiers) -> str: + """ + Returns the name of the only sub-identifier at a given parent identifier, None otherwise + example use: get the name of the only exchange the backtesting happened on if it only ran on a single exchange, + """ + raise NotImplementedError("get_single_sub_identifier") + + def get_uuid(self, document) -> int: + """ + Returns the uuid of the document + :param document: the document + """ + raise NotImplementedError("get_uuid is not implemented") + + async def select(self, table_name: str, query, uuid=None) -> list: + """ + Select data from the table_name table + :param table_name: name of the table + :param query: select query + :param uuid: id of the document + """ + raise NotImplementedError("select is not implemented") + + async def insert(self, table_name: str, row: dict) -> int: + """ + Insert dict data into the table_name table + :param table_name: name of the table + :param row: data to insert + """ + raise NotImplementedError("insert is not implemented") + + async def upsert(self, table_name: str, row: dict, query, uuid=None) -> int: + """ + Insert or update dict data into the table_name table + :param table_name: name of the table + :param row: data to insert + :param query: select query + :param uuid: id of the document + """ + raise NotImplementedError("upsert is not implemented") + + async def tables(self) -> list: + """ + Select tables + """ + raise NotImplementedError("tables is not implemented") + + async def insert_many(self, table_name: str, rows: list) -> list: + """ + Insert multiple dict data into the table_name table + :param table_name: name of the table + :param rows: data to insert + """ + raise NotImplementedError("insert_many is not implemented") + + async def update(self, table_name: str, row: dict, query, uuid=None) -> list: + """ + Select data from the table_name table + :param table_name: name of the table + :param row: data to update + :param query: select query + :param uuid: id of the document + """ + raise NotImplementedError("update is not implemented") + + async def update_many(self, table_name: str, update_values: list) -> list: + """ + Update multiple values from the table_name table + :param table_name: name of the table + :param update_values: values to update + """ + raise NotImplementedError("update_many is not implemented") + + async def delete(self, table_name: str, query, uuid=None) -> list: + """ + Delete data from the table_name table + :param table_name: name of the table + :param query: select query + :param uuid: id of the document + """ + raise NotImplementedError("delete is not implemented") + + async def count(self, table_name: str, query) -> int: + """ + Counts documents in the table_name table + :param table_name: name of the table + :param query: select query + """ + raise NotImplementedError("count is not implemented") + + async def query_factory(self): + """ + Creates a new empty select query + """ + raise NotImplementedError("query_factory is not implemented") + + async def hard_reset(self): + """ + Completely reset the database + """ + raise NotImplementedError("hard_reset is not implemented") + + @classmethod + def is_hard_reset_error(cls, error) -> bool: + """ + returns True if the given error should trigger + a hard reset of the database + """ + for error_class in cls.HARD_RESET_ERRORS: + if isinstance(error, error_class): + return True + return False + + async def flush(self): + """ + Flushes the database cache + """ + raise NotImplementedError("flush is not implemented") + + async def close(self): + """ + Closes the database + """ + raise NotImplementedError("close is not implemented") + + def __str__(self): + return f"{self.__class__.__name__} [{self.db_path}]" + + @staticmethod + def is_multiprocessing(): + """ + Returns True if the current process is run in a multiprocessing context using the multiprocessing_util module. + """ + try: + multiprocessing_util.get_lock( + commons_enums.MultiprocessingLocks.DBLock.value + ) + return True + except KeyError: + # no lock to acquire: we are not in a multiprocessing context + return False + + @staticmethod + def _get_lock(): + return multiprocessing_util.get_lock( + commons_enums.MultiprocessingLocks.DBLock.value + ) + + async def acquire(self): + """ + Acquires the database lock. + """ + self._get_lock().acquire() + + async def release(self): + """ + Releases the database lock. + """ + self._get_lock().release() diff --git a/packages/commons/octobot_commons/databases/document_database_adaptors/tinydb_adaptor.py b/packages/commons/octobot_commons/databases/document_database_adaptors/tinydb_adaptor.py new file mode 100644 index 0000000000..05451a5a97 --- /dev/null +++ b/packages/commons/octobot_commons/databases/document_database_adaptors/tinydb_adaptor.py @@ -0,0 +1,340 @@ +# pylint: disable=C0301, R0904, R1732, C0116, W0231, W0231 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import os + +try: + import tinydb + import tinydb.storages + import tinydb.middlewares + import tinydb.table +except ImportError: + pass + +import octobot_commons.logging as commons_logging +import octobot_commons.constants as constants +import octobot_commons.errors as errors +import octobot_commons.databases.document_database_adaptors.abstract_document_database_adaptor as abstract_document_database_adaptor + + +class TinyDBAdaptor(abstract_document_database_adaptor.AbstractDocumentDatabaseAdaptor): + """ + TinyDBAdaptor is an AbstractDatabaseAdaptor implemented using tinydb: a minimal python only + local document database. + Warning: loads the whole file in RAM and must be closed to ensure writing + """ + + DEFAULT_WRITE_CACHE_SIZE = 5000 + HARD_RESET_ERRORS = [ + json.JSONDecodeError + ] # errors that should trigger a hard reset + + def __init__(self, file_path: str, cache_size: int = None, **kwargs): + """ + TinyDBAdaptor constructor. + :param file_path: path to the database file + :param cache_size: size of the in memory cache (number of operations before updating the file + :param kwargs: unused + """ + super().__init__(file_path) + self.database = None + self.cache_size = cache_size + + def initialize(self): + """ + Initialize the database: opens the database file. + """ + + storage = self._get_storage() + middleware = tinydb.middlewares.CachingMiddleware(storage) + middleware.WRITE_CACHE_SIZE = self.cache_size or self.DEFAULT_WRITE_CACHE_SIZE + try: + self.database = tinydb.TinyDB(self.db_path, storage=middleware) + except FileNotFoundError as err: + raise errors.DatabaseNotFoundError( + f'Can\'t open database at "{self.db_path}"' + ) from err + + @staticmethod + def _get_storage(): + class LazyJSONStorage(tinydb.storages.JSONStorage): + def __init__( + self, + path: str, + create_dirs=False, + encoding=None, + access_mode="r+", + **kwargs, + ): + """ + Only creates the json file when actually needing to access it + # from tinydb.storages.JSONStorage + Create a new instance. + + Also creates the storage file, if it doesn't exist and the access mode is appropriate for writing. + + :param path: Where to store the JSON data. + :param access_mode: mode in which the file is opened (r, r+, w, a, x, b, t, +, U) + :type access_mode: str + """ + + # pylint: disable=W0233 + tinydb.storages.Storage.__init__(self) + + self._mode = access_mode + self.kwargs = kwargs + + # custom + self._path = path + self._create_dirs = create_dirs + self._encoding = encoding + self._lazy_handle = None + + # ensure path + if not self._create_dirs: + self._ensure_path() + + def _ensure_path(self): + dir_path = os.path.dirname(self._path) + if dir_path and not os.path.exists(dir_path): + raise FileNotFoundError(self._path) + + @property + def _handle(self): + # create and open file once and only when self._handle is used + if self._lazy_handle is None: + # Create the file if it doesn't exist and creating is allowed by the + # access mode + if any( + character in self._mode for character in ("+", "w", "a") + ): # any of the writing modes + tinydb.storages.touch(self._path, create_dirs=self._create_dirs) + + # Open the file for reading/writing + self._lazy_handle = open( + self._path, mode=self._mode, encoding=self._encoding + ) + return self._lazy_handle + + def close(self) -> None: + if self._lazy_handle is None: + # never opened the file: don't call self._handle (that would create it) + return + self._handle.close() + self._lazy_handle = None + + return LazyJSONStorage + + @staticmethod + def is_file_system_based() -> bool: + """ + Returns True when this database is identified as a file in the current file system, + False when it's managed by a database server + """ + return True + + @staticmethod + def get_db_file_ext() -> str: + """ + Returns the database file extension. Implemented in file system based databases + """ + return constants.TINYDB_EXT + + @staticmethod + async def create_identifier(identifier): + """ + Initialize the identifier by creating it in the database + """ + if not os.path.exists(identifier): + os.makedirs(identifier) + + @staticmethod + async def identifier_exists(identifier, is_full_identifier) -> bool: + """ + Returns True when the given identifier is part of an existing database identifier + :param identifier: the identifier to look into + :param is_full_identifier: when True, only check identifiers that don't have sub identifiers. + When False, only check identifiers that have sub identifiers + """ + return ( + os.path.isfile(identifier) + if is_full_identifier + else os.path.isdir(identifier) + ) + + @staticmethod + async def get_sub_identifiers(identifier, ignored_identifiers): + """ + Returns an iterable over the existing sub-identifiers under the given identifier + """ + for folder in os.scandir(identifier): + if ( + await TinyDBAdaptor.identifier_exists(folder, False) + and folder.name not in ignored_identifiers + ): + yield folder.name + + @staticmethod + async def get_single_sub_identifier(identifier, ignored_identifiers) -> str: + """ + Returns the name of the only sub-identifier at a given parent identifier, None otherwise + example use: get the name of the only exchange the backtesting happened on if it only ran on a single exchange, + """ + exchange_folders = [ + folder.name + for folder in os.scandir(identifier) + if os.path.isdir(folder) and folder.name not in ignored_identifiers + ] + return exchange_folders[0] if len(exchange_folders) == 1 else None + + def get_uuid(self, document) -> int: + """ + Returns the uuid of the document + :param document: the document + """ + return document.doc_id + + async def select(self, table_name: str, query, uuid=None) -> list: + """ + Select data from the table_name table + :param table_name: name of the table + :param query: select query + :param uuid: id of the document + """ + if uuid is None: + return ( + self.database.table(table_name).search(query) + if query + else self.database.table(table_name).all() + ) + return self.database.table(table_name).get(doc_id=uuid) + + async def tables(self) -> list: + """ + Select tables + """ + return list(self.database.tables()) + + async def insert(self, table_name: str, row: dict) -> int: + """ + Insert dict data into the table_name table + :param table_name: name of the table + :param row: data to insert + """ + return self.database.table(table_name).insert(row) + + async def upsert(self, table_name: str, row: dict, query, uuid=None) -> int: + """ + Insert or update dict data into the table_name table + :param table_name: name of the table + :param row: data to insert + :param query: select query + :param uuid: id of the document + """ + if uuid is None: + return self.database.table(table_name).upsert(row, query) + return self.database.table(table_name).upsert( + tinydb.table.Document(row, doc_id=uuid) + ) + + async def insert_many(self, table_name: str, rows: list) -> list: + """ + Insert multiple dict data into the table_name table + :param table_name: name of the table + :param rows: data to insert + """ + return self.database.table(table_name).insert_multiple(rows) + + async def update(self, table_name: str, row: dict, query, uuid=None) -> list: + """ + Select data from the table_name table + :param table_name: name of the table + :param row: data to update + :param query: select query + :param uuid: id of the document + """ + if uuid is None: + return self.database.table(table_name).update(row, query) + return self.database.table(table_name).update( + tinydb.table.Document(row, doc_id=uuid) + ) + + async def update_many(self, table_name: str, update_values: list) -> list: + """ + Update multiple values from the table_name table + :param table_name: name of the table + :param update_values: values to update + """ + return self.database.table(table_name).update_multiple(update_values) + + async def delete(self, table_name: str, query, uuid=None) -> list: + """ + Delete data from the table_name table + :param table_name: name of the table + :param query: select query + :param uuid: id of the document + """ + if uuid is None: + if query is None: + return self.database.drop_table(table_name) + return self.database.table(table_name).remove(query) + return self.database.table(table_name).remove(doc_ids=(uuid,)) + + async def count(self, table_name: str, query) -> int: + """ + Counts documents in the table_name table + :param table_name: name of the table + :param query: select query + """ + return self.database.table(table_name).count(query) + + async def query_factory(self): + """ + Creates a new empty select query + """ + return tinydb.Query() + + async def hard_reset(self): + """ + Completely reset the database + """ + await self.close() + os.remove(self.db_path) + self.initialize() + + async def flush(self): + """ + Flushes the database cache + """ + return self.database.storage.flush() + + async def close(self): + """ + Closes the database + """ + try: + return self.database.close() + except AttributeError: + # when self.database didn't open properly + pass + except TypeError as err: + commons_logging.get_logger(str(self)).exception( + err, + True, + f"Error when writing database, this is probably due to a script that " + f"is saving a non json-serializable value: {err}", + ) diff --git a/packages/commons/octobot_commons/databases/global_storage/__init__.py b/packages/commons/octobot_commons/databases/global_storage/__init__.py new file mode 100644 index 0000000000..1e1b375a84 --- /dev/null +++ b/packages/commons/octobot_commons/databases/global_storage/__init__.py @@ -0,0 +1,27 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.global_storage import global_shared_memory_storage + +from octobot_commons.databases.global_storage.global_shared_memory_storage import ( + GlobalSharedMemoryStorage, +) + + +__all__ = [ + "GlobalSharedMemoryStorage", +] diff --git a/packages/commons/octobot_commons/databases/global_storage/global_shared_memory_storage.py b/packages/commons/octobot_commons/databases/global_storage/global_shared_memory_storage.py new file mode 100644 index 0000000000..d2317a815f --- /dev/null +++ b/packages/commons/octobot_commons/databases/global_storage/global_shared_memory_storage.py @@ -0,0 +1,41 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import sys + +import octobot_commons.singleton as singleton + + +class GlobalSharedMemoryStorage(dict, singleton.Singleton): + """ + A global singleton dict available to the whole python virtual machine. + Warnings: + only stored in RAM, not persisted on disc + not thread safe + """ + + def remove_oldest_elements(self, elements_count_to_remove: int): + """ + Remove (pop) the elements_count_to_remove oldest elements + :param elements_count_to_remove: number of elements to remove + """ + for key in list(self.keys())[:elements_count_to_remove]: + self.pop(key) + + def get_bytes_size(self): + """ + Return the size in bytes of the memory storage + """ + return sys.getsizeof(self) diff --git a/packages/commons/octobot_commons/databases/implementations/__init__.py b/packages/commons/octobot_commons/databases/implementations/__init__.py new file mode 100644 index 0000000000..a5ddd39a72 --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/__init__.py @@ -0,0 +1,53 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.implementations import db_reader +from octobot_commons.databases.implementations import db_writer +from octobot_commons.databases.implementations import db_writer_reader +from octobot_commons.databases.implementations import meta_database +from octobot_commons.databases.implementations import cache_database +from octobot_commons.databases.implementations import cache_timestamp_database + + +from octobot_commons.databases.implementations.db_reader import ( + DBReader, +) +from octobot_commons.databases.implementations.db_writer import ( + DBWriter, +) +from octobot_commons.databases.implementations.db_writer_reader import ( + DBWriterReader, +) +from octobot_commons.databases.implementations.meta_database import ( + MetaDatabase, +) +from octobot_commons.databases.implementations.cache_database import ( + CacheDatabase, +) +from octobot_commons.databases.implementations.cache_timestamp_database import ( + CacheTimestampDatabase, +) + + +__all__ = [ + "DBReader", + "DBWriter", + "DBWriterReader", + "MetaDatabase", + "CacheDatabase", + "CacheTimestampDatabase", +] diff --git a/packages/commons/octobot_commons/databases/implementations/_exchange_database.py b/packages/commons/octobot_commons/databases/implementations/_exchange_database.py new file mode 100644 index 0000000000..ace03317cf --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/_exchange_database.py @@ -0,0 +1,139 @@ +# pylint: disable=R0902,C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +import octobot_commons.databases.implementations.db_writer_reader as db_writer_reader + + +class ExchangeDatabase: + def __init__(self, meta_database, exchange): + self.meta_database = meta_database + self.run_dbs_identifier = self.meta_database.run_dbs_identifier + self.exchange = exchange + self.orders_db: db_writer_reader.DBWriterReader = None + self.trades_db: db_writer_reader.DBWriterReader = None + self.transactions_db: db_writer_reader.DBWriterReader = None + self.historical_portfolio_value_db: db_writer_reader.DBWriterReader = None + self.symbol_dbs: dict = {} + + def get_orders_db(self, account_type): + """ + :return: the orders database. Opens it if not open already + """ + if self.orders_db is None: + self.orders_db = self.meta_database.get_db( + self.run_dbs_identifier.get_orders_db_identifier( + account_type, + self.exchange, + ) + ) + return self.orders_db + + def get_trades_db(self, account_type): + """ + :return: the trades database. Opens it if not open already + """ + if self.trades_db is None: + self.trades_db = self.meta_database.get_db( + self.run_dbs_identifier.get_trades_db_identifier( + account_type, + self.exchange, + ) + ) + return self.trades_db + + def get_transactions_db(self, account_type): + """ + :return: the transactions database. Opens it if not open already + """ + if self.transactions_db is None: + self.transactions_db = self.meta_database.get_db( + self.run_dbs_identifier.get_transactions_db_identifier( + account_type, + self.exchange, + ) + ) + return self.transactions_db + + def get_historical_portfolio_value_db(self, account_type): + """ + :return: the historical portfolio database. Opens it if not open already + """ + if self.historical_portfolio_value_db is None: + self.historical_portfolio_value_db = self.meta_database.get_db( + self.run_dbs_identifier.get_historical_portfolio_value_db_identifier( + account_type, self.exchange + ) + ) + return self.historical_portfolio_value_db + + def get_symbol_db(self, symbol): + """ + :return: the symbol database. Opens it if not open already + """ + key = self._get_symbol_db_key(self.exchange, symbol) + if key not in self.symbol_dbs: + self.symbol_dbs[key] = self.meta_database.get_db( + self.run_dbs_identifier.get_symbol_db_identifier(self.exchange, symbol) + ) + return self.symbol_dbs[key] + + async def get_all_symbol_dbs(self): + """ + :return: an iterable over each symbol database for the given exchange + """ + if self.run_dbs_identifier.database_adaptor.is_file_system_based(): + return [ + self.get_symbol_db(self.run_dbs_identifier.get_symbol_db_name(db.name)) + for db in os.scandir( + self.run_dbs_identifier.get_exchange_based_identifier(self.exchange) + ) + if self.run_dbs_identifier.is_symbol_database(db.name) + ] + raise NotImplementedError( + "get_all_symbol_dbs is not implemented for non is_file_system_based databases" + ) + + def all_basic_run_db(self, account_type): + """ + yields the run, orders, trades and transactions databases + """ + yield self.get_orders_db(account_type) + yield self.get_trades_db(account_type) + yield self.get_transactions_db(account_type) + + @staticmethod + def _get_symbol_db_key(exchange, symbol): + return f"{exchange}{symbol}" + + async def close(self): + """ + Closes all the open databases + """ + # avoid asyncio.gather here as it is producing unexplained side effects (frozen thread preventing stop) + for coro in ( + db.close() + for db in ( + self.orders_db, + self.trades_db, + self.transactions_db, + self.historical_portfolio_value_db, + *self.symbol_dbs.values(), + ) + if db is not None + ): + await coro diff --git a/packages/commons/octobot_commons/databases/implementations/cache_database.py b/packages/commons/octobot_commons/databases/implementations/cache_database.py new file mode 100644 index 0000000000..464bced63c --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/cache_database.py @@ -0,0 +1,102 @@ +# pylint: disable=C0116,R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import copy +import sortedcontainers + +import octobot_commons.enums as enums +import octobot_commons.databases.implementations.db_writer as writer +import octobot_commons.databases.document_database_adaptors as adaptors + + +class CacheDatabase(writer.DBWriter): + CACHE_TABLE = enums.CacheDatabaseTables.CACHE.value + CACHE_METADATA_TABLE = enums.CacheDatabaseTables.METADATA.value + UUID_KEY = "uuid" + + def __init__( + self, + file_path: str, + database_adaptor=adaptors.TinyDBAdaptor, + cache_size=None, + **kwargs + ): + super().__init__( + file_path, + database_adaptor=database_adaptor, + cache_size=cache_size, + **kwargs + ) + self._are_metadata_written = False + self._local_cache = None + self.metadata = { + enums.CacheDatabaseColumns.TYPE.value: self.__class__.__name__, + } + + def get_non_default_metadata(self): + metadata = copy.copy(self.metadata) + metadata.pop(enums.CacheDatabaseColumns.TYPE.value) + return metadata + + def add_metadata(self, additional_metadata: dict): + self.metadata.update(additional_metadata) + + async def _ensure_metadata(self): + if not self._are_metadata_written: + await self._database.upsert( + self.CACHE_METADATA_TABLE, self.metadata, None, uuid=1 + ) + self._are_metadata_written = True + + async def _ensure_local_cache(self, identifier_key, update=False): + if update or self._local_cache is None: + self._local_cache = sortedcontainers.SortedDict() + for cache in await self.get_cache(): + cache[self.UUID_KEY] = self._database.get_uuid(cache) + self._local_cache[cache[identifier_key]] = cache + + async def get_metadata(self): + return await self._database.select(self.CACHE_METADATA_TABLE, None, uuid=1) + + async def get_cache(self): + return await self._database.select(self.CACHE_TABLE, None) + + async def clear(self): + await self._database.delete(self.CACHE_TABLE, None) + await self._database.delete(self.CACHE_METADATA_TABLE, None) + await super().clear() + self._local_cache = sortedcontainers.SortedDict() + self._are_metadata_written = False + # always rewrite metadata as they are necessary to handle cache later + await self._ensure_metadata() + await self.flush() + + async def _get_from_local_cache(self, identifier_key, identifier_value, sub_key): + await self._ensure_local_cache(identifier_key) + return self._local_cache[identifier_value][sub_key] + + async def _needs_update( + self, identifier_key, identifier_value, sub_key, value + ) -> bool: + try: + return ( + await self._get_from_local_cache( + identifier_key, identifier_value, sub_key + ) + != value + ) + except KeyError: + return True diff --git a/packages/commons/octobot_commons/databases/implementations/cache_timestamp_database.py b/packages/commons/octobot_commons/databases/implementations/cache_timestamp_database.py new file mode 100644 index 0000000000..33ebb07004 --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/cache_timestamp_database.py @@ -0,0 +1,206 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases.implementations.cache_database as cache_database +import octobot_commons.enums as commons_enums +import octobot_commons.errors as errors + + +class CacheTimestampDatabase(cache_database.CacheDatabase): + async def get( + self, + timestamp: float, + name: str = commons_enums.CacheDatabaseColumns.VALUE.value, + ) -> dict: + """ + Returns the value associated to the given timestamp + :param timestamp: timestamp to get data for + :param name: identifier of the value to get, default is commons_enums.CacheDatabaseColumns.VALUE.value + """ + try: + return await self._get_from_local_cache( + commons_enums.CacheDatabaseColumns.TIMESTAMP.value, timestamp, name + ) + except KeyError as err: + raise errors.NoCacheValue( + f"No cache value associated to {timestamp}" + if err.args[0] == timestamp + else f"No {name} value associated to {timestamp} cache." + ) + + async def get_values( + self, + timestamp: float, + name: str = commons_enums.CacheDatabaseColumns.VALUE.value, + limit=-1, + min_timestamp=0, + ) -> list: + """ + Returns all the values up to the given timestamp + :param timestamp: last timestamp to read get data to + :param name: identifier of the value to get, default is commons_enums.CacheDatabaseColumns.VALUE.value + :param limit: maximum number of elements to return + :param min_timestamp: timestamp to start returning data from + """ + try: + await self._ensure_local_cache( + commons_enums.CacheDatabaseColumns.TIMESTAMP.value + ) + values = [ + values[name] + for value_timestamp, values in self._local_cache.items() + if min_timestamp <= value_timestamp <= timestamp and name in values + ] + if limit != -1: + return values[-limit:] + return values + except IndexError: + raise errors.NoCacheValue(f"No cache value associated to {name}") + except KeyError: + raise errors.NoCacheValue(f"No {name} value associated to {name} cache.") + + async def set( + self, + timestamp: float, + value, + name: str = commons_enums.CacheDatabaseColumns.VALUE.value, + ) -> None: + """ + Sets a value at the given timestamp associated to the given identifier + :param timestamp: timestamp to set data to + :param value: value to set + :param name: identifier of the value to set, default is commons_enums.CacheDatabaseColumns.VALUE.value + """ + await self._ensure_metadata() + saved_value = self.get_serializable_value(value) + if await self._needs_update( + commons_enums.CacheDatabaseColumns.TIMESTAMP.value, + timestamp, + name, + saved_value, + ): + uuid = None + set_value = { + commons_enums.CacheDatabaseColumns.TIMESTAMP.value: timestamp, + name: saved_value, + } + if timestamp in self._local_cache: + # set uuid in case this value already exist in db + uuid = self._local_cache[timestamp].get(self.UUID_KEY) + self._local_cache[timestamp][ + commons_enums.CacheDatabaseColumns.TIMESTAMP.value + ] = timestamp + self._local_cache[timestamp][name] = saved_value + else: + self._local_cache[timestamp] = set_value + await self.upsert( + self.CACHE_TABLE, + set_value, + None, + uuid=uuid, + cache_query={ + commons_enums.CacheDatabaseColumns.TIMESTAMP.value: timestamp + }, + ) + + async def set_values( + self, + timestamps, + values, + name: str = commons_enums.CacheDatabaseColumns.VALUE.value, + additional_values_by_key: dict = None, + ) -> None: + """ + Sets values at the given timestamps associated to the given identifiers + :param timestamps: timestamps to set data to + :param values: value to set + :param name: identifier of the values to set, default is commons_enums.CacheDatabaseColumns.VALUE.value + :param additional_values_by_key: other key/values to set a these timestamps + """ + await self._ensure_local_cache( + commons_enums.CacheDatabaseColumns.TIMESTAMP.value + ) + to_bulk_update = { + name: [self.get_serializable_value(value) for value in values] + } + if additional_values_by_key: + to_bulk_update.update( + { + key: [self.get_serializable_value(value) for value in values] + for key, values in additional_values_by_key.items() + } + ) + # use optimized multiple insert to speed up the database insert operation + await self._bulk_update_values(timestamps, to_bulk_update) + + async def _bulk_update_values(self, timestamps, to_bulk_update): + await self._ensure_metadata() + rows = [] + can_just_insert_data = True + key = None + try: + # try to write data in the scenario their timestamp is not in cache already: can insert directly + for index, timestamp in enumerate(timestamps): + if timestamp in self._local_cache: + row = self._local_cache[timestamp] + # will have to update data + can_just_insert_data = False + else: + row = { + commons_enums.CacheDatabaseColumns.TIMESTAMP.value: timestamp + } + for key, values in to_bulk_update.items(): + row[key] = values[index] + self._local_cache[timestamp] = row + rows.append(row) + if can_just_insert_data: + await self.log_many(self.CACHE_TABLE, rows) + else: + await self._update_full_database() + except IndexError: + raise RuntimeError( + f"Data to set are required to have the same length as the timestamps list. " + f"Error on the {key} values" + ) + + async def _update_full_database(self): + # to be called to avoid multiple upsert / update which can be very slow: take full advantage of multiple inserts + # 1. recreate all database elements from self._local_cache + all_rows = [] + for element in self._local_cache.values(): + # remove artificial data if any + element.pop(self.UUID_KEY, None) + all_rows.append(element) + # 2. delete database content + await self.delete_all(self.CACHE_TABLE) + # 3. insert all local cache + await self.log_many(self.CACHE_TABLE, all_rows) + # 4. reset self._local_cache + await self._ensure_local_cache( + commons_enums.CacheDatabaseColumns.TIMESTAMP.value, update=True + ) + + async def _timestamp_query(self, timestamp): + return (await self._database.query_factory()).t == timestamp + + async def get_cache(self): + """ + :return: the sorted read cache values + """ + # relies on the fact that python dicts keep order + return sorted( + await self._database.select(self.CACHE_TABLE, None), + key=lambda x: x[commons_enums.CacheDatabaseColumns.TIMESTAMP.value], + ) diff --git a/packages/commons/octobot_commons/databases/implementations/db_reader.py b/packages/commons/octobot_commons/databases/implementations/db_reader.py new file mode 100644 index 0000000000..01d1491504 --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/db_reader.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases.bases.base_database as base_database + + +class DBReader(base_database.BaseDatabase): + async def select(self, table_name: str, query: str) -> list: + """ + :param table_name: table to select data from + :param query: select query + :return: list of selected results + """ + return await self._database.select(table_name, query) + + async def tables(self) -> list: + """ + :return: list of tables contained in the database + """ + return await self._database.tables() + + async def all(self, table_name: str) -> list: + """ + :param table_name: table to select data from + :return: all data of the selected table + """ + return await self._database.select(table_name, None) diff --git a/packages/commons/octobot_commons/databases/implementations/db_writer.py b/packages/commons/octobot_commons/databases/implementations/db_writer.py new file mode 100644 index 0000000000..8ec0c3ca5d --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/db_writer.py @@ -0,0 +1,194 @@ +# pylint: disable=R0913,R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases.bases.base_database as base_database +import octobot_commons.databases.document_database_adaptors as adaptors +import octobot_commons.errors as commons_errors +import octobot_commons.logging as commons_logging + + +class DBWriter(base_database.BaseDatabase): + MAX_ROWS_BUFFER_SIZE = 500 + + def __init__( + self, + file_path: str, + database_adaptor=adaptors.TinyDBAdaptor, + cache_size=None, + **kwargs, + ): + super().__init__( + file_path, + database_adaptor=database_adaptor, + cache_size=cache_size, + **kwargs, + ) + self.rows_buffer = {} + self.rows_buffer_size = self.MAX_ROWS_BUFFER_SIZE + + async def log(self, table_name: str, row: dict, cache=True, rows_buffering=False): + """ + Write a row into the given table + :param table_name: name of the table + :param row: row to write + :param cache: When True, enables local cache + :param rows_buffering: When True, defer actual database writing using a row buffer + """ + if cache: + try: + self.cache.register(table_name, row) + except commons_errors.UncachableValue: + await self._database.insert(table_name, row) + if rows_buffering: + await self._buffer_row(table_name, row) + else: + await self._database.insert(table_name, row) + + async def update(self, table_name: str, row: dict, query, uuid=None): + """ + Updates rows + :param table_name: table to update data from + :param row: updated data + :param query: select query + :param uuid: uuid to select data (enable faster operations) + :return: the update result + """ + return await self._database.update(table_name, row, query, uuid=uuid) + + async def upsert( + self, table_name: str, row: dict, query, uuid=None, cache_query=None + ): + """ + Update or insert a row + Upsert can be a very slow operation: avoid is as much as possible + Upsert with uuid is fast though, try to use it when possible + :param table_name: table to update data from + :param row: updated data + :param query: select query + :param uuid: uuid to select data (enable faster operations) + :param cache_query: query identifier associated to this row, can be used later on for faster upserts on + the same row using cache + :return: the upsert result when operating without cache + """ + if uuid is not None or cache_query is None: + return await self._database.upsert(table_name, row, query, uuid=uuid) + if uuid := self.cache.cached_uuid(table_name, str(cache_query)): + return await self._database.upsert(table_name, row, query, uuid=uuid) + if result := self.cache.cached_query(table_name, str(cache_query)): + result.update(row) + else: + await self._buffer_row( + table_name, row, cache_query=cache_query, cache=False + ) + self.cache.register(table_name, str(cache_query), result=row) + return None + + async def update_many(self, table_name: str, update_values: list): + """ + Updates multiple values at once, doesn't use cache + """ + return await self._database.update_many(table_name, update_values) + + async def delete(self, table_name: str, dict_query: dict): + """ + Deletes selected values at once, doesn't use cache + """ + query = None + if dict_query: + if isinstance(dict_query, dict): + self.cache.delete_from_rows_cache(table_name, dict_query) + query = await self.search() + for key, value in dict_query.items(): + query = query[key] == value + else: + query = dict_query + else: + self.cache.clear(table_name) + return await self._database.delete(table_name, query) + + async def delete_all(self, table_name: str): + """ + Deletes all rows from a table + """ + self.cache.clear(table_name) + return await self._database.delete(table_name, None) + + async def log_many(self, table_name: str, rows: list, cache=True): + """ + Inserts multiple values into the given table + :param table_name: name of the table + :param rows: rows to insert + :param cache: when True, rows are written into a cache buffer and written in bulk when buffer will be full + """ + if cache: + for row in rows: + try: + self.cache.register(table_name, row) + except commons_errors.UncachableValue: + # can pass here since row will be inserted anyway + pass + return await self._database.insert_many(table_name, rows) + + async def replace_all(self, table_name, rows: list, cache=True): + """ + Deletes everything for the give table name and replace the content by the given rows + :param table_name: name of the table + :param rows: rows to insert + :param cache: When True, rows will be registered in cache + """ + await self.delete_all(table_name) + await self.log_many(table_name, rows, cache=cache) + + async def flush(self): + """ + Flushes all caches and "commit" to the database then forces the database to flush its own internal cache if any + """ + try: + await self._flush_all_rows_buffers(cache=True) + await super().flush() + except TypeError as err: + commons_logging.get_logger(str(self)).exception( + err, + True, + f"Error when writing database, this is probably due to a script that is " + f"saving a non json-serializable value: {err}", + ) + + async def _buffer_row(self, table, row, cache_query=None, cache=True): + try: + self.rows_buffer[table].append((row, cache_query)) + if len(self.rows_buffer[table]) >= self.rows_buffer_size: + await self._flush_rows_buffer(table, cache=cache) + except KeyError: + self.rows_buffer[table] = [(row, cache_query)] + + async def _flush_rows_buffer(self, table, cache=True): + uuids = await self.log_many( + table, tuple(row[0] for row in self.rows_buffer[table]), cache=cache + ) + for index, row in enumerate(self.rows_buffer[table]): + self.cache.register(table, str(row[1]), uuid=uuids[index]) + self.rows_buffer[table] = [] + + async def _flush_all_rows_buffers(self, cache=True): + for table, rows in self.rows_buffer.items(): + uuids = await self.log_many( + table, tuple(row[0] for row in rows), cache=cache + ) + if cache: + for index, row in enumerate(rows): + self.cache.register(table, str(row[1]), uuid=uuids[index]) + self.rows_buffer[table] = [] diff --git a/packages/commons/octobot_commons/databases/implementations/db_writer_reader.py b/packages/commons/octobot_commons/databases/implementations/db_writer_reader.py new file mode 100644 index 0000000000..86ce0e7f06 --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/db_writer_reader.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases.implementations.db_writer as writer +import octobot_commons.databases.implementations.db_reader as reader + + +class DBWriterReader(writer.DBWriter, reader.DBReader): + pass diff --git a/packages/commons/octobot_commons/databases/implementations/meta_database.py b/packages/commons/octobot_commons/databases/implementations/meta_database.py new file mode 100644 index 0000000000..4641d6d023 --- /dev/null +++ b/packages/commons/octobot_commons/databases/implementations/meta_database.py @@ -0,0 +1,168 @@ +# pylint: disable=R0902,C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib + +import octobot_commons.databases.implementations.db_writer_reader as db_writer_reader +import octobot_commons.databases.implementations._exchange_database as _exchange_database +import octobot_commons.enums as enums + + +class MetaDatabase: + def __init__(self, run_dbs_identifier, with_lock=False, cache_size=None): + self.run_dbs_identifier = run_dbs_identifier + self.with_lock = with_lock + self.cache_size = cache_size + self.database_adaptor = self.run_dbs_identifier.database_adaptor + self.run_db: db_writer_reader.DBWriterReader = None + self.backtesting_metadata_db: db_writer_reader.DBWriterReader = None + self.exchange_dbs = {} + + def get_run_db(self): + """ + :return: the run database. Opens it if not open already + """ + if self.run_db is None: + self.run_db = self.get_db( + self.run_dbs_identifier.get_run_data_db_identifier() + ) + return self.run_db + + def get_backtesting_metadata_db(self): + """ + :return: the backtesting metadata database. Opens it if not open already + """ + if self.backtesting_metadata_db is None: + self.backtesting_metadata_db = self.get_db( + self.run_dbs_identifier.get_backtesting_metadata_identifier() + ) + return self.backtesting_metadata_db + + async def get_backtesting_metadata_from_run(self): + """ + :return: the backtesting metadata for the associated run_dbs_identifier's backtesting_id + """ + db = self.get_backtesting_metadata_db() + return ( + await db.select( + enums.CacheDatabaseTables.METADATA.value, + (await db.search()).id == self.run_dbs_identifier.backtesting_id, + ) + )[0] + + def _get_exchange_db(self, exchange=None): + """ + :return: the ExchangeDatabase associated to the given exchange + """ + exchange = exchange or self.run_dbs_identifier.context.exchange_name + try: + return self.exchange_dbs[exchange] + except KeyError: + self.exchange_dbs[exchange] = _exchange_database.ExchangeDatabase( + self, exchange + ) + return self.exchange_dbs[exchange] + + def get_orders_db(self, account_type, exchange=None): + """ + :return: the orders database. Opens it if not open already + """ + return self._get_exchange_db(exchange).get_orders_db(account_type) + + def get_trades_db(self, account_type, exchange=None): + """ + :return: the trades database. Opens it if not open already + """ + return self._get_exchange_db(exchange).get_trades_db(account_type) + + def get_transactions_db(self, account_type, exchange=None): + """ + :return: the transactions database. Opens it if not open already + """ + return self._get_exchange_db(exchange).get_transactions_db(account_type) + + def get_historical_portfolio_value_db(self, account_type, exchange): + """ + :return: the historical portfolio database. Opens it if not open already + """ + return self._get_exchange_db(exchange).get_historical_portfolio_value_db( + account_type + ) + + def get_symbol_db(self, exchange, symbol): + """ + :return: the symbol database. Opens it if not open already + """ + return self._get_exchange_db(exchange).get_symbol_db(symbol) + + async def get_all_symbol_dbs(self, exchange): + """ + :return: an iterable over each symbol database for the given exchange + """ + return await self._get_exchange_db(exchange).get_all_symbol_dbs() + + def all_basic_run_db(self, account_type, exchange=None): + """ + yields the run, orders, trades and transactions databases + """ + yield self.get_run_db() + exchange = exchange or self.run_dbs_identifier.context.exchange_name + yield from self.exchange_dbs[exchange].all_basic_run_db(account_type) + + def get_db(self, db_identifier): + """ + :return: the database associated to the given identifier + """ + return db_writer_reader.DBWriterReader( + db_identifier, + with_lock=self.with_lock, + cache_size=self.cache_size, + database_adaptor=self.database_adaptor, + enable_storage=self.run_dbs_identifier.enable_storage, + ) + + async def close(self): + """ + Closes all the open databases + """ + # avoid asyncio.gather here as it is producing unexplained side effects (frozen thread preventing stop) + for coro in ( + db.close() + for db in ( + self.run_db, + self.backtesting_metadata_db, + ) + if db is not None + ): + await coro + for exchange_db in self.exchange_dbs.values(): + await exchange_db.close() + + @classmethod + @contextlib.asynccontextmanager + async def database(cls, database_manager, with_lock=False, cache_size=None): + """ + Created a local meta database and closes it upon leaving the context manager + """ + meta_db = None + try: + meta_db = MetaDatabase( + database_manager, with_lock=with_lock, cache_size=cache_size + ) + yield meta_db + finally: + if meta_db is not None: + await meta_db.close() diff --git a/packages/commons/octobot_commons/databases/relational_databases/__init__.py b/packages/commons/octobot_commons/databases/relational_databases/__init__.py new file mode 100644 index 0000000000..315bc6be37 --- /dev/null +++ b/packages/commons/octobot_commons/databases/relational_databases/__init__.py @@ -0,0 +1,29 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.relational_databases import sqlite +from octobot_commons.databases.relational_databases.sqlite import ( + SQLiteDatabase, + new_sqlite_database, +) + + +__all__ = [ + "SQLiteDatabase", + "new_sqlite_database", +] diff --git a/packages/commons/octobot_commons/databases/relational_databases/sqlite/__init__.py b/packages/commons/octobot_commons/databases/relational_databases/sqlite/__init__.py new file mode 100644 index 0000000000..403d6dda0c --- /dev/null +++ b/packages/commons/octobot_commons/databases/relational_databases/sqlite/__init__.py @@ -0,0 +1,29 @@ +# pylint: disable=R0801,R0401 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.relational_databases.sqlite import sqlite_database +from octobot_commons.databases.relational_databases.sqlite.sqlite_database import ( + SQLiteDatabase, + new_sqlite_database, +) + + +__all__ = [ + "SQLiteDatabase", + "new_sqlite_database", +] diff --git a/packages/commons/octobot_commons/databases/relational_databases/sqlite/cursor_pool.py b/packages/commons/octobot_commons/databases/relational_databases/sqlite/cursor_pool.py new file mode 100644 index 0000000000..fa06c44ed2 --- /dev/null +++ b/packages/commons/octobot_commons/databases/relational_databases/sqlite/cursor_pool.py @@ -0,0 +1,53 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import contextlib + +import octobot_commons.databases.relational_databases.sqlite.cursor_wrapper as cursor_wrapper + + +class CursorPool: + def __init__(self, db_connection): + self._db_connection = db_connection + self._cursors = [] + + @contextlib.asynccontextmanager + async def idle_cursor(self) -> cursor_wrapper.CursorWrapper: + """ + Yields an idle cursor, creates a new one if necessary + """ + cursor = None + try: + cursor = await self._get_or_create_idle_cursor() + cursor.idle = False + yield cursor + finally: + if cursor is not None: + cursor.idle = True + + async def close(self): + """ + Close every cursor + """ + await asyncio.gather(*(cursor.close() for cursor in self._cursors)) + + async def _get_or_create_idle_cursor(self) -> cursor_wrapper.CursorWrapper: + for cursor in self._cursors: + if cursor.idle: + return cursor + cursor = cursor_wrapper.CursorWrapper(await self._db_connection.cursor()) + self._cursors.append(cursor) + return cursor diff --git a/packages/commons/octobot_commons/databases/relational_databases/sqlite/cursor_wrapper.py b/packages/commons/octobot_commons/databases/relational_databases/sqlite/cursor_wrapper.py new file mode 100644 index 0000000000..abd17677ad --- /dev/null +++ b/packages/commons/octobot_commons/databases/relational_databases/sqlite/cursor_wrapper.py @@ -0,0 +1,27 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class CursorWrapper: + def __init__(self, cursor): + self.cursor = cursor + self.idle = True + + async def close(self): + """ + Close the underlying cursor + """ + await self.cursor.close() diff --git a/packages/commons/octobot_commons/databases/relational_databases/sqlite/sqlite_database.py b/packages/commons/octobot_commons/databases/relational_databases/sqlite/sqlite_database.py new file mode 100644 index 0000000000..f066002f6d --- /dev/null +++ b/packages/commons/octobot_commons/databases/relational_databases/sqlite/sqlite_database.py @@ -0,0 +1,377 @@ +# pylint: disable=C0116,W0511,R0913 +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import sqlite3 + +import octobot_commons.logging as logging +import octobot_commons.enums as enums +import octobot_commons.errors as errors +import octobot_commons.databases.relational_databases.sqlite.cursor_pool as cursor_pool +import octobot_commons.constants as constants + +try: + import aiosqlite +except ImportError: + if constants.USE_MINIMAL_LIBS: + # mock aiosqlite imports + class AiosqliteImportMock: + def connect(self, *args): + raise ImportError("aiosqlite not installed") + + aiosqlite = AiosqliteImportMock() + else: + raise + + +class SQLiteDatabase: + TIMESTAMP_COLUMN = "timestamp" + DEFAULT_ORDER_BY = TIMESTAMP_COLUMN + DEFAULT_SORT = enums.DataBaseOrderBy.DESC.value + DEFAULT_WHERE_OPERATION = "=" + DEFAULT_SIZE = -1 + CACHE_SIZE = 50 + + def __init__(self, file_name): + self.file_name = file_name + self.logger = logging.get_logger(self.__class__.__name__) + + self.tables = [] + self.cache = {} + + self.connection = None + + # should never be used directly, use async with self.aio_cursor() as cursor: instead + self._cursor_pool = None + + async def initialize(self): + try: + self.connection = await aiosqlite.connect(self.file_name) + self._cursor_pool = cursor_pool.CursorPool(self.connection) + await self.__init_tables_list() + except (sqlite3.OperationalError, sqlite3.DatabaseError) as err: + raise errors.DatabaseNotFoundError(f"{err} (file: {self.file_name})") + + async def create_index(self, table, columns): + await self.__execute_index_creation( + table, "_".join(columns), ", ".join(columns) + ) + + @contextlib.asynccontextmanager + async def aio_cursor(self) -> sqlite3.Cursor: + """ + Use this as a context manager to get a free database cursor + :yield: A free cursor + :return: None + """ + async with self._cursor_pool.idle_cursor() as cursor: + yield cursor.cursor + + async def __execute_index_creation(self, table, name, columns): + async with self.aio_cursor() as cursor: + await cursor.execute( + f"CREATE INDEX index_{table.value}_{name} ON {table.value} ({columns})" + ) + + async def insert(self, table, timestamp, **kwargs): + if table.value not in self.tables: + await self.__create_table(table, **kwargs) + + # Insert a row of data + inserting_values = [f"'{value}'" for value in kwargs.values()] + await self.__execute_insert( + table, self.__insert_values(timestamp, ", ".join(inserting_values)) + ) + + async def insert_all(self, table, timestamp, **kwargs): + # TODO refactor with : cursor.executemany("INSERT INTO my_table VALUES (?,?)", values) + if table.value not in self.tables: + await self.__create_table(table, **kwargs) + + insert_values = [] + + for index, values in enumerate(timestamp): + # Insert a row of data + inserting_values = [ + f"'{value if not isinstance(value, list) else value[index]}'" + for value in kwargs.values() + ] + insert_values.append( + self.__insert_values(values, ", ".join(inserting_values)) + ) + + await self.__execute_insert(table, ", ".join(insert_values)) + + async def update(self, table, updated_value_by_column, **kwargs): + # Update a row of data + updating_values = [ + f"{key} = '{value}'" for key, value in updated_value_by_column.items() + ] + await self.__execute_update( + table, + ", ".join(updating_values), + self.__where_clauses_from_kwargs(**kwargs), + ) + + def __insert_values(self, timestamp, inserting_values) -> str: + return f"({timestamp}, {inserting_values})" + + async def __execute_insert(self, table, insert_items) -> None: + async with self.aio_cursor() as cursor: + await cursor.execute(f"INSERT INTO {table.value} VALUES {insert_items}") + + # Save (commit) the changes + await self.connection.commit() + + async def __execute_update(self, table, update_items, where_clauses) -> None: + async with self.aio_cursor() as cursor: + await cursor.execute( + f"UPDATE {table.value} SET {update_items} WHERE {where_clauses}" + ) + + # Save (commit) the changes + await self.connection.commit() + + async def select( + self, + table, + size=DEFAULT_SIZE, + order_by=DEFAULT_ORDER_BY, + sort=DEFAULT_SORT, + **kwargs, + ): + return await self.__execute_select( + table=table, + where_clauses=self.__where_clauses_from_kwargs(**kwargs), + additional_clauses=self.__select_order_by(order_by, sort), + size=size, + ) + + async def select_count(self, table, selected_items=None, **kwargs): + return await self.__execute_select( + table=table, + select_items=f"{self.__count(selected_items)}", + where_clauses=self.__where_clauses_from_kwargs(**kwargs), + ) + + async def select_max( + self, table, max_columns, selected_items=None, group_by=None, **kwargs + ): + return await self.__execute_select( + table=table, + select_items=f"{self.__max(max_columns)}" + f"{', ' if selected_items else ''}" + f"{self.__selected_columns(selected_items)}", + where_clauses=self.__where_clauses_from_kwargs(**kwargs), + group_by=self.__select_group_by(group_by) if group_by else "", + ) + + async def select_min( + self, table, min_columns, selected_items=None, group_by=None, **kwargs + ): + return await self.__execute_select( + table=table, + select_items=f"{self.__min(min_columns)}" + f"{', ' if selected_items else ''}" + f"{self.__selected_columns(selected_items)}", + where_clauses=self.__where_clauses_from_kwargs(**kwargs), + group_by=self.__select_group_by(group_by) if group_by else "", + ) + + async def select_from_timestamp( + self, + table, + timestamps: list, + operations: list, + size=DEFAULT_SIZE, + order_by=DEFAULT_ORDER_BY, + sort=DEFAULT_SORT, + **kwargs, + ): + timestamps_where_clauses = self.__where_clauses_from_operations( + keys=[self.TIMESTAMP_COLUMN] * len(timestamps), + values=timestamps, + operations=operations, + should_quote_value=False, + ) + where_clause = self.__where_clauses_from_kwargs(**kwargs) + final_where_close = ( + f"{where_clause} AND " + if where_clause and timestamps_where_clauses + else where_clause + ) + final_where_close = f"{final_where_close}{timestamps_where_clauses}" + return await self.__execute_select( + table=table, + where_clauses=final_where_close, + additional_clauses=self.__select_order_by(order_by, sort), + size=size, + ) + + async def delete(self, table, **kwargs): + return await self.__execute_delete( + table, + self.__where_clauses_from_kwargs(**kwargs), + ) + + def __where_clauses_from_kwargs(self, should_quote_value=True, **kwargs) -> str: + return self.__where_clauses_from_operations( + list(kwargs.keys()), + list(kwargs.values()), + [], + should_quote_value=should_quote_value, + ) + + def __where_clauses_from_operation( + self, key, value, operation=DEFAULT_WHERE_OPERATION, should_quote_value=True + ): + return ( + f"{key} {operation if operation is not None else self.DEFAULT_WHERE_OPERATION} " + f"{self.__quote_value(value) if should_quote_value else value}" + ) + + def __where_clauses_from_operations( + self, keys, values, operations, should_quote_value=True + ): + return " AND ".join( + [ + self.__where_clauses_from_operation( + keys[i], + values[i], + operations[i] if len(operations) > i else None, + should_quote_value=should_quote_value, + ) + for i in range(len(keys)) + if values[i] is not None + ] + ) + + def __select_order_by(self, order_by, sort): + return ( + f"ORDER BY " + f"{order_by if order_by is not None else self.DEFAULT_ORDER_BY} " + f"{sort if sort is not None else self.DEFAULT_SORT}" + ) + + def __select_group_by(self, group_by): + return f"GROUP BY {group_by}" + + def __quote_value(self, value): + return f"'{value}'" + + def __max(self, columns): + return f"MAX({self.__selected_columns(columns)})" + + def __min(self, columns): + return f"MIN({self.__selected_columns(columns)})" + + def __count(self, columns): + return f"COUNT({self.__selected_columns(columns)})" + + def __selected_columns(self, columns=None): + return ",".join(columns) if columns else "" + + async def __execute_select( + self, + table, + select_items="*", + where_clauses="", + additional_clauses="", + group_by="", + size=DEFAULT_SIZE, + ): + try: + async with self.aio_cursor() as cursor: + limit_clause = "" if size == self.DEFAULT_SIZE else f"LIMIT {size}" + await cursor.execute( + f"SELECT {select_items} FROM {table.value} " + f"{'WHERE' if where_clauses else ''} {where_clauses} " + f"{additional_clauses} {limit_clause} {group_by}" + ) + return await cursor.fetchall() + except sqlite3.OperationalError as err: + if not await self.check_table_exists(table): + raise errors.DatabaseNotFoundError(err) + self.logger.error(f"An error occurred when executing select : {err}") + return [] + + async def __execute_delete(self, table, where_clauses): + async with self.aio_cursor() as cursor: + await cursor.execute(f"DELETE FROM {table.value} WHERE {where_clauses} ") + # nothing to return, will raise on error + + async def check_table_exists(self, table) -> bool: + async with self.aio_cursor() as cursor: + await cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table.value}'" + ) + return await cursor.fetchall() != [] + + async def check_table_not_empty(self, table) -> bool: + async with self.aio_cursor() as cursor: + await cursor.execute(f"SELECT count(*) FROM '{table.value}'") + row_count = await cursor.fetchone() + return row_count[0] != 0 + + async def __create_table( + self, table, with_index_on_timestamp=True, **kwargs + ) -> None: + try: + columns: list = list(kwargs.keys()) + async with self.aio_cursor() as cursor: + await cursor.execute( + f"CREATE TABLE {table.value} ({self.TIMESTAMP_COLUMN} datetime, " + f"{' text, '.join(col for col in columns)})" + ) + + if with_index_on_timestamp: + await self.create_index(table, [self.TIMESTAMP_COLUMN]) + + for i in range(1, round(len(columns) / 2) + 1): + await self.create_index( + table, + [self.TIMESTAMP_COLUMN] + [columns[u] for u in range(0, i)], + ) + + except sqlite3.OperationalError: + self.logger.error(f"{table} already exists") + finally: + self.tables.append(table.value) + + async def __init_tables_list(self): + async with self.aio_cursor() as cursor: + await cursor.execute("SELECT name FROM sqlite_master WHERE type='table'") + self.tables = [res[0] for res in await cursor.fetchall()] + + async def stop(self): + try: + if self._cursor_pool is not None: + await self._cursor_pool.close() + finally: + if self.connection is not None: + conn = self.connection + self.connection = None + await conn.close() + + +@contextlib.asynccontextmanager +async def new_sqlite_database(file_path): + local_database = SQLiteDatabase(file_path) + try: + await local_database.initialize() + yield local_database + finally: + await local_database.stop() diff --git a/packages/commons/octobot_commons/databases/run_databases/__init__.py b/packages/commons/octobot_commons/databases/run_databases/__init__.py new file mode 100644 index 0000000000..9841cfeed3 --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/__init__.py @@ -0,0 +1,48 @@ +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.databases.run_databases import run_databases_identifier +from octobot_commons.databases.run_databases import run_databases_provider +from octobot_commons.databases.run_databases import storage +from octobot_commons.databases.run_databases import abstract_run_databases_pruner +from octobot_commons.databases.run_databases import file_system_run_databases_pruner + +from octobot_commons.databases.run_databases.run_databases_identifier import ( + RunDatabasesIdentifier, +) +from octobot_commons.databases.run_databases.run_databases_provider import ( + RunDatabasesProvider, +) +from octobot_commons.databases.run_databases.storage import ( + init_bot_storage, + close_bot_storage, +) +from octobot_commons.databases.run_databases.abstract_run_databases_pruner import ( + AbstractRunDatabasesPruner, +) +from octobot_commons.databases.run_databases.file_system_run_databases_pruner import ( + FileSystemRunDatabasesPruner, +) +from octobot_commons.databases.run_databases.run_databases_pruning_factory import ( + run_databases_pruner_factory, +) + + +__all__ = [ + "RunDatabasesIdentifier", + "RunDatabasesProvider", + "init_bot_storage", + "close_bot_storage", + "AbstractRunDatabasesPruner", + "FileSystemRunDatabasesPruner", + "run_databases_pruner_factory", +] diff --git a/packages/commons/octobot_commons/databases/run_databases/abstract_run_databases_pruner.py b/packages/commons/octobot_commons/databases/run_databases/abstract_run_databases_pruner.py new file mode 100644 index 0000000000..a7b1cecc1d --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/abstract_run_databases_pruner.py @@ -0,0 +1,140 @@ +# pylint: disable=W0703 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time + +import octobot_commons.enums as enums +import octobot_commons.logging as logging +import octobot_commons.databases.implementations as databases_implementations +import octobot_commons.databases.run_databases.utils as run_databases_utils + + +class AbstractRunDatabasesPruner: + def __init__(self, run_databases_identifier, max_databases_size): + self.logger = logging.get_logger(self.__class__.__name__) + self.database_adaptor = run_databases_identifier.database_adaptor + self.databases_root_identifier = run_databases_identifier.data_path + self.max_databases_size = max_databases_size + self.all_db_data = [] + self.backtesting_run_path_identifier = ( + run_databases_utils.get_backtesting_related_run_path_identifiers_str( + self.database_adaptor + ) + ) + self._run_db = run_databases_identifier.get_db_full_name( + enums.RunDatabases.RUN_DATA_DB.value + ) + + async def explore(self): + """ + Explore self.databases_root_identifier to gather storage + statistics to be used in prune_oldest_run_databases + """ + t_start = time.time() + await self._explore_databases() + total_time = round(time.time() - t_start, 2) + if total_time > 1: + self.logger.debug( + f"Explored run databases for pruning in {total_time} seconds." + ) + + async def prune_oldest_run_databases(self): + """ + Delete the necessary backtesting run data for the total backtesting storage + size to be <= self.max_databases_size. Deletes oldest run data first + """ + self.all_db_data = sorted( + self.all_db_data, key=lambda data: data.last_modified_time + ) + removed_databases = [] + while self._get_total_db_size() > self.max_databases_size: + if await self._prune_database(self.all_db_data[0]): + removed_databases.append(self.all_db_data[0]) + self.all_db_data = self.all_db_data[1:] + if removed_databases: + await self._update_backtesting_runs_metadata(removed_databases) + self._log_summary(removed_databases) + + async def _explore_databases(self): + raise NotImplementedError("_explore_databases is not implemented") + + async def _prune_database(self, db_data): + raise NotImplementedError("_prune_database is not implemented") + + async def _get_global_runs_identifiers(self, removed_databases): + raise NotImplementedError("_get_global_runs_identifiers is not implemented") + + async def _update_backtesting_runs_metadata(self, removed_databases): + for global_runs_identifier in await self._get_global_runs_identifiers( + removed_databases + ): + await self._update_metadata(global_runs_identifier) + + async def _update_metadata(self, global_runs_identifier): + run_db_identifier = run_databases_utils.get_global_run_database_identifier( + global_runs_identifier + ) + if run_db_identifier is not None: + remaining_run_ids = { + int(identifier) + for identifier in await run_db_identifier.get_backtesting_run_ids() + } + async with databases_implementations.DBWriterReader.database( + run_db_identifier.get_backtesting_metadata_identifier() + ) as reader_writer: + found_runs = await reader_writer.all(enums.DBTables.METADATA.value) + # iterate in reverse order to keep only latest appearance of each id + metadata = [] + added_runs = set() + for run in found_runs[::-1]: + run_id = run[enums.BacktestingMetadata.ID.value] + if run_id in remaining_run_ids and run_id not in added_runs: + metadata.append(run) + added_runs.add(run_id) + await reader_writer.replace_all(enums.DBTables.METADATA.value, metadata) + + def _log_summary(self, removed_databases): + first_removed = removed_databases[0].get_human_readable_last_modified_time() + last_removed = removed_databases[-1].get_human_readable_last_modified_time() + self.logger.debug( + f"Deleted the {len(removed_databases)} oldest run data from the {first_removed} to the {last_removed}" + ) + + def _get_total_db_size(self): + return sum(db_data.size for db_data in self.all_db_data) + + +class DBData: + def __init__(self, identifier, parts): + self.identifier = identifier + self.parts = parts + self.size = sum(part.size for part in self.parts) + self.last_modified_time = max(part.last_modified_time for part in self.parts) + + def get_human_readable_last_modified_time(self): + """ + :return: self.last_modified_time in a human-readable format + """ + return time.strftime( + "%Y-%m-%d %H:%M:%S", time.strptime(time.ctime(self.last_modified_time)) + ) + + +class AbstractDBPartData: + def __init__(self, identifier): + self.identifier = identifier + self.size = None + self.last_modified_time = None diff --git a/packages/commons/octobot_commons/databases/run_databases/file_system_run_databases_pruner.py b/packages/commons/octobot_commons/databases/run_databases/file_system_run_databases_pruner.py new file mode 100644 index 0000000000..df72c00b67 --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/file_system_run_databases_pruner.py @@ -0,0 +1,79 @@ +# pylint: disable=W0703 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import shutil + +import octobot_commons.databases.run_databases.abstract_run_databases_pruner as abstract_run_databases_pruner + + +class FileSystemRunDatabasesPruner( + abstract_run_databases_pruner.AbstractRunDatabasesPruner +): + async def _explore_databases(self): + self.all_db_data = [ + abstract_run_databases_pruner.DBData( + directory, + [FileSystemDBPartData(f) for f in self._get_all_files(directory)], + ) + for directory in self._get_file_system_runs(self.databases_root_identifier) + ] + + async def _prune_database(self, db_data): + try: + shutil.rmtree(db_data.identifier) + return True + except Exception as err: + self.logger.exception(err, True, f"Error when deleting run database: {err}") + return False + + async def _get_global_runs_identifiers(self, removed_databases): + return { + os.path.dirname(removed_database.identifier) + for removed_database in removed_databases + } + + def _get_file_system_runs(self, root): + try: + # use os.scandir as it is much faster than os.walk + for entry in os.scandir(root): + if self._is_run_top_level_folder(entry): + yield entry + elif entry.is_dir(): + yield from self._get_file_system_runs(entry) + except FileNotFoundError: + # nothing to explore + pass + + def _get_all_files(self, root): + for entry in os.scandir(root): + if entry.is_file(): + yield entry + elif entry.is_dir(): + yield from self._get_all_files(entry) + + def _is_run_top_level_folder(self, dir_entry): + return os.path.isfile(os.path.join(dir_entry, self._run_db)) and any( + identifier in dir_entry.path + for identifier in self.backtesting_run_path_identifier + ) + + +class FileSystemDBPartData(abstract_run_databases_pruner.AbstractDBPartData): + def __init__(self, identifier): + super().__init__(identifier) + self.size = os.path.getsize(self.identifier) + self.last_modified_time = os.path.getmtime(self.identifier) diff --git a/packages/commons/octobot_commons/databases/run_databases/run_databases_identifier.py b/packages/commons/octobot_commons/databases/run_databases/run_databases_identifier.py new file mode 100644 index 0000000000..198b2963e5 --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/run_databases_identifier.py @@ -0,0 +1,429 @@ +# pylint: disable=R0902,R0913,C0415,R0904 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import shutil + +import octobot_commons.databases.document_database_adaptors as adaptors +import octobot_commons.constants as constants +import octobot_commons.enums as enums +import octobot_commons.symbols.symbol_util as symbol_util + + +class RunDatabasesIdentifier: + def __init__( + self, + tentacle_class, + optimization_campaign_name=None, + database_adaptor=adaptors.TinyDBAdaptor, + backtesting_id=None, + live_id=None, + optimizer_id=None, + context=None, + enable_storage=True, + ): + self.database_adaptor = database_adaptor + self.optimization_campaign_name = optimization_campaign_name + self.backtesting_id = backtesting_id + self.live_id = live_id + self.optimizer_id = optimizer_id + self.tentacle_class = ( + tentacle_class + if isinstance(tentacle_class, str) + else tentacle_class.__name__ + ) + self.enable_storage = enable_storage + self.context = context + self.data_path = self._merge_parts(constants.USER_FOLDER, constants.DATA_FOLDER) + self.base_path = self._merge_parts(self.data_path, self.tentacle_class) + self.suffix = ( + self.database_adaptor.get_db_file_ext() + if self.database_adaptor.is_file_system_based() + else "" + ) + + async def initialize(self, exchange=None): + """ + Initializes the necessary elements for these run databases. Creates necessary folder on file system databases + :param exchange: name of the associated exchange + Used for live trading cross trading mode stats (such as profitability) + """ + if not self.enable_storage: + return + # global history is a live only feature + from_global_history = self.backtesting_id is None + deepest_identifier = ( + self._base_folder(from_global_history=from_global_history) + if exchange is None + else self._merge_parts( + self._base_folder(from_global_history=from_global_history), exchange + ) + ) + await self.database_adaptor.create_identifier(deepest_identifier) + + def is_backtesting(self) -> bool: + """ + :return: True when the database identifier associated to a backtesting run + """ + return self.backtesting_id is not None + + def get_run_data_db_identifier(self) -> str: + """ + :return: the database identifier associated to the run database + """ + return self._get_db_identifier(enums.RunDatabases.RUN_DATA_DB.value, None) + + def get_orders_db_identifier(self, account_type, exchange) -> str: + """ + :return: the database identifier associated to this exchange's orders + :param account_type: type of account + :param exchange: name of the associated exchange + """ + return self._get_db_identifier( + f"{enums.RunDatabases.ORDERS_DB.value}{account_type}", exchange + ) + + def get_trades_db_identifier(self, account_type, exchange) -> str: + """ + :return: the database identifier associated to this exchange's trades + :param account_type: type of account + :param exchange: name of the associated exchange + """ + return self._get_db_identifier( + f"{enums.RunDatabases.TRADES_DB.value}{account_type}", exchange + ) + + def get_transactions_db_identifier(self, account_type, exchange) -> str: + """ + :return: the database identifier associated to this exchange's transactions + :param account_type: type of account + :param exchange: name of the associated exchange + """ + return self._get_db_identifier( + f"{enums.RunDatabases.TRANSACTIONS_DB.value}{account_type}", exchange + ) + + def get_symbol_db_identifier(self, exchange, symbol) -> str: + """ + :return: the database identifier associated to this exchange's symbol data + :param exchange: name of the associated exchange + :param symbol: the associated symbol + """ + return self._get_db_identifier(symbol_util.merge_symbol(symbol), exchange) + + def get_historical_portfolio_value_db_identifier( + self, account_type, exchange + ) -> str: + """ + :return: the database identifier associated to this exchange's historical portfolio value + :param account_type: a suffix identifying the type of portfolio (future / sandbox etc) + :param exchange: name of the associated exchange + """ + return self._get_db_identifier( + f"{enums.RunDatabases.PORTFOLIO_VALUE_DB.value}{account_type}", + exchange, + ) + + def get_backtesting_metadata_identifier(self) -> str: + """ + :return: the database identifier associated to backtesting metadata + """ + return self._get_db_identifier( + enums.RunDatabases.METADATA.value, None, ignore_backtesting_id=True + ) + + def get_bot_live_metadata_identifier(self) -> str: + """ + :return: the database identifier associated to live metadata + """ + return self._get_db_identifier( + enums.RunDatabases.METADATA.value, None, ignore_live_id=True + ) + + def _get_db_identifier(self, run_database_name, exchange, **base_folder_kwargs): + if exchange is None: + return self._merge_parts( + self._base_folder(**base_folder_kwargs), + self.get_db_full_name(run_database_name), + ) + return self._merge_parts( + self._base_folder(**base_folder_kwargs), + exchange, + self.get_db_full_name(run_database_name), + ) + + def get_db_full_name(self, db_name): + """ + :return: the db_name's associated database name including suffix + """ + return f"{db_name}{self.suffix}" + + async def exchange_base_identifier_exists(self, exchange) -> bool: + """ + :return: True if there are data under this exchange name + """ + return await self.database_adaptor.identifier_exists( + self.get_exchange_based_identifier(exchange), False + ) + + def get_exchange_based_identifier(self, exchange): + """ + :return: the database identifier associated to the given exchange + """ + return self._merge_parts(self._base_folder(), exchange) + + async def get_single_existing_exchange(self) -> str: + """ + :return: the name of the only exchange the backtesting happened on if it only ran on a single exchange, + None otherwise + """ + ignored_folders = [enums.RunDatabases.LIVE.value] + try: + import octobot_tentacles_manager.constants as tentacles_manager_constants + + ignored_folders.append( + tentacles_manager_constants.TENTACLES_SPECIFIC_CONFIG_FOLDER + ) + except ImportError: + pass + return await self.database_adaptor.get_single_sub_identifier( + self._base_folder(), ignored_folders + ) + + async def symbol_base_identifier_exists(self, exchange, symbol) -> bool: + """ + :return: True if there are data under this exchange name + """ + identifier = self._merge_parts( + self._base_folder(), + exchange, + self.get_db_full_name(symbol_util.merge_symbol(symbol)), + ) + return await self.database_adaptor.identifier_exists(identifier, True) + + def get_backtesting_run_folder(self) -> str: + """ + :return: base folder associated to a backtesting run + """ + return self._base_folder() + + def get_optimizer_runs_schedule_identifier(self) -> str: + """ + :return: the identifier associated to the optimizer run schedule database + """ + return self._merge_parts( + self.base_path, + self.optimization_campaign_name, + enums.RunDatabases.OPTIMIZER.value, + self.get_db_full_name(enums.RunDatabases.OPTIMIZER_RUNS_SCHEDULE_DB.value), + ) + + async def generate_new_backtesting_id(self) -> int: + """ + :return: a new unique backtesting id + """ + return await self._generate_new_id(is_optimizer=False) + + async def generate_new_bot_live_id(self) -> int: + """ + :return: a new unique bot recording id + """ + return await self._generate_new_id(is_optimizer=False, is_bot_recording=True) + + async def generate_new_optimizer_id(self, back_list) -> int: + """ + :return: a new unique optimizer id + """ + return await self._generate_new_id(back_list=back_list, is_optimizer=True) + + def is_symbol_database(self, database_identifier: str) -> bool: + """ + :return: True if the given identifier is related to a symbol database + """ + return database_identifier.endswith(self.suffix) and all( + not other_identifier.value in database_identifier + for other_identifier in enums.RunDatabases + ) + + def get_symbol_db_name(self, symbol_db_identifier): + """ + :return: the given identifier's database name (without suffix if any) + """ + return symbol_db_identifier.split(self.suffix)[0] + + def remove_all(self): + """ + Clears every data from a backtesting run + """ + identifier = self._base_folder() + if self.database_adaptor.is_file_system_based(): + if os.path.isdir(identifier): + shutil.rmtree(identifier) + return + raise RuntimeError(f"Unhandled database_adaptor {self.database_adaptor}") + + async def _generate_new_id( + self, back_list=None, is_optimizer=False, is_bot_recording=False + ): + back_list = back_list or [] + max_runs = ( + constants.MAX_OPTIMIZER_RUNS + if is_optimizer + else constants.MAX_BACKTESTING_RUNS + ) + for index in range(1, max_runs + 1): + if index in back_list: + continue + name_candidate = ( + self._base_folder(optimizer_id=index) + if is_optimizer + else ( + self._base_folder(live_id=index) + if is_bot_recording + else self._base_folder(backtesting_id=index) + ) + ) + if not await self.database_adaptor.identifier_exists(name_candidate, False): + return index + raise RuntimeError( + f"Reached maximum number of {'optimizer' if is_optimizer else 'backtesting'} runs " + f"({constants.MAX_BACKTESTING_RUNS}). Please remove some." + ) + + async def get_optimization_campaign_names(self) -> list: + """ + :return: a list of every existing campaign name + """ + optimization_campaign_folder = self._merge_parts(self.base_path) + if await self.database_adaptor.identifier_exists( + optimization_campaign_folder, False + ): + return [ + element + async for element in self.database_adaptor.get_sub_identifiers( + optimization_campaign_folder, [enums.RunDatabases.LIVE.value] + ) + ] + return [] + + async def get_optimizer_run_ids(self) -> list: + """ + :return: a list of every optimizer id in the current campaign + """ + optimizer_runs_path = self._merge_parts( + self.base_path, + self.optimization_campaign_name, + enums.RunDatabases.OPTIMIZER.value, + ) + if await self.database_adaptor.identifier_exists(optimizer_runs_path, False): + return [ + self.parse_optimizer_id(element) + async for element in self.database_adaptor.get_sub_identifiers( + optimizer_runs_path, [] + ) + ] + + async def get_backtesting_run_ids(self) -> list: + """ + :return: a list of every backtesting id in the current campaign + """ + runs_path = self._base_folder(ignore_backtesting_id=True) + if await self.database_adaptor.identifier_exists(runs_path, False): + return [ + self.parse_backtesting_id(element) + async for element in self.database_adaptor.get_sub_identifiers( + runs_path, [] + ) + ] + + @staticmethod + def parse_optimizer_id(identifier) -> str: + """ + :return: the associated optimizer id + """ + return identifier.split(constants.DB_SEPARATOR)[-1] + + @staticmethod + def parse_backtesting_id(identifier) -> str: + """ + :return: the associated backtesting id + """ + return identifier.split(constants.DB_SEPARATOR)[-1] + + def _get_base_path(self, from_global_history, backtesting_id, optimizer_id): + if from_global_history and (backtesting_id is None and optimizer_id is None): + # in live global history, use self.data_path as it's not related to a trading mode + return self.data_path + return self.base_path + + def _base_folder( + self, + ignore_backtesting_id=False, + backtesting_id=None, + live_id=None, + ignore_live_id=None, + ignore_optimizer_id=False, + optimizer_id=None, + from_global_history=True, + ) -> str: + backtesting_id = backtesting_id or self.backtesting_id + optimizer_id = optimizer_id or self.optimizer_id + path = self._get_base_path(from_global_history, backtesting_id, optimizer_id) + live_id = live_id or self.live_id + # when in optimizer or backtesting: wrap it into the current campaign + if backtesting_id is not None or optimizer_id is not None: + if self.optimization_campaign_name is None: + raise RuntimeError( + f"optimization_campaign_name is required in {RunDatabasesIdentifier} " + f"constructor while in a backtesting or optimizer context" + ) + path = self._merge_parts(path, self.optimization_campaign_name) + if optimizer_id is not None: + if ignore_optimizer_id: + path = self._merge_parts(path, enums.RunDatabases.OPTIMIZER.value) + else: + path = self._merge_parts( + path, + enums.RunDatabases.OPTIMIZER.value, + f"{enums.RunDatabases.OPTIMIZER.value}{constants.DB_SEPARATOR}{optimizer_id}", + ) + if backtesting_id is not None: + if optimizer_id is None: + path = self._merge_parts(path, enums.RunDatabases.BACKTESTING.value) + if ignore_backtesting_id: + return path + return self._merge_parts( + path, + f"{enums.RunDatabases.BACKTESTING.value}" + f"{constants.DB_SEPARATOR}{backtesting_id}", + ) + if optimizer_id is None: + # live mode + if ignore_live_id: + return self._merge_parts(path, enums.RunDatabases.LIVE.value) + return self._merge_parts( + path, + f"{os.path.join(enums.RunDatabases.LIVE.value, enums.RunDatabases.LIVE.value)}" + f"{constants.DB_SEPARATOR}{live_id}", + ) + return path + + def _merge_parts(self, *parts): + return ( + os.path.join(*parts) + if self.database_adaptor.is_file_system_based() + else constants.DB_SEPARATOR.join(*parts) + ) diff --git a/packages/commons/octobot_commons/databases/run_databases/run_databases_provider.py b/packages/commons/octobot_commons/databases/run_databases/run_databases_provider.py new file mode 100644 index 0000000000..249d2d8899 --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/run_databases_provider.py @@ -0,0 +1,134 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.singleton as singleton +import octobot_commons.databases.implementations.meta_database as meta_database +import octobot_commons.errors as errors +import octobot_commons.logging as logging + + +class RunDatabasesProvider(singleton.Singleton): + def __init__(self): + self.logger = logging.get_logger(self.__class__.__name__) + self.run_databases = {} + + async def add_bot_id( + self, bot_id, run_database_identifier, with_lock=False, cache_size=None + ): + """ + Initialize the given run_database_identifier and create a new MetaDatabase associated to the given bot_id + """ + await run_database_identifier.initialize() + self.run_databases[bot_id] = meta_database.MetaDatabase( + run_database_identifier, with_lock=with_lock, cache_size=cache_size + ) + + def has_bot_id(self, bot_id): + """ + :return: True if the given bot_id has been added via add_bot_id + """ + return bot_id in self.run_databases + + def remove_bot_id(self, bot_id): + """ + remove the run database from the given bot_id + :return: the removed database + """ + return self.run_databases.pop(bot_id) + + def is_storage_enabled(self, bot_id): + """ + :return: True if storage is enabled for the given bot_id + """ + return self.run_databases[bot_id].run_dbs_identifier.enable_storage + + def get_any_run_databases_identifier(self): + """ + :return: the first added run_dbs_identifier + """ + return next(iter(self.run_databases.values())).run_dbs_identifier + + def get_run_databases_identifier(self, bot_id): + """ + :return: the bot_id associated run_dbs_identifier + """ + return self.run_databases[bot_id].run_dbs_identifier + + def get_run_db(self, bot_id): + """ + :return: the bot_id associated run database + """ + return self.run_databases[bot_id].get_run_db() + + def get_orders_db(self, bot_id, account_type, exchange=None): + """ + :return: the bot_id and exchange associated orders database and account_type. + Use local run_database_identifier.context for exchange if not provided. + """ + return self.run_databases[bot_id].get_orders_db(account_type, exchange=exchange) + + def get_trades_db(self, bot_id, account_type, exchange=None): + """ + :return: the bot_id and exchange associated trades database and account_type. + Use local run_database_identifier.context for exchange if not provided. + """ + return self.run_databases[bot_id].get_trades_db(account_type, exchange=exchange) + + def get_transactions_db(self, bot_id, account_type, exchange=None): + """ + :return: the bot_id and exchange associated transactions database and account_type. + Use local run_database_identifier.context for exchange if not provided. + """ + return self.run_databases[bot_id].get_transactions_db( + account_type, exchange=exchange + ) + + def get_backtesting_metadata_db(self, bot_id): + """ + :return: the bot_id and exchange associated backtesting metadata database. + """ + return self.run_databases[bot_id].get_backtesting_metadata_db() + + def get_symbol_db(self, bot_id, exchange, symbol): + """ + :return: the bot_id and exchange associated transactions database. + Use local run_database_identifier.context for exchange if exchange is None. + """ + if not symbol: + raise errors.DatabaseNotFoundError("symbol parameter has to be provided") + return self.run_databases[bot_id].get_symbol_db(exchange, symbol) + + async def get_all_symbol_dbs(self, bot_id, exchange): + """ + :return: an iterable over each symbol database for the given exchange + """ + return await self.run_databases[bot_id].get_all_symbol_dbs(exchange) + + def get_historical_portfolio_value_db(self, bot_id, account_type, exchange): + """ + :return: the bot_id, exchange and account_type associated transactions database. + """ + return self.run_databases[bot_id].get_historical_portfolio_value_db( + account_type, exchange + ) + + async def close(self, bot_id): + """ + Close the bot_id associated databases. Does not pop bot_id from self.run_databases to allow post-close calls. + """ + self.logger.debug(f"Closing bot storage for bot_id: {bot_id} ...") + await self.run_databases[bot_id].close() + # do not pop bot_id to keep run data access + self.logger.debug(f"Closed bot storage for bot_id: {bot_id}") diff --git a/packages/commons/octobot_commons/databases/run_databases/run_databases_pruning_factory.py b/packages/commons/octobot_commons/databases/run_databases/run_databases_pruning_factory.py new file mode 100644 index 0000000000..1f7245ce8a --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/run_databases_pruning_factory.py @@ -0,0 +1,29 @@ +# pylint: disable=W0703 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases.run_databases.file_system_run_databases_pruner as file_system_run_databases_pruner + + +def run_databases_pruner_factory(run_databases_identifier, max_db_size): + """ + :return: A RunDatabasesPruner instance + """ + if run_databases_identifier.database_adaptor.is_file_system_based(): + return file_system_run_databases_pruner.FileSystemRunDatabasesPruner( + run_databases_identifier, + max_db_size, + ) + raise NotImplementedError("Only file system based database pruner is implemented") diff --git a/packages/commons/octobot_commons/databases/run_databases/storage.py b/packages/commons/octobot_commons/databases/run_databases/storage.py new file mode 100644 index 0000000000..b76ed7dc43 --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/storage.py @@ -0,0 +1,60 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import octobot_commons.databases.run_databases.run_databases_provider as run_databases_provider +import octobot_commons.configuration as configuration +import octobot_commons.enums as enums +import octobot_commons.logging as logging + + +async def init_bot_storage(bot_id, run_database_identifier, clear_user_inputs): + """ + Initializes the associated bot_id databases. Deletes any existing user input if clear_user_inputs is True + """ + if not run_databases_provider.RunDatabasesProvider.instance().has_bot_id(bot_id): + # only one run database per bot id + await run_databases_provider.RunDatabasesProvider.instance().add_bot_id( + bot_id, run_database_identifier + ) + # always ensure database is valid + run_db = run_databases_provider.RunDatabasesProvider.instance().get_run_db( + bot_id + ) + if run_database_identifier.enable_storage: + await _repair_database_if_necessary(run_db) + if clear_user_inputs: + await configuration.clear_user_inputs(run_db) + + +async def close_bot_storage(bot_id): + """ + :return: Close the bot_id associated run databases + """ + if run_databases_provider.RunDatabasesProvider.instance().has_bot_id(bot_id): + await run_databases_provider.RunDatabasesProvider.instance().close(bot_id) + + +async def _repair_database_if_necessary(database): + try: + # will raise if the db has an issue + await database.all(enums.DBTables.METADATA.value) + except json.JSONDecodeError: + logging.get_logger(__name__).warning( + f"Invalid database at {database}, resetting content." + ) + # error in database, reset it + await database.hard_reset() diff --git a/packages/commons/octobot_commons/databases/run_databases/utils.py b/packages/commons/octobot_commons/databases/run_databases/utils.py new file mode 100644 index 0000000000..b9a50e32ad --- /dev/null +++ b/packages/commons/octobot_commons/databases/run_databases/utils.py @@ -0,0 +1,77 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +# pathlib.Path should not be used to keep consistency with str paths +import pathlib + +import octobot_commons.enums as enums +import octobot_commons.logging as logging +import octobot_commons.constants as constants +import octobot_commons.databases.run_databases.run_databases_identifier as run_databases_identifier + + +def get_backtesting_related_run_path_identifiers_str(database_adaptor): + """ + :return: database identifier fragments associated to the given database_adaptor used + in backtesting + """ + separator = ( + os.path.sep if database_adaptor.is_file_system_based else constants.DB_SEPARATOR + ) + return { + f"{separator}{enums.RunDatabases.BACKTESTING.value}{separator}", + f"{separator}{enums.RunDatabases.OPTIMIZER.value}{separator}", + } + + +def get_global_run_database_identifier(runs_identifier): + """ + :return: a RunDatabasesIdentifier associated to the given runs_identifier + """ + # used to split paths into str parts + split_path = pathlib.Path(runs_identifier).parts + try: + if split_path[-2] == enums.RunDatabases.OPTIMIZER.value: + # in optimizer + # ex: [..., 'DipAnalyserTradingMode', 'Dip Analyser strat designer test', 'optimizer', 'optimizer_1'] + optimizer_id = ( + run_databases_identifier.RunDatabasesIdentifier.parse_optimizer_id( + split_path[-1] + ) + ) + campaign_name = split_path[-3] + trading_mode = split_path[-4] + return run_databases_identifier.RunDatabasesIdentifier( + trading_mode, + optimization_campaign_name=campaign_name, + backtesting_id=0, + optimizer_id=optimizer_id, + ) + # in backtesting + # ex: [..., 'DipAnalyserTradingMode', 'Dip Analyser strat designer test', 'backtesting'] + campaign_name = split_path[-2] + trading_mode = split_path[-3] + return run_databases_identifier.RunDatabasesIdentifier( + trading_mode, + optimization_campaign_name=campaign_name, + backtesting_id=0, + ) + except IndexError as err: + logging.get_logger("run_databases_utils").exception( + err, True, f"Unhandled backtesting data path format: {runs_identifier}" + ) + return None diff --git a/packages/commons/octobot_commons/dataclasses/__init__.py b/packages/commons/octobot_commons/dataclasses/__init__.py new file mode 100644 index 0000000000..989d6246ca --- /dev/null +++ b/packages/commons/octobot_commons/dataclasses/__init__.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.dataclasses import flexible_dataclass +from octobot_commons.dataclasses.flexible_dataclass import ( + FlexibleDataclass, +) + +from octobot_commons.dataclasses import minimizable_dataclass +from octobot_commons.dataclasses.minimizable_dataclass import ( + MinimizableDataclass, +) + +from octobot_commons.dataclasses import updatable_dataclass +from octobot_commons.dataclasses.updatable_dataclass import ( + UpdatableDataclass, +) + + +__all__ = ["FlexibleDataclass", "MinimizableDataclass", "UpdatableDataclass"] diff --git a/packages/commons/octobot_commons/dataclasses/flexible_dataclass.py b/packages/commons/octobot_commons/dataclasses/flexible_dataclass.py new file mode 100644 index 0000000000..e4f9a5c06c --- /dev/null +++ b/packages/commons/octobot_commons/dataclasses/flexible_dataclass.py @@ -0,0 +1,90 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import types +import typing + +try: + from pydantic import BaseModel as PydanticBaseModel +except ImportError: + PydanticBaseModel = None + + +@dataclasses.dataclass +class FlexibleDataclass: + _class_field_cache: typing.ClassVar[dict] = dataclasses.field(default={}, repr=False) + """ + Implements from_dict which can be called to instantiate a new instance of this class from a dict. Using from_dict + ignores any additional key from the given dict that is not defined as a dataclass field. + Nested dataclasses to be parsed inside a list or other container should be calling .from_dict in __post_init__ + """ + + @classmethod + def from_dict(cls, dict_value: dict): + """ + Creates a new instance of cls from the given dict, ignoring additional dict values + """ + if isinstance(dict_value, dict): + fields_values = { + k: _get_nested_class(v, cls._class_field_cache[k]) + for k, v in dict_value.items() + if k in cls.get_field_names() + } + try: + return cls(**fields_values) + except TypeError as e: + raise TypeError( + f"Invalid {cls.__name__} input in from_dict(): {e}" + ) from e + return dict_value + + @classmethod + def get_field_names(cls): + """ + :return a generator over the given FlexibleDataclass field names + """ + if not cls._class_field_cache: + cls._class_field_cache = { + f.name: f.type for f in dataclasses.fields(cls) if f.init + } + return cls._class_field_cache.keys() + + +def _resolve_target_type(target_type): + """Resolve Optional/Union to the concrete type for nested parsing.""" + origin = typing.get_origin(target_type) + union_type = getattr(types, "UnionType", type(None)) + if origin is typing.Union or origin is union_type: + args = typing.get_args(target_type) + # Get the non-None type from Union[X, None] or X | None + for arg in args: + if arg is not None: + return arg + return target_type + + +def _get_nested_class(value, target_type): + # does not support lists or dicts + if value is None: + return value + resolved_type = _resolve_target_type(target_type) + if not isinstance(resolved_type, type): + return value + if issubclass(resolved_type, FlexibleDataclass): + return resolved_type.from_dict(value) + if PydanticBaseModel is not None and issubclass(resolved_type, PydanticBaseModel): + return resolved_type.model_validate(value) + return value diff --git a/packages/commons/octobot_commons/dataclasses/minimizable_dataclass.py b/packages/commons/octobot_commons/dataclasses/minimizable_dataclass.py new file mode 100644 index 0000000000..5a2bda5125 --- /dev/null +++ b/packages/commons/octobot_commons/dataclasses/minimizable_dataclass.py @@ -0,0 +1,92 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import json +import pydantic + +import octobot_commons.dataclasses.flexible_dataclass as flexible_dataclass + + +class MinimizableDataclass(flexible_dataclass.FlexibleDataclass): + def to_dict(self, include_default_values=True) -> dict: + """ + Creates a new dict from self. Recursively processes any MinimizableDataclass instance attribute. + Pydantic models are converted via json.loads(model.model_dump_json()). + """ + if include_default_values: + return { + f.name: _convert_value_for_dict(getattr(self, f.name), include_default_values) + for f in dataclasses.fields(self) + } + factory = _asdict_without_default_factory( + (self.__class__,) + + tuple( + ( + getattr(self, attr.name)[0].__class__ + if isinstance(getattr(self, attr.name), list) + and getattr(self, attr.name) + else getattr(self, attr.name).__class__ + ) + for attr in dataclasses.fields(self) + ), + include_default_values=include_default_values, + ) + return dataclasses.asdict(self, dict_factory=factory) + +def _convert_value_for_dict(val, include_default_values: bool): + """Convert a value to a dict-serializable form, handling Pydantic models and nested MinimizableDataclass.""" + if isinstance(val, flexible_dataclass.FlexibleDataclass): + if to_dict_method := getattr(val, "to_dict", None): + return to_dict_method(include_default_values=include_default_values) + return { + f.name: _convert_value_for_dict(getattr(val, f.name), include_default_values) + for f in dataclasses.fields(val) + } + if isinstance(val, list): + return [_convert_value_for_dict(v, include_default_values) for v in val] + if isinstance(val, dict): + return {k: _convert_value_for_dict(v, include_default_values) for k, v in val.items()} + if isinstance(val, pydantic.BaseModel): + return json.loads(val.model_dump_json(indent=None, exclude_defaults=True)) + return val + + +def _asdict_without_default_factory(possible_classes, include_default_values=True): + def factory(obj) -> dict: + formatted_dict = {} + found_class = None + for possible_class in possible_classes: + if possible_class in (int, float, str, list, dict): + continue + if not dataclasses.is_dataclass(possible_class): + continue + if all(key in possible_class.__dataclass_fields__ for key, _ in obj): + found_class = possible_class + if found_class is None: + return dict(obj) + for key, val in obj: + field = found_class.__dataclass_fields__[key] + default_field_value = field.default + if default_field_value is dataclasses.MISSING: + default_factory = field.default_factory + if default_factory is not dataclasses.MISSING: + default_field_value = default_factory() + if default_field_value is dataclasses.MISSING or default_field_value != val: + formatted_dict[key] = _convert_value_for_dict(val, include_default_values) + + return formatted_dict + + return factory diff --git a/packages/commons/octobot_commons/dataclasses/updatable_dataclass.py b/packages/commons/octobot_commons/dataclasses/updatable_dataclass.py new file mode 100644 index 0000000000..a026ba1fc9 --- /dev/null +++ b/packages/commons/octobot_commons/dataclasses/updatable_dataclass.py @@ -0,0 +1,107 @@ +# pylint: disable=W0212 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses + + +@dataclasses.dataclass +class UpdatableDataclass: + _updated_fields: list[str] = dataclasses.field(default_factory=list, kw_only=True) + + def update(self, other) -> None: + """ + update self using another UpdatableDataclass + :param other: the other UpdatableDataclass to update self from + """ + for field_name in other._updated_fields: + self_val = getattr(self, field_name) + other_val = getattr(other, field_name) + if isinstance(self_val, UpdatableDataclass) and other_val: + self_val.update(other_val) + elif ( + isinstance(self_val, list) + and self_val + and isinstance(self_val[0], UpdatableDataclass) + ): + updated_list = [] + for i, other_element in enumerate(other_val): + if i < len(self_val): + self_element = self_val[i] + self_element.update(other_element) + updated_list.append(self_element) + else: + updated_list.append(other_element) + setattr(self, field_name, updated_list) + elif _should_be_changed(self_val, other_val): + setattr(self, field_name, other_val) + + def get_update(self, other): + """ + Creates a new instance of self.__class__ which fields will be set only if they changed between self and other + Requires a default constructor + :param other: the other UpdatableDataclass create the update from + :return: the UpdatableDataclass update containing differences between self and other + (unset values in other are ignored) + """ + update_content = self.__class__() + for field in dataclasses.fields(self): + field_name = field.name + self_val = getattr(self, field_name) + other_val = getattr(other, field_name) + if isinstance(self_val, UpdatableDataclass) and other_val: + update = self_val.get_update(other_val) + setattr(update_content, field_name, update) + if update._updated_fields: + update_content._updated_fields.append(field_name) + elif ( + isinstance(self_val, list) + and self_val + and isinstance(self_val[0], UpdatableDataclass) + ): + update_list = [] + has_updates = False + if self_val != other_val: + for i, other_element in enumerate(other_val): + if i < len(self_val): + self_element = self_val[i] + update = self_element.get_update(other_element) + update_list.append(update) + has_updates = has_updates or bool(update._updated_fields) + else: + update_list.append(other_element) + has_updates = True + setattr(update_content, field_name, update_list) + if has_updates or len(other_val) != len(self_val): + update_content._updated_fields.append(field_name) + elif _should_be_changed(self_val, other_val): + update_content._updated_fields.append(field_name) + setattr(update_content, field_name, other_val) + return update_content + + def to_dict_without_updated_fields(self) -> dict: + """ + :return: same as dataclasses.asdict(self) but without the + "_updated_fields" internal field added by this class + """ + dict_repr = dataclasses.asdict(self) + dict_repr.pop("_updated_fields", None) + return dict_repr + + +def _should_be_changed(current_value, new_value): + return ( + not current_value and new_value != current_value + ) or new_value != current_value diff --git a/packages/commons/octobot_commons/dict_util.py b/packages/commons/octobot_commons/dict_util.py new file mode 100644 index 0000000000..37d537b6bb --- /dev/null +++ b/packages/commons/octobot_commons/dict_util.py @@ -0,0 +1,148 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +def find_nested_value(dict_, field, list_indexes=None): + """ + Find a nested value in a dict + :param dict_: the dict + :param field: the field to search + :param list_indexes: indexes to go to on list elements. If not provided, each element of each list is explored + :return: a tuple : True if found else False, the dict at field value else the field + """ + if field in dict_: + return True, dict_[field] + for value in dict_.values(): + found_value = False + possible_value = None + if isinstance(value, dict): + found_value, possible_value = find_nested_value( + value, field, list_indexes=list_indexes + ) + elif isinstance(value, list): + found_value, possible_value = _find_nested_value_in_list( + value, field, list_indexes + ) + if found_value: + return found_value, possible_value + return False, field + + +def _find_nested_value_in_list(list_value, field, list_indexes): + if list_indexes: + # list_indexes is provided: only look at the given index + try: + item = list_value[list_indexes[0]] + if isinstance(item, dict): + found_value, possible_value = find_nested_value( + item, field, list_indexes=list_indexes[1:] + ) + if found_value: + return found_value, possible_value + except IndexError: + pass + else: + for item in list_value: + if isinstance(item, dict): + found_value, possible_value = find_nested_value( + item, field, list_indexes=list_indexes + ) + if found_value: + return found_value, possible_value + return False, field + + +def nested_update_dict( + base_dict: dict, updated_dict: dict, list_indexes=None, ignore_lists=False +): + """ + Updates a dict with values from another but keeps the 1st dict values when not specified + in the update dict. Handle nested values unlike the default dict.update(). + If a list is found in the dict, elements of the list are all updated + :param base_dict: the dict to be updated + :param updated_dict: the dict to take updated values from + :param list_indexes: indexes to go to on list elements. If not provided, each element of each list is explored + :param ignore_lists: when True, lists are not updated + """ + if isinstance(base_dict, list): + if ignore_lists: + return base_dict + if list_indexes: + nested_update_dict( + base_dict[list_indexes[0]], + updated_dict, + list_indexes=list_indexes[1:], + ignore_lists=ignore_lists, + ) + else: + for element in base_dict: + nested_update_dict(element, updated_dict) + return base_dict + for key, val in updated_dict.items(): + if isinstance(val, dict): + if key in base_dict: + nested_update_dict( + base_dict[key], + val, + list_indexes=list_indexes, + ignore_lists=ignore_lists, + ) + else: + base_dict[key] = val + elif ignore_lists and key in base_dict and isinstance(base_dict[key], list): + # list should be ignored + pass + else: + base_dict[key] = val + return base_dict + + +def check_and_merge_values_from_reference( + current_dict, reference_dict, exception_list, logger=None +): + """ + Check and merge dicts + :param current_dict: the dict to be merged + :param reference_dict: the reference dict + :param exception_list: the merge exception list + :param logger: the logger + """ + for key, val in reference_dict.items(): + if key not in current_dict: + current_dict[key] = val + if logger is not None: + logger.warning( + f"Missing {key} in configuration, added default value: {val}" + ) + elif isinstance(val, dict) and key not in exception_list: + check_and_merge_values_from_reference( + current_dict[key], val, exception_list, logger=logger + ) + + +def contains_each_element(element, val_by_keys_to_find): + """ + Check if each element in val_by_keys_to_find is in element + :param element: the dict to look into + :param val_by_keys_to_find: the dict of elements to find + """ + try: + for key, val in val_by_keys_to_find.items(): + if element[key] != val: + return False + return True + except KeyError: + return False diff --git a/packages/commons/octobot_commons/display/__init__.py b/packages/commons/octobot_commons/display/__init__.py new file mode 100644 index 0000000000..9191e0dd52 --- /dev/null +++ b/packages/commons/octobot_commons/display/__init__.py @@ -0,0 +1,34 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.display import display_translator +from octobot_commons.display.display_translator import ( + DisplayTranslator, + Element, +) + +from octobot_commons.display import display_factory +from octobot_commons.display.display_factory import ( + display_translator_factory, +) + +from octobot_commons.display import plot_settings +from octobot_commons.display.plot_settings import ( + PlotSettings, +) + + +__all__ = ["DisplayTranslator", "Element", "display_translator_factory", "PlotSettings"] diff --git a/packages/commons/octobot_commons/display/display_factory.py b/packages/commons/octobot_commons/display/display_factory.py new file mode 100644 index 0000000000..1ae8073cea --- /dev/null +++ b/packages/commons/octobot_commons/display/display_factory.py @@ -0,0 +1,28 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.tentacles_management as tentacles_management +import octobot_commons.display.display_translator as display_translator + + +def display_translator_factory(**kwargs): + """ + Returns a new instance of the available display_translator.DisplayTranslator implementation + :param kwargs: kwargs to pass to the construction + :return: the created instance + """ + return tentacles_management.get_single_deepest_child_class( + display_translator.DisplayTranslator + )(**kwargs) diff --git a/packages/commons/octobot_commons/display/display_translator.py b/packages/commons/octobot_commons/display/display_translator.py new file mode 100644 index 0000000000..a3d53beaf8 --- /dev/null +++ b/packages/commons/octobot_commons/display/display_translator.py @@ -0,0 +1,546 @@ +# pylint: disable=R0913,R0914,R0902,W0622,C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib + +import octobot_commons.logging as logging +import octobot_commons.constants as constants +import octobot_commons.enums as enums + + +class DisplayTranslator: + """ + Interface for simplifying displayed elements translation + """ + + INPUT_TYPE_TO_SCHEMA_TYPE = { + enums.UserInputTypes.INT.value: "number", + enums.UserInputTypes.FLOAT.value: "number", + enums.UserInputTypes.BOOLEAN.value: "boolean", + enums.UserInputTypes.TEXT.value: "text", + enums.UserInputTypes.OPTIONS.value: "options", + enums.UserInputTypes.MULTIPLE_OPTIONS.value: "array", + enums.UserInputTypes.OBJECT_ARRAY.value: "array", + enums.UserInputTypes.STRING_ARRAY.value: "array", + enums.UserInputTypes.OBJECT.value: "object", + constants.NESTED_TENTACLE_CONFIG: "object", + } + JSON_PROPERTY_AUTO_ORDER_START = 500 + DEFAULT_NUMBER_MULTIPLIER = 0.00000001 + + def __init__(self, element_type=enums.DisplayedElementTypes.CHART.value): + self.logger = logging.get_logger(self.__class__.__name__) + self.nested_elements = {} + self.elements = [] + self.type: str = element_type + + def to_json(self, name="root") -> dict: + """ + Return the json representation of this display + :param name: name of the root element + :return: the json compatible dict representation of this display + """ + return { + enums.PlotAttributes.NAME.value: name, + enums.PlotAttributes.TYPE.value: self.type, + enums.PlotAttributes.DATA.value: { + enums.PlotAttributes.SUB_ELEMENTS.value: [ + element.to_json(key) + for key, element in self.nested_elements.items() + if not element.is_empty() + ], + enums.PlotAttributes.ELEMENTS.value: [ + element.to_json() + for element in self.elements + if not element.is_empty() + ], + }, + } + + def add_parts_from_other(self, other_element): + """ + Adds the given "other_element" to the local nested elements + """ + self.nested_elements.update(other_element.nested_elements) + + def is_empty(self): + """ + :return: True if there is no element in self.elements or self.nested_elements + """ + return not (self.nested_elements or self.elements) + + @contextlib.contextmanager + def part(self, name, element_type=enums.DisplayedElementTypes.CHART.value): + """ + Adds a part to this display + :param name: name of the part + :param element_type: type of the part to add + """ + element = self.__class__(element_type=element_type) + self.nested_elements[name] = element + yield element + + def add_user_inputs(self, inputs, part=None, config_by_tentacles=None): + """ + add user inputs to the given part or self + """ + has_forced_config = config_by_tentacles is not None + config_by_tentacles = config_by_tentacles or {} + config_schema_by_tentacles = {} + tentacle_type_by_tentacles = {} + shown_tentacles = {} + nested_user_inputs_by_tentacle = self._extract_nested_user_inputs(inputs) + tentacle = None + for user_input_element in inputs: + try: + tentacle = user_input_element["tentacle"] + if user_input_element["is_nested_config"]: + # Do not display nested user input config as regular user inputs. + # These are mere models that are used in association with + # nested user inputs, which are used in the context of a + # nested tentacle configuration + if tentacle not in shown_tentacles: + shown_tentacles[tentacle] = False + else: + shown_tentacles[tentacle] = True + tentacle_type_by_tentacles[tentacle] = user_input_element[ + "tentacle_type" + ] + if tentacle not in config_schema_by_tentacles: + config_schema_by_tentacles[tentacle] = self._base_schema(tentacle) + if not has_forced_config: + config_by_tentacles[tentacle] = {} + if tentacle not in config_by_tentacles: + config_by_tentacles[tentacle][ + user_input_element["name"].replace(" ", "_") + ] = user_input_element["value"] + if user_input_element["parent_input_name"] is None: + # user input with parent_input_name are added alongside their parents, only add top + # level user inputs in schema + self._generate_schema( + config_schema_by_tentacles[tentacle], + user_input_element, + nested_user_inputs_by_tentacle, + ) + except KeyError as err: + self.logger.error( + f"Error when loading user inputs for {tentacle}: missing {err}" + ) + for tentacle, schema in config_schema_by_tentacles.items(): + (part or self).add_user_inputs_element( + "Inputs", + config_by_tentacles[tentacle], + schema, + tentacle, + tentacle_type_by_tentacles[tentacle], + not shown_tentacles[tentacle], + ) + + def _base_schema(self, tentacle): + return { + "type": "object", + "title": f"{tentacle} configuration", + "properties": {}, + } + + def _extract_nested_user_inputs(self, inputs): + user_inputs_by_tentacles = {} + for user_input in inputs: + if user_input["is_nested_config"] or user_input["parent_input_name"]: + tentacle = user_input["tentacle"] + if tentacle not in user_inputs_by_tentacles: + user_inputs_by_tentacles[tentacle] = {} + if user_input["is_nested_config"]: + user_inputs_by_tentacles[tentacle][ + user_input["name"].replace(" ", "_") + ] = user_input + else: + user_inputs_by_tentacles[tentacle][ + len(user_inputs_by_tentacles[tentacle]) + ] = user_input + return user_inputs_by_tentacles + + def _init_schema_properties(self, main_schema, user_input_element, title, def_val): + properties = { + "options": { + "in_summary": user_input_element.get("in_summary", True), + "in_optimizer": user_input_element.get("in_optimizer", True), + "custom_path": user_input_element.get("path", None), + } + } + # prioritize user defined order, otherwise keep the order inputs are saved + property_order = user_input_element.get("order", None) + properties["propertyOrder"] = ( + len(main_schema["properties"]) + self.JSON_PROPERTY_AUTO_ORDER_START + if property_order is None + else property_order + ) + name = user_input_element["name"] + properties["options"]["name"] = name.replace(" ", "_") + properties["title"] = name + if title: + # use title if available + properties["title"] = title + properties["default"] = def_val + min_val = user_input_element.get("min_val") + if min_val is not None: + properties["minimum"] = min_val + max_val = user_input_element.get("max_val") + if max_val is not None: + properties["maximum"] = max_val + if editor_options := user_input_element.get("editor_options"): + properties["options"].update(editor_options) + if ( + enums.UserInputOtherSchemaValuesTypes.DEPENDENCIES.value + in editor_options + ): + other_values = user_input_element.get("other_schema_values", {}) or {} + # when using dependencies, set field as not required as it might not be set + other_values["required"] = other_values.get("required", False) + user_input_element["other_schema_values"] = other_values + # waiting for fixes on + # - https://github.com/json-editor/json-editor/issues/1559 + # - https://github.com/json-editor/json-editor/issues/1621 + to_remove = [] + for key, val in editor_options[ + enums.UserInputOtherSchemaValuesTypes.DEPENDENCIES.value + ].items(): + if val is False: + # for now remove unsupported dependency + to_remove.append(key) + for key in to_remove: + editor_options[ + enums.UserInputOtherSchemaValuesTypes.DEPENDENCIES.value + ].pop(key) + self.logger.debug( + f"Removing unsupported 'False' dependency value for {key}" + ) + if other_schema_values := user_input_element.get("other_schema_values"): + properties.update(other_schema_values) + return properties + + def _adapt_to_input_type( + self, + user_input_element, + nested_user_inputs_by_tentacle, + properties, + input_type, + title, + def_val, + ): + try: + schema_type = self.INPUT_TYPE_TO_SCHEMA_TYPE[input_type] + if schema_type == "boolean": + properties["format"] = "checkbox" + elif schema_type == "number": + if input_type == enums.UserInputTypes.INT.value: + properties["multipleOf"] = 1 + elif input_type in ( + enums.UserInputTypes.STRING_ARRAY.value, + enums.UserInputTypes.OBJECT_ARRAY.value, + ): + self._adapt_to_specific_array_user_input( + user_input_element, + nested_user_inputs_by_tentacle, + properties, + input_type, + ) + elif schema_type in ("options", "array"): + options = user_input_element.get("options", []) + default_value = ( + def_val if def_val is not None else options[0] if options else None + ) + if schema_type == "options": + properties["default"] = (default_value,) + properties["format"] = "select" + properties["enum"] = options + # override schema_type as we couldn't know it before + schema_type = self._get_element_schema_type(options) + elif schema_type == "array": + properties["format"] = "select2" + properties["minItems"] = properties.get("minItems", 1) + properties["uniqueItems"] = True + properties["items"] = { + "title": title, + "type": self._get_element_schema_type(options), + "default": default_value, + "enum": options, + } + elif schema_type == "object": + self._adapt_to_object_user_input( + user_input_element, nested_user_inputs_by_tentacle, properties + ) + elif schema_type == "text": + schema_type = "string" + properties["minLength"] = properties.get("minLength", 1) + properties["type"] = schema_type + except KeyError as err: + self.logger.exception(err, True, f"Unknown input type: {err}") + + def _adapt_to_specific_array_user_input( + self, + user_input_element, + nested_user_inputs_by_tentacle, + properties, + input_type, + ): + # nested object in array, insert array first + properties["items"] = { + "type": ( + "object" + if input_type == enums.UserInputTypes.OBJECT_ARRAY.value + else "string" + ), + "properties": {}, + } + if item_title := user_input_element.get("item_title"): + properties["items"]["title"] = item_title + if input_type == enums.UserInputTypes.OBJECT_ARRAY.value: + for associated_user_input in self._get_associated_user_input( + user_input_element, nested_user_inputs_by_tentacle + ): + self._generate_schema( + properties["items"], + associated_user_input, + nested_user_inputs_by_tentacle, + ) + + def _adapt_to_object_user_input( + self, + user_input_element, + nested_user_inputs_by_tentacle, + properties, + ): + properties["properties"] = {} + nested_tentacle = user_input_element["nested_tentacle"] + if nested_tentacle: + for user_input_name in user_input_element["value"]: + properties["options"]["is_nested_tentacle"] = True + try: + self._generate_schema( + properties, + nested_user_inputs_by_tentacle[nested_tentacle][ + user_input_name + ], + nested_user_inputs_by_tentacle, + ) + except KeyError as err: + self.logger.warning( + f"Missing user input model for {err}. This element might not be " + f"associated to a tentacle" + ) + else: + for associated_user_input in self._get_associated_user_input( + user_input_element, nested_user_inputs_by_tentacle + ): + self._generate_schema( + properties, + associated_user_input, + nested_user_inputs_by_tentacle, + ) + + def _generate_schema( + self, main_schema, user_input_element, nested_user_inputs_by_tentacle + ): + title = user_input_element.get("title") + def_val = user_input_element.get("def_val") + properties = self._init_schema_properties( + main_schema, user_input_element, title, def_val + ) + if input_type := user_input_element.get("input_type"): + self._adapt_to_input_type( + user_input_element, + nested_user_inputs_by_tentacle, + properties, + input_type, + title, + def_val, + ) + main_schema["properties"][properties["options"]["name"]] = properties + + def _get_associated_user_input(self, user_input, nested_user_inputs_by_tentacle): + # include all user input associated to this one (same parent_input_name and tentacle) + return ( + associated_user_input + for associated_user_input in nested_user_inputs_by_tentacle[ + user_input["tentacle"] + ].values() + if associated_user_input["tentacle"] == user_input["tentacle"] + and associated_user_input["parent_input_name"] == user_input["name"] + ) + + def _get_element_schema_type(self, options): + default_type = "string" + try: + if isinstance(options[0], int): + return self.INPUT_TYPE_TO_SCHEMA_TYPE[enums.UserInputTypes.INT.value] + if isinstance(options[0], float): + return self.INPUT_TYPE_TO_SCHEMA_TYPE[enums.UserInputTypes.FLOAT.value] + if isinstance(options[0], bool): + return self.INPUT_TYPE_TO_SCHEMA_TYPE[ + enums.UserInputTypes.BOOLEAN.value + ] + if isinstance(options[0], str): + return default_type + except IndexError: + pass + except TypeError as error: + raise TypeError("a user input element is malformed.") from error + return default_type + + def add_user_inputs_element( + self, + name, + config_values, + schema, + tentacle, + tentacle_type, + is_hidden, + ): + """ + Add a user input type element to self.elements + """ + element = Element( + None, + None, + None, + title=name, + schema=schema, + config_values=config_values, + tentacle=tentacle, + tentacle_type=tentacle_type, + is_hidden=is_hidden, + type=enums.DisplayedElementTypes.INPUT.value, + ) + self.elements.append(element) + + +class Element: + def __init__( + self, + kind, + x, + y, + open=None, + high=None, + low=None, + close=None, + volume=None, + x_type=None, + y_type=None, + title=None, + text=None, + mode=None, + line_shape=None, + own_xaxis=False, + own_yaxis=False, + value=None, + config_values=None, + schema=None, + tentacle=None, + tentacle_type=None, + columns=None, + rows=None, + searches=None, + is_hidden=None, + type=enums.DisplayedElementTypes.CHART.value, + color=None, + html=None, + size=None, + symbol=None, + ): + self.kind = kind + self.x = x + self.y = y + self.open = open + self.high = high + self.low = low + self.close = close + self.volume = volume + self.x_type = x_type + self.y_type = y_type + self.title = title + self.text = text + self.mode = mode + self.line_shape = line_shape + self.own_xaxis = own_xaxis + self.own_yaxis = own_yaxis + self.value = value + self.config_values = config_values + self.schema = schema + self.tentacle = tentacle + self.tentacle_type = tentacle_type + self.columns = columns + self.rows = rows + self.searches = searches + self.is_hidden = is_hidden + self.type = type + self.color = color + self.html = html + self.size = size + self.symbol = symbol + + def to_json(self): + """ + :return: the json representation of self + """ + return { + enums.PlotAttributes.KIND.value: self.kind, + enums.PlotAttributes.X.value: self.x, + enums.PlotAttributes.Y.value: self.y, + enums.PlotAttributes.OPEN.value: self.open, + enums.PlotAttributes.HIGH.value: self.high, + enums.PlotAttributes.LOW.value: self.low, + enums.PlotAttributes.CLOSE.value: self.close, + enums.PlotAttributes.VOLUME.value: self.volume, + enums.PlotAttributes.X_TYPE.value: self.x_type, + enums.PlotAttributes.Y_TYPE.value: self.y_type, + enums.PlotAttributes.TITLE.value: self.title, + enums.PlotAttributes.TEXT.value: self.text, + enums.PlotAttributes.MODE.value: self.mode, + enums.PlotAttributes.LINE_SHAPE.value: self.line_shape, + enums.PlotAttributes.OWN_XAXIS.value: self.own_xaxis, + enums.PlotAttributes.OWN_YAXIS.value: self.own_yaxis, + enums.PlotAttributes.VALUE.value: self.value, + enums.PlotAttributes.CONFIG.value: self.config_values, + enums.PlotAttributes.SCHEMA.value: self.schema, + enums.PlotAttributes.TENTACLE.value: self.tentacle, + enums.PlotAttributes.TENTACLE_TYPE.value: self.tentacle_type, + enums.PlotAttributes.COLUMNS.value: self.columns, + enums.PlotAttributes.ROWS.value: self.rows, + enums.PlotAttributes.SEARCHES.value: self.searches, + enums.PlotAttributes.IS_HIDDEN.value: self.is_hidden, + enums.PlotAttributes.TYPE.value: self.type, + enums.PlotAttributes.COLOR.value: self.color, + enums.PlotAttributes.HTML.value: self.html, + enums.PlotAttributes.SIZE.value: self.size, + enums.PlotAttributes.SYMBOL.value: self.symbol, + } + + def is_empty(self): + """ + :return: False + """ + return False + + @staticmethod + def to_list(array, multiplier=1): + """ + :return: a new array in which each value is multiplied by multiplier + """ + if array is None: + return None + return [e * multiplier for e in array] diff --git a/packages/commons/octobot_commons/display/plot_settings.py b/packages/commons/octobot_commons/display/plot_settings.py new file mode 100644 index 0000000000..17f610c241 --- /dev/null +++ b/packages/commons/octobot_commons/display/plot_settings.py @@ -0,0 +1,33 @@ +# pylint: disable=R0913 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as enums + + +class PlotSettings: + def __init__( + self, + chart=enums.PlotCharts.MAIN_CHART.value, + x_multiplier=1000, + kind="scattergl", + mode="markers", + y_data=None, + ): + self.chart = chart + self.x_multiplier = x_multiplier + self.kind = kind + self.mode = mode + self.y_data = y_data diff --git a/packages/commons/octobot_commons/dsl_interpreter/__init__.py b/packages/commons/octobot_commons/dsl_interpreter/__init__.py new file mode 100644 index 0000000000..ab9bdd8090 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/__init__.py @@ -0,0 +1,85 @@ +# pylint: disable=R0801,R0401 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.dsl_interpreter.interpreter import Interpreter +from octobot_commons.dsl_interpreter.operator import ( + Operator, + OperatorParameterType, + ComputedOperatorParameterType, +) +from octobot_commons.dsl_interpreter.dictionnaries import ( + get_all_operators, + clear_get_all_operators_cache, +) +from octobot_commons.dsl_interpreter.operator_parameter import ( + OperatorParameter, + UNINITIALIZED_VALUE, +) +from octobot_commons.dsl_interpreter.operator_docs import OperatorDocs +from octobot_commons.dsl_interpreter.operators import ( + BinaryOperator, + UnaryOperator, + CompareOperator, + NaryOperator, + CallOperator, + NameOperator, + ExpressionOperator, + PreComputingCallOperator, + ReCallableOperatorMixin, + ReCallingOperatorResult, + ReCallingOperatorResultKeys, +) +from octobot_commons.dsl_interpreter.interpreter_dependency import ( + InterpreterDependency, +) +from octobot_commons.dsl_interpreter.parameters_util import ( + format_parameter_value, + resove_operator_params, + apply_resolved_parameter_value, + add_resolved_parameter_value, + has_unresolved_parameters, +) +from octobot_commons.dsl_interpreter.dsl_call_result import ( + DSLCallResult, +) + +__all__ = [ + "get_all_operators", + "clear_get_all_operators_cache", + "Interpreter", + "Operator", + "OperatorParameter", + "UNINITIALIZED_VALUE", + "OperatorDocs", + "BinaryOperator", + "UnaryOperator", + "CompareOperator", + "NaryOperator", + "CallOperator", + "NameOperator", + "ExpressionOperator", + "PreComputingCallOperator", + "ReCallableOperatorMixin", + "InterpreterDependency", + "format_parameter_value", + "resove_operator_params", + "apply_resolved_parameter_value", + "add_resolved_parameter_value", + "DSLCallResult", + "has_unresolved_parameters", + "ReCallingOperatorResult", + "ReCallingOperatorResultKeys", +] diff --git a/packages/commons/octobot_commons/dsl_interpreter/dictionnaries.py b/packages/commons/octobot_commons/dsl_interpreter/dictionnaries.py new file mode 100644 index 0000000000..6c2b4e0415 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/dictionnaries.py @@ -0,0 +1,65 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import functools + +import octobot_commons.dsl_interpreter +import octobot_commons.tentacles_management +import octobot_commons.constants + + +@functools.lru_cache(maxsize=16) +def get_all_operators( + *libraries: str, +) -> list["octobot_commons.dsl_interpreter.Operator"]: + """ + Get all operators from the DSL interpreter. + This function is cached and will return the same list of operators every time. + All operators must be subclasses of octobot_commons.dsl_interpreter.Operator + All operators must have been imported before calling this function for the first time. + :param libraries: List of libraries to filter operators by. If None, all operators will be returned. + By default, operators are in the octobot_commons.constants.BASE_OPERATORS_LIBRARY library. + """ + libraries_filter = set(libraries) if libraries else None + all_with_abstracts = ( + operator + for operator in octobot_commons.tentacles_management.get_all_classes_from_parent( + octobot_commons.dsl_interpreter.Operator + ) + if ( + libraries_filter is None + or operator.get_library() + in libraries_filter # pylint: disable=unsupported-membership-test + ) + # contextual operators should not be included by default + and operator.get_library() + != octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + ) + non_abstract_operators = [] + for operator in all_with_abstracts: + try: + operator.get_name() + non_abstract_operators.append(operator) + except NotImplementedError: + # this is an abstract operator + pass + return non_abstract_operators + + +def clear_get_all_operators_cache(): + """ + Clear the cache of the get_all_operators function. + """ + get_all_operators.cache_clear() diff --git a/packages/commons/octobot_commons/dsl_interpreter/dsl_call_result.py b/packages/commons/octobot_commons/dsl_interpreter/dsl_call_result.py new file mode 100644 index 0000000000..aadaca7d24 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/dsl_call_result.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import dataclasses +import typing + +import octobot_commons.dataclasses + + +@dataclasses.dataclass +class DSLCallResult(octobot_commons.dataclasses.FlexibleDataclass): + """ + Stores a DSL call result alongside its statement (and error if any) + """ + statement: str + result: typing.Optional[typing.Any] = None + error: typing.Optional[str] = None + + def succeeded(self) -> bool: + """ + Check if the DSL call succeeded + :return: True if the DSL call succeeded, False otherwise + """ + return self.error is None diff --git a/packages/commons/octobot_commons/dsl_interpreter/interpreter.py b/packages/commons/octobot_commons/dsl_interpreter/interpreter.py new file mode 100644 index 0000000000..cf00cee389 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/interpreter.py @@ -0,0 +1,405 @@ +# pylint: disable=too-many-branches,too-many-return-statements,too-many-locals,too-many-statements +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast +import typing +import octobot_commons.errors +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator +import octobot_commons.dsl_interpreter.interpreter_dependency as dsl_interpreter_dependency +import octobot_commons.dsl_interpreter.parameters_util as parameters_util +import octobot_commons.dsl_interpreter.dsl_call_result as dsl_call_result + + +class Interpreter: + """ + Interpreter class for parsing and interpreting DSL expressions. + Maps AST elements to Operator instances based on operator names. + """ + + def __init__( + self, operators: typing.List[typing.Type[dsl_interpreter_operator.Operator]] + ): + """ + Initialize the interpreter with a list of operator classes. + + Args: + operators: List of Operator subclasses to be used for interpretation + """ + # Save operators as a dictionary mapping operator name to operator class + self.operators_by_name: typing.Dict[ + str, typing.Type[dsl_interpreter_operator.Operator] + ] = {} + self.extend(operators) + self._operator_tree_or_constant: typing.Union[ + dsl_interpreter_operator.Operator, + dsl_interpreter_operator.ComputedOperatorParameterType, + ] = None + self._parsed_expression: typing.Optional[str] = None + + def extend( + self, operators: typing.List[typing.Type[dsl_interpreter_operator.Operator]] + ): + """ + Extend the interpreter with a list of operator classes. + """ + self.operators_by_name.update( + {operator_class.get_name(): operator_class for operator_class in operators} + ) + + async def interprete( + self, expression: str + ) -> dsl_interpreter_operator.ComputedOperatorParameterType: + """ + Interpret a string expression by parsing it with AST and mapping to operators. + + Args: + expression: String expression to interpret + + Returns: + Operator instance or literal value representing the interpreted expression + """ + self._parse_expression(expression) + return await self.compute_expression() + + def get_dependencies( + self, + ) -> list[dsl_interpreter_dependency.InterpreterDependency]: + """ + Get the dependencies of the interpreter's parsed expression. + """ + if self._operator_tree_or_constant is None: + raise octobot_commons.errors.DSLInterpreterError( + "Expression not prepared, call prepare() first" + ) + if isinstance( + self._operator_tree_or_constant, dsl_interpreter_operator.Operator + ): + all_deps = self._operator_tree_or_constant.get_dependencies() + # don't use set to avoid forcing the dependency to implement __hash__ + deduplicated_deps = [] + for dep in all_deps: + if dep not in deduplicated_deps: + deduplicated_deps.append(dep) + return deduplicated_deps + return [] + + def prepare(self, expression: str): + """ + Prepare the expression by parsing it, leaving it ready to be computed one or multiple times. + Call await self.compute_expression() to compute the expression. + """ + self._parse_expression(expression) + + def _parse_expression(self, expression: str): + """ + Parse the expression into an AST and store the result in self._operator_tree_or_constant. + """ + # Parse the expression into an AST + # mode: can be 'exec' if source consists of a sequence of statements, 'eval' if + # it consists of a single expression, or 'single' if it consists of a single + # interactive statement. + # docs: https://docs.python.org/3/library/functions.html#compile + self._parsed_expression = expression + try: + tree = ast.parse(expression, mode="eval") + self._operator_tree_or_constant = self._visit_node(tree.body) + except SyntaxError: + tree = ast.parse(expression, mode="single") + if len(tree.body) != 1: + raise octobot_commons.errors.DSLInterpreterError( + "Single statement required when using statement mode" + ) + self._operator_tree_or_constant = self._visit_node(tree.body[0]) + + async def compute_expression( + self, + ) -> dsl_interpreter_operator.ComputedOperatorParameterType: + """ + Compute the result of the expression stored in self._operator_tree_or_constant. + If the expression is a constant, return it directly. + If the expression is an operator, pre_compute and compute its result. + """ + if isinstance( + self._operator_tree_or_constant, dsl_interpreter_operator.Operator + ): + await self._operator_tree_or_constant.pre_compute() + return self._operator_tree_or_constant.compute() + return self._operator_tree_or_constant + + def get_top_operator(self) -> typing.Union[ + dsl_interpreter_operator.Operator, + dsl_interpreter_operator.ComputedOperatorParameterType, + ]: + """ + Return the top operator of the parsed expression. + """ + return self._operator_tree_or_constant + + async def compute_expression_with_result( + self, + ) -> dsl_call_result.DSLCallResult: + """ + Compute the result of the expression stored in self._operator_tree_or_constant. + If the expression is a constant, return it directly. + If the expression is an operator, pre_compute and compute its result. + """ + try: + return dsl_call_result.DSLCallResult( + statement=self._parsed_expression, + result=await self.compute_expression(), + ) + except octobot_commons.errors.ErrorStatementEncountered as err: + return dsl_call_result.DSLCallResult( + statement=self._parsed_expression, + error=err.args[0] if err.args else "" + ) + + def _visit_node(self, node: typing.Optional[ast.AST]) -> typing.Union[ + dsl_interpreter_operator.Operator, + dsl_interpreter_operator.ComputedOperatorParameterType, + ]: + """ + Recursively visit AST nodes and convert them to Operator instances or values. + + Args: + node: AST node to visit + + Returns: + Operator instance or literal value representing the node + """ + if node is None: + return None + + if isinstance(node, ast.Call): + # Function call: func(arg1, arg2, ...) + func_name = self._get_name_from_node(node.func) + if func_name in self.operators_by_name: + operator_class = self.operators_by_name[func_name] + # Convert arguments to Operator instances or values + args = [ + ( + self._get_value_from_constant_node(arg) + if isinstance(arg, ast.Constant) + else self._visit_node(arg) + ) + for arg in node.args + ] + kwargs = {} + for kw in node.keywords: + value = ( + self._get_value_from_constant_node(kw.value) + if isinstance(kw.value, ast.Constant) + else self._visit_node(kw.value) + ) + if kw.arg is not None: + kwargs[kw.arg] = value + else: + if isinstance(value, dict): + kwargs.update(value) + else: + raise octobot_commons.errors.UnsupportedOperatorError( + f"**kwargs must unpack a dict, got {type(value).__name__}" + ) + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, args, kwargs + ) + return operator_class(*args, **kwargs) + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown operator: {func_name}" + ) + + if isinstance(node, ast.BinOp): + # Binary operation: left op right + op_name = type(node.op).__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + left = self._visit_node(node.left) + right = self._visit_node(node.right) + return operator_class(left, right) + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown binary operator: {op_name}" + ) + + if isinstance(node, ast.UnaryOp): + # Unary operation: op operand + op_name = type(node.op).__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + operand = self._visit_node(node.operand) + return operator_class(operand) + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown unary operator: {op_name}" + ) + + if isinstance(node, ast.Compare): + # Comparison: left op right + # Handles both single comparisons (a < b) and chained comparisons (a < b <= c) + # Chained comparisons are decomposed into: (a < b) And (b <= c) + comparisons = [] + left = self._visit_node(node.left) + for op, comparator in zip(node.ops, node.comparators): + op_name = type(op).__name__ + if op_name not in self.operators_by_name: + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown comparison operator: {op_name}" + ) + operator_class = self.operators_by_name[op_name] + right = self._visit_node(comparator) + comparisons.append(operator_class(left, right)) + left = right + if len(comparisons) == 1: + return comparisons[0] + and_op_name = ast.And.__name__ + if and_op_name not in self.operators_by_name: + raise octobot_commons.errors.UnsupportedOperatorError( + f"Chained comparisons require the '{and_op_name}' operator" + ) + return self.operators_by_name[and_op_name](*comparisons) + + if isinstance(node, (ast.Constant)): + # Literal values: numbers, strings, booleans, None + return self._get_value_from_constant_node(node) + + if isinstance(node, ast.Name): + # Name reference: look up in operators_by_name + name = node.id + if name in self.operators_by_name: + operator_class = self.operators_by_name[name] + return operator_class() + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown name: {name}" + ) + + if isinstance(node, ast.BoolOp): + # Boolean operation: left op right + op_name = type(node.op).__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + operands = [self._visit_node(operand) for operand in node.values] + return operator_class(*operands) + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown BoolOp operator: {op_name}" + ) + + if isinstance(node, ast.IfExp): + # If expression: "body if test else orelse" + op_name = ast.IfExp.__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + test = self._visit_node(node.test) + body = self._visit_node(node.body) + orelse = self._visit_node(node.orelse) + return operator_class(test, body, orelse) + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown IfExp operator: {op_name}" + ) + + if isinstance(node, ast.Subscript): + # Subscript: array[index] + op_name = type(node).__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + array_or_list = self._visit_node(node.value) + index_or_slice = self._visit_node(node.slice) + context = node.ctx + return operator_class(array_or_list, index_or_slice, context) + + if isinstance(node, ast.List): + # List: [1, 2, 3] + op_name = ast.List.__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + operands = [self._visit_node(operand) for operand in node.elts] + return operator_class(*operands) + + if isinstance(node, ast.Dict): + # Dict: {"a": 1, "b": 2} or {"a": 1, **other} + op_name = ast.Dict.__name__ + result = {} + for key, value in zip(node.keys, node.values): + if key is not None: + result[self._visit_node(key)] = self._visit_node(value) + else: + unpacked = self._visit_node(value) + if isinstance(unpacked, dict): + result.update(unpacked) + else: + raise octobot_commons.errors.UnsupportedOperatorError( + f"** unpacking in dict requires a dict, got {type(unpacked).__name__}" + ) + return result + + if isinstance(node, ast.Slice): + # Slice: slice(1, 2, 3) + op_name = ast.Slice.__name__ + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + lower = self._visit_node(node.lower) + upper = self._visit_node(node.upper) + step = self._visit_node(node.step) + return operator_class(lower, upper, step) + + if isinstance(node, ast.Raise): + # Raise statement: raise exc [from cause] - maps to RaiseOperator + op_name = "raise" + if op_name in self.operators_by_name: + operator_class = self.operators_by_name[op_name] + args = [] + if node.exc is not None: + args.append( + self._get_value_from_constant_node(node.exc) + if isinstance(node.exc, ast.Constant) + else self._visit_node(node.exc) + ) + if node.cause is not None: + args.append( + self._get_value_from_constant_node(node.cause) + if isinstance(node.cause, ast.Constant) + else self._visit_node(node.cause) + ) + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, args, {} + ) + return operator_class(*args, **kwargs) + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unknown operator: {op_name}" + ) + + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unsupported AST node type: {type(node).__name__}. Expression: {self._parsed_expression}" + ) + + def _get_name_from_node(self, node: ast.AST) -> str: + """Extract the name from a function node.""" + if isinstance(node, ast.Name): + return node.id + # elif isinstance(node, ast.Attribute): ex: snake.colour => not supported + # return node.attr + raise octobot_commons.errors.UnsupportedOperatorError( + f"Cannot extract name from node type: {type(node).__name__}" + ) + + def _get_value_from_constant_node( + self, node: ast.Constant + ) -> dsl_interpreter_operator.ComputedOperatorParameterType: + """Extract a literal value from an AST constant node.""" + value = node.value + # Filter out unsupported types like complex numbers or Ellipsis + if isinstance(value, (str, int, float, bool, type(None), dict)): + return value + raise octobot_commons.errors.UnsupportedOperatorError( + f"Unsupported constant type: {type(value).__name__}" + ) diff --git a/packages/commons/octobot_commons/dsl_interpreter/interpreter_dependency.py b/packages/commons/octobot_commons/dsl_interpreter/interpreter_dependency.py new file mode 100644 index 0000000000..e1dec8751b --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/interpreter_dependency.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses + + +@dataclasses.dataclass +class InterpreterDependency: + pass diff --git a/packages/commons/octobot_commons/dsl_interpreter/operator.py b/packages/commons/octobot_commons/dsl_interpreter/operator.py new file mode 100644 index 0000000000..53b077d75c --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operator.py @@ -0,0 +1,240 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import collections +import numpy as np + +import octobot_commons.errors +import octobot_commons.constants +import octobot_commons.dsl_interpreter.interpreter_dependency as dsl_interpreter_dependency +import octobot_commons.dsl_interpreter.operator_parameter as dsl_interpreter_operator_parameter +import octobot_commons.dsl_interpreter.operator_docs as dsl_interpreter_operator_docs + +ComputedOperatorParameterType = typing.Union[ + str, int, float, bool, None, list, dict, np.ndarray +] +OperatorParameterType = typing.Union[ + str, int, float, bool, None, list, dict, np.ndarray, "Operator" +] + + +class Operator: + """ + Operator class is used to represent an operator in the DSL. + It can have one or more parameters which are used to compute the result of the operator. + """ + + MIN_PARAMS: typing.Optional[int] = ( + None # min number of parameters when not defined in get_parameters() + ) + MAX_PARAMS: typing.Optional[int] = ( + None # max number of parameters when not defined in get_parameters() + ) + NAME: str = ( + "" # name of the operator as written in the DSL expression, if not provided, get_name() will be used + ) + DESCRIPTION: str = "" # description of the operator + EXAMPLE: str = "" # example of the operator in the DSL + + def __init__(self, *parameters: OperatorParameterType, **kwargs: typing.Any): + self._validate_parameters(parameters, kwargs) + self.parameters = parameters + self.kwargs = kwargs + + @staticmethod + def get_name() -> str: + """ + Get the name of the operator, as seen in the AST parsed expression. + """ + raise NotImplementedError("get_name is not implemented") + + @staticmethod + def get_library() -> str: + """ + Get the library of the operator. + """ + return octobot_commons.constants.BASE_OPERATORS_LIBRARY + + def _validate_parameters( + self, parameters: list[OperatorParameterType], kwargs: dict[str, OperatorParameterType] + ) -> None: + """ + Validate the parameters of the operator. + """ + if self.MIN_PARAMS is not None and len(parameters) < self.MIN_PARAMS: + raise octobot_commons.errors.InvalidParametersError( + f"{self.get_name()} requires at least {self.MIN_PARAMS} parameter(s)" + ) + if self.MAX_PARAMS is not None and len(parameters) > self.MAX_PARAMS: + raise octobot_commons.errors.InvalidParametersError( + f"{self.get_name()} supports up to {self.MAX_PARAMS} parameters" + ) + if expected_parameters := self.get_parameters(): + min_params = len(tuple(p for p in expected_parameters if p.required)) + max_params = len(tuple(p for p in expected_parameters)) + total_params = len(parameters) + len(kwargs) + if total_params < min_params: + raise octobot_commons.errors.InvalidParametersError( + f"{self.get_name()} requires at least {min_params} " + f"parameter(s): {self.get_parameters_description()}" + ) + if max_params is not None and total_params > max_params: + raise octobot_commons.errors.InvalidParametersError( + f"{self.get_name()} got {total_params} parameters " + f"({', '.join([str(p) for p in tuple(parameters) + tuple(kwargs.values())])}) " + f"but supports up to {max_params} " + f"parameters: {self.get_parameters_description()}" + ) + + @classmethod + def get_parameters_description(cls) -> str: + """ + Get the description of the parameters of the operator. + """ + return ", ".join( + (f"{i+1}: {param}" for i, param in enumerate(cls.get_parameters())) + ) + + @classmethod + def get_docs(cls) -> dsl_interpreter_operator_docs.OperatorDocs: + """ + Get the documentation of the operator. + """ + return dsl_interpreter_operator_docs.OperatorDocs( + name=cls.NAME or cls.get_name(), + description=cls.DESCRIPTION, + type=cls.get_library(), + example=cls.EXAMPLE, + parameters=cls.get_parameters(), + ) + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter_operator_parameter.OperatorParameter]: + """ + return: the description of the parameters of the operator. + """ + return [] + + async def pre_compute(self) -> None: # rename pre_compute + """ + Refreshes the operator data, override if necessary. + Will always be called before compute() + """ + for parameter in self.parameters: + if isinstance(parameter, Operator): + await parameter.pre_compute() + + def compute(self) -> ComputedOperatorParameterType: + """ + Compute the result of the operator considering its computed parameters. + """ + raise NotImplementedError("compute is not implemented") + + def get_computed_parameters(self) -> list[ComputedOperatorParameterType]: + """ + Get the computed parameters of the operator. + Here computed means that any nested operator has already been computed. + """ + return [self._get_computed_parameter(parameter) for parameter in self.parameters] + + def _get_computed_parameter(self, parameter: OperatorParameterType) -> ComputedOperatorParameterType: + if isinstance(parameter, Operator): + return parameter.compute() + if isinstance(parameter, dict): + return {self._get_computed_parameter(k): self._get_computed_parameter(v) for k, v in parameter.items()} + if isinstance(parameter, list): + return [self._get_computed_parameter(v) for v in parameter] + return parameter + + def get_computed_kwargs(self) -> dict[str, ComputedOperatorParameterType]: + """ + Get the computed kwargs of the operator. + """ + return { + kw: value.compute() if isinstance(value, Operator) else value + for kw, value in self.kwargs.items() + } + + def get_computed_value_by_parameter(self) -> dict[str, ComputedOperatorParameterType]: + """ + Get the COMPUTED value of each parameter by its name. + """ + computed_parameters_queue = collections.deque(self.get_computed_parameters()) + computed_kwargs = self.get_computed_kwargs() + return self._get_value_by_parameter(computed_parameters_queue, computed_kwargs) # type: ignore + + def get_input_value_by_parameter(self) -> dict[str, OperatorParameterType]: + """ + Get the raw input (uncomputed) value of each parameter by its name. + """ + return self._get_value_by_parameter( + collections.deque(self.parameters), dict(self.kwargs) + ) + + def _get_value_by_parameter( + self, + args: collections.deque[OperatorParameterType], + kwargs: dict[str, OperatorParameterType] + ) -> dict[str, OperatorParameterType]: + """ + Get the value of each parameter by its name. + If a value is not provided, the default value will be used if available, + otherwise the parameter will be skipped. + """ + value_by_parameter = {} + for parameter in self.get_parameters(): + # 1. non kw parameters are first + if args: + value_by_parameter[parameter.name] = args.popleft() + else: + # 2. no more non kw parameters, explore kw parameters + if parameter.name in kwargs: + if parameter.name in value_by_parameter: + raise octobot_commons.errors.InvalidParametersError( + f"Parameter {parameter.name} has multiple values" + ) + value_by_parameter[parameter.name] = kwargs[parameter.name] + kwargs.pop(parameter.name) + else: + # 3. try to get the default value if set + if parameter.default is not dsl_interpreter_operator_parameter.UNSET_VALUE: + value_by_parameter[parameter.name] = parameter.default + if kwargs: + parameter_names = [p.name for p in self.get_parameters()] # use a list to preserve order + if unknown_parameters := { + k: v for k, v in kwargs.items() if k not in parameter_names + }: + raise octobot_commons.errors.InvalidParametersError( + f"Parameter(s) {', '.join(f"'{k}'" for k in unknown_parameters.keys())} " + f"are unknown. Supported parameters: {', '.join(parameter_names)}" + ) + raise octobot_commons.errors.InvalidParametersError( + f"Parameter(s) {', '.join(f"'{k}'" for k in kwargs.keys())} " + f"have multiple values" + ) + return value_by_parameter + + def get_dependencies( + self, + ) -> list[dsl_interpreter_dependency.InterpreterDependency]: + """ + Get the dependencies of the operator. + """ + dependencies = [] + for parameter in self.parameters: + if isinstance(parameter, Operator): + dependencies.extend(parameter.get_dependencies()) + return dependencies diff --git a/packages/commons/octobot_commons/dsl_interpreter/operator_docs.py b/packages/commons/octobot_commons/dsl_interpreter/operator_docs.py new file mode 100644 index 0000000000..962dca08f8 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operator_docs.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses + + +import octobot_commons.dsl_interpreter.operator_parameter as dsl_interpreter_operator_parameter + + +@dataclasses.dataclass +class OperatorDocs: + """ + Operator documentation class, used to store operators metadata to + generate an operator documentation. + """ + + name: str + description: str + type: str + example: str + parameters: list[dsl_interpreter_operator_parameter.OperatorParameter] + + def to_json(self) -> dict: + """ + Convert the operator documentation to a JSON serializable dict. + """ + return { + "name": self.name, + "description": self.description, + "type": self.type, + "example": self.example, + "parameters": [parameter.to_json() for parameter in self.parameters], + } diff --git a/packages/commons/octobot_commons/dsl_interpreter/operator_parameter.py b/packages/commons/octobot_commons/dsl_interpreter/operator_parameter.py new file mode 100644 index 0000000000..7293f0f2df --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operator_parameter.py @@ -0,0 +1,50 @@ +# pylint: disable=too-many-branches,too-many-return-statements +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import typing + + +UNSET_VALUE = "UNSET_VALUE" +UNINITIALIZED_VALUE = object() + + +@dataclasses.dataclass +class OperatorParameter: + name: str + description: str + required: bool + type: typing.Type[typing.Any] + default: typing.Any = UNSET_VALUE + + def __repr__(self) -> str: + default_str = f' (default: {self.default})' if self.default is not UNSET_VALUE else '' + return ( + f"{self.name}{' (required)' if self.required else default_str}" + f"[{self.type.__name__}] - {self.description}" + ) + + def to_json(self) -> dict: + """ + Convert the operator parameter to a JSON serializable dict. + """ + return { + "name": self.name, + "description": self.description, + "required": self.required, + "type": self.type.__name__, + "default": self.default, + } diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/__init__.py b/packages/commons/octobot_commons/dsl_interpreter/operators/__init__.py new file mode 100644 index 0000000000..7591c19131 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/__init__.py @@ -0,0 +1,68 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.dsl_interpreter.operators.binary_operator import ( + BinaryOperator, +) +from octobot_commons.dsl_interpreter.operators.compare_operator import ( + CompareOperator, +) +from octobot_commons.dsl_interpreter.operators.unary_operator import ( + UnaryOperator, +) +from octobot_commons.dsl_interpreter.operators.n_ary_operator import ( + NaryOperator, +) +from octobot_commons.dsl_interpreter.operators.call_operator import ( + CallOperator, +) +from octobot_commons.dsl_interpreter.operators.name_operator import ( + NameOperator, +) +from octobot_commons.dsl_interpreter.operators.expression_operator import ( + ExpressionOperator, +) +from octobot_commons.dsl_interpreter.operators.subscripting_operator import ( + SubscriptingOperator, +) +from octobot_commons.dsl_interpreter.operators.iterable_operator import ( + IterableOperator, +) +from octobot_commons.dsl_interpreter.operators.pre_computing_call_operator import ( + PreComputingCallOperator, +) +from octobot_commons.dsl_interpreter.operators.re_callable_operator_mixin import ( + ReCallableOperatorMixin, + ReCallingOperatorResult, + ReCallingOperatorResultKeys, +) + +__all__ = [ + "BinaryOperator", + "CompareOperator", + "UnaryOperator", + "NaryOperator", + "CallOperator", + "NameOperator", + "ExpressionOperator", + "SubscriptingOperator", + "IterableOperator", + "PreComputingCallOperator", + "ReCallableOperatorMixin", + "ReCallingOperatorResult", + "ReCallingOperatorResultKeys", +] diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/binary_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/binary_operator.py new file mode 100644 index 0000000000..4adeee9c9c --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/binary_operator.py @@ -0,0 +1,50 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class BinaryOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for binary operators. + Binary operators have two operands. + """ + + def __init__( + self, + left: dsl_interpreter_operator.OperatorParameterType, + right: dsl_interpreter_operator.OperatorParameterType, + **kwargs: typing.Any + ): + """ + Initialize the binary operator with its left and right operands. + """ + super().__init__(left, right, **kwargs) + + def get_computed_left_and_right_parameters( + self, + ) -> typing.Tuple[ + dsl_interpreter_operator.ComputedOperatorParameterType, + dsl_interpreter_operator.ComputedOperatorParameterType, + ]: + """ + Get the computed left and right computed operands. + """ + computed_parameters = self.get_computed_parameters() + return computed_parameters[0], computed_parameters[1] diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/call_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/call_operator.py new file mode 100644 index 0000000000..3873900727 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/call_operator.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class CallOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for call operators (function calls). + Call operators can have variable arity (unary, binary, n-ary). + """ diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/compare_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/compare_operator.py new file mode 100644 index 0000000000..a02689a418 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/compare_operator.py @@ -0,0 +1,50 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class CompareOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for compare operators. + Compare operators have two operands. + """ + + def __init__( + self, + left: dsl_interpreter_operator.OperatorParameterType, + right: dsl_interpreter_operator.OperatorParameterType, + **kwargs: typing.Any + ): + """ + Initialize the compare operator with its left and right operands. + """ + super().__init__(left, right, **kwargs) + + def get_computed_left_and_right_parameters( + self, + ) -> typing.Tuple[ + dsl_interpreter_operator.ComputedOperatorParameterType, + dsl_interpreter_operator.ComputedOperatorParameterType, + ]: + """ + Get the computed left and right computed operands. + """ + computed_parameters = self.get_computed_parameters() + return computed_parameters[0], computed_parameters[1] diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/expression_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/expression_operator.py new file mode 100644 index 0000000000..38d2d131ad --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/expression_operator.py @@ -0,0 +1,24 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class ExpressionOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for expression operators (ex: if, elif, else). + """ diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/iterable_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/iterable_operator.py new file mode 100644 index 0000000000..d279e5ea60 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/iterable_operator.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class IterableOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for iterable operators. + Iterable operators have one or more operands. + """ diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/n_ary_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/n_ary_operator.py new file mode 100644 index 0000000000..bb4b8816d5 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/n_ary_operator.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class NaryOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for n-ary operators. + N-ary operators have one or more operands. + """ diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/name_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/name_operator.py new file mode 100644 index 0000000000..248f50a800 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/name_operator.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class NameOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for name operators (variable/constant name references). + Name operators have no parameters and return a constant or computed value. + """ diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/pre_computing_call_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/pre_computing_call_operator.py new file mode 100644 index 0000000000..e6ba30f207 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/pre_computing_call_operator.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import octobot_commons.errors +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator +import octobot_commons.dsl_interpreter.operator_parameter as dsl_interpreter_operator_parameter +import octobot_commons.dsl_interpreter.operators.call_operator as dsl_interpreter_call_operator + + +class PreComputingCallOperator( + dsl_interpreter_call_operator.CallOperator +): # pylint: disable=abstract-method + """ + Base class for pre-computing call operators (function calls). + Pre-computing call operators are call operators that must be + pre-computed before being computed. + """ + def __init__(self, *parameters: dsl_interpreter_operator.OperatorParameterType, **kwargs: typing.Any): + super().__init__(*parameters, **kwargs) + self.value: dsl_interpreter_operator.ComputedOperatorParameterType = ( + dsl_interpreter_operator_parameter.UNINITIALIZED_VALUE + ) # type: ignore + + async def pre_compute(self) -> None: + await super().pre_compute() + self.value = dsl_interpreter_operator_parameter.UNINITIALIZED_VALUE # type: ignore + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + if self.value is dsl_interpreter_operator_parameter.UNINITIALIZED_VALUE: + raise octobot_commons.errors.DSLInterpreterError( + "{self.__class__.__name__} has not been pre_computed" + ) + return self.value diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/re_callable_operator_mixin.py b/packages/commons/octobot_commons/dsl_interpreter/operators/re_callable_operator_mixin.py new file mode 100644 index 0000000000..fda68f056e --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/re_callable_operator_mixin.py @@ -0,0 +1,171 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import typing +import time +import enum + +import octobot_commons.dataclasses +import octobot_commons.dsl_interpreter.operator_parameter as operator_parameter +import octobot_commons.dsl_interpreter.parameters_util as parameters_util + + +class ReCallingOperatorResultKeys(str, enum.Enum): + WAITING_TIME = "waiting_time" + LAST_EXECUTION_TIME = "last_execution_time" + SCRIPT_OVERRIDE = "script_override" + + +@dataclasses.dataclass +class ReCallingOperatorResult(octobot_commons.dataclasses.MinimizableDataclass): + keyword: typing.Optional[str] = None + reset_to_id: typing.Optional[str] = None + last_execution_result: typing.Optional[dict] = None + + @staticmethod + def is_re_calling_operator_result(result: typing.Any) -> bool: + """ + Check if the result is a re-calling operator result. + """ + return isinstance(result, dict) and ( + ReCallingOperatorResult.__name__ in result + ) + + def get_next_call_time(self) -> typing.Optional[float]: + """ + Returns the next call time based on the last execution result's + waiting time and last execution time. + """ + if ( + self.last_execution_result + and (waiting_time := self.last_execution_result.get(ReCallingOperatorResultKeys.WAITING_TIME.value)) + ): + last_execution_time = self.last_execution_result.get( + ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value + ) or time.time() + return last_execution_time + waiting_time + return None + + @staticmethod + def get_script_override(result: typing.Any) -> typing.Optional[str]: + """ + Returns the script override from the last execution result. + """ + if not ReCallingOperatorResult.is_re_calling_operator_result(result): + return None + return result[ReCallingOperatorResult.__name__].get("last_execution_result", {}).get( + ReCallingOperatorResultKeys.SCRIPT_OVERRIDE.value + ) + + @staticmethod + def get_keyword(result: typing.Any) -> typing.Optional[str]: + """ + Returns the keyword from the re-calling operator result. + """ + return result[ReCallingOperatorResult.__name__]["keyword"] + + +class ReCallableOperatorMixin: + """ + Mixin for re-callable operators. + """ + LAST_EXECUTION_RESULT_KEY = "last_execution_result" + + @classmethod + def get_re_callable_parameters(cls) -> list[operator_parameter.OperatorParameter]: + """ + Returns the parameters for the re-callable operator. + """ + return [ + operator_parameter.OperatorParameter( + name=cls.LAST_EXECUTION_RESULT_KEY, + description="the return value of the previous call", + required=False, + type=dict, + default=None, + ), + ] + + def get_last_execution_result( + self, param_by_name: dict[str, typing.Any] + ) -> typing.Optional[dict]: + """ + Returns the potential last execution result from param_by_name. + """ + if ( + (result_dict := param_by_name.get(self.LAST_EXECUTION_RESULT_KEY, None)) + and ReCallingOperatorResult.is_re_calling_operator_result(result_dict) + ): + return ReCallingOperatorResult.from_dict(result_dict[ + ReCallingOperatorResult.__name__ + ]).last_execution_result + return None + + def create_re_callable_result( # pylint: disable=too-many-arguments + self, + keyword: str, + reset_to_id: typing.Optional[str] = None, + waiting_time: typing.Optional[float] = None, + last_execution_time: typing.Optional[float] = None, + script_override: typing.Optional[str] = None, + **kwargs: typing.Any, + ) -> ReCallingOperatorResult: + """ + Builds a re-callable result from the given parameters. + """ + return ReCallingOperatorResult( + keyword=keyword, + reset_to_id=reset_to_id, + last_execution_result={ + ReCallingOperatorResultKeys.WAITING_TIME.value: waiting_time, + ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: last_execution_time, + ReCallingOperatorResultKeys.SCRIPT_OVERRIDE.value: script_override, + **kwargs, + }, + ) + + def create_re_callable_result_dict( # pylint: disable=too-many-arguments + self, + keyword: str, + reset_to_id: typing.Optional[str] = None, + waiting_time: typing.Optional[float] = None, + last_execution_time: typing.Optional[float] = None, + script_override: typing.Optional[str] = None, + **kwargs: typing.Any, + ) -> dict: + """ + Builds a dict formatted re-callable result from the given parameters. + """ + return { + ReCallingOperatorResult.__name__: self.create_re_callable_result( + keyword=keyword, + reset_to_id=reset_to_id, + waiting_time=waiting_time, + last_execution_time=last_execution_time, + script_override=script_override, + **kwargs, + ).to_dict(include_default_values=False) + } + + def re_create_script(self, param_by_name: dict[str, typing.Any]): + """ + Returns the re-created script from the given parameters. + """ + param_without_re_callable_operator_params = { + k: v for k, v in param_by_name.items() if k != self.LAST_EXECUTION_RESULT_KEY + } + params = parameters_util.resove_operator_params(self, param_without_re_callable_operator_params) + return f"{self.get_name()}({', '.join(params)})" # type: ignore diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/subscripting_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/subscripting_operator.py new file mode 100644 index 0000000000..b8729216d9 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/subscripting_operator.py @@ -0,0 +1,26 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class SubscriptingOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for subscripting operators (array/list subscripting). + Subscripting operators have 3 parameters: the array/list, the index or slice and the context. + # https://docs.python.org/3/library/ast.html#subscripting + """ diff --git a/packages/commons/octobot_commons/dsl_interpreter/operators/unary_operator.py b/packages/commons/octobot_commons/dsl_interpreter/operators/unary_operator.py new file mode 100644 index 0000000000..e08eca4172 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/operators/unary_operator.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class UnaryOperator( + dsl_interpreter_operator.Operator +): # pylint: disable=abstract-method + """ + Base class for unary operators. + Unary operators have one operand. + """ + + def __init__( + self, + operand: dsl_interpreter_operator.OperatorParameterType, + **kwargs: typing.Any + ): + """ + Initialize the unary operator with its operand. + """ + super().__init__(operand, **kwargs) + + def get_computed_operand( + self, + ) -> dsl_interpreter_operator.ComputedOperatorParameterType: + """ + Get the computed operand of the unary operator. + """ + computed_parameters = self.get_computed_parameters() + return computed_parameters[0] diff --git a/packages/commons/octobot_commons/dsl_interpreter/parameters_util.py b/packages/commons/octobot_commons/dsl_interpreter/parameters_util.py new file mode 100644 index 0000000000..2cdac24022 --- /dev/null +++ b/packages/commons/octobot_commons/dsl_interpreter/parameters_util.py @@ -0,0 +1,240 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import re +import typing +import json + +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator +import octobot_commons.errors +import octobot_commons.constants + + +def format_parameter_value(value: typing.Any) -> str: # pylint: disable=too-many-return-statements + """ + Formats a parameter value to a string usable in a DSL expression. + Handles special cases for some values (ex: lists, dicts, ...). + """ + if value is None: + return "None" + if isinstance(value, bool): + return "True" if value else "False" + if isinstance(value, (int, float)): + return repr(value) + if isinstance(value, str): + try: + parsed = json.loads(value) + if isinstance(parsed, list): + return repr(parsed) + if isinstance(parsed, dict): + return repr(parsed) + except (json.JSONDecodeError, TypeError): + return repr(value) + if isinstance(value, list): + return repr(value) + if isinstance(value, dict): + return repr(value) + return repr(value) + + +def resove_operator_params( + operator_class: dsl_interpreter_operator.Operator, + param_value_by_name: dict[str, typing.Any] +) -> list[str]: + """ + Resolves operator parameters to a list of positional and keyword arguments. + Returns a list of formatted strings usable in a DSL expression. + """ + operator_params = operator_class.get_parameters() + required_params = [p for p in operator_params if p.required] + optional_params = [p for p in operator_params if not p.required] + positional_parts = [] + keyword_parts = [] + for param_def in required_params: + name = param_def.name + if name in param_value_by_name: + value = param_value_by_name[name] + positional_parts.append( + format_parameter_value(value) + ) + for param_def in optional_params: + name = param_def.name + if name in param_value_by_name: + value = param_value_by_name[name] + keyword_parts.append(f"{name}={format_parameter_value(value)}") + return positional_parts + keyword_parts + + +def resolve_operator_args_and_kwargs( + operator_class: typing.Type[dsl_interpreter_operator.Operator], + args: typing.List, + kwargs: typing.Dict[str, typing.Any], +) -> typing.Tuple[typing.List, typing.Dict[str, typing.Any]]: + """ + For operators with get_parameters(), merge positional args and kwargs + into a single args tuple in parameter order. This ensures validation + passes when using named parameters (e.g. xyz(1, p2=2) where p2 is a required parameter). + """ + expected_params = operator_class.get_parameters() + if not expected_params: + return args, kwargs + + max_params = len(expected_params) + merged_args = [] + args_index = 0 + remaining_kwargs = dict(kwargs) + + for param in expected_params: + if args_index < len(args): + merged_args.append(args[args_index]) + args_index += 1 + elif param.name in remaining_kwargs: + merged_args.append(remaining_kwargs.pop(param.name)) + else: + # Parameter not provided - leave for Operator's default handling + break + + if args_index < len(args): + raise octobot_commons.errors.InvalidParametersError( + f"{operator_class.get_name()} supports up to {max_params} " + f"parameters: {operator_class.get_parameters_description()}" + ) + + return merged_args, remaining_kwargs + + +def apply_resolved_parameter_value(script: str, parameter: str, value: typing.Any): + """ + Apply a resolved parameter value to a DSL script. + """ + to_replace = f"{parameter}={octobot_commons.constants.UNRESOLVED_PARAMETER_PLACEHOLDER}" + if to_replace in script: + return script.replace( + to_replace, + f"{parameter}={format_parameter_value(value)}" + ) + to_replace_in_dict = f"{parameter!r}: {octobot_commons.constants.UNRESOLVED_PARAMETER_PLACEHOLDER!r}" + if to_replace_in_dict in script: + return script.replace( + to_replace_in_dict, + f"{parameter!r}: {format_parameter_value(value)}" + ) + raise octobot_commons.errors.ResolvedParameterNotFoundError( + f"Parameter {parameter} not found in script: {script}" + ) + + +def _find_matching_close_paren(source: str, open_paren_index: int) -> int: + if open_paren_index >= len(source) or source[open_paren_index] != "(": + raise octobot_commons.errors.InvalidParametersError( + f"Expected '(' at index {open_paren_index} in script: {source!r}" + ) + nesting_depth = 0 + for char_index in range(open_paren_index, len(source)): + char = source[char_index] + if char == "(": + nesting_depth += 1 + elif char == ")": + nesting_depth -= 1 + if nesting_depth == 0: + return char_index + raise octobot_commons.errors.InvalidParametersError( + f"Script {source} has unclosed parenthesis" + ) + + +def _inject_kwarg_into_call( + source: str, + open_paren_index: int, + close_paren_index: int, + parameter: str, + formatted_kwarg: str, +) -> str: + existing_call_arguments = source[open_paren_index + 1 : close_paren_index] + # Match keyword only at the top level of this call's argument list: after `(` is + # stripped, so use start-of-string or comma — not `(|`, which would miss `op(x=1)`. + if re.search(rf"(?:^|,)\s*{re.escape(parameter)}\s*=", existing_call_arguments): + raise octobot_commons.errors.InvalidParametersError( + f"Parameter {parameter} is already in operator keyword args: " + f"{source[open_paren_index : close_paren_index + 1]}" + ) + if not existing_call_arguments.strip(): + call_arguments_with_kwarg = f"{existing_call_arguments}{formatted_kwarg}" + else: + call_arguments_with_kwarg = f"{existing_call_arguments.rstrip()}, {formatted_kwarg}" + return ( + source[: open_paren_index + 1] + + call_arguments_with_kwarg + + source[close_paren_index:] + ) + + +def add_resolved_parameter_value(script: str, operator: str, parameter: str, value: typing.Any) -> str: + """ + Append a resolved keyword argument to every call to ``operator`` in ``script``. + Supports: + - Calls with no parenthesis when the whole script is only the operator name + (e.g. op -> op(x='a')) + - Calls with no existing params (e.g. op() -> op(x='a')) + - Calls with existing params (e.g. op(1) -> op(1, x='a')) + - Multiple calls (e.g. wait(1) if wait(2) -> both wait(...) gain the new kwarg) + Raises InvalidParametersError if the parameter is already in one of those calls' kwargs, + or if parentheses are unbalanced. + """ + formatted_kwarg = f"{parameter}={format_parameter_value(value)}" + operator_name_pattern = re.compile(rf"(?<![\w.])\b{re.escape(operator)}\b") + + call_opening_and_closing_indices: list[tuple[int, int]] = [] + for operator_match in operator_name_pattern.finditer(script): + index_after_operator_name = operator_match.end() + while index_after_operator_name < len(script) and script[index_after_operator_name] in " \t": + index_after_operator_name += 1 + if index_after_operator_name < len(script) and script[index_after_operator_name] == "(": + opening_paren_index = index_after_operator_name + closing_paren_index = _find_matching_close_paren(script, opening_paren_index) + call_opening_and_closing_indices.append((opening_paren_index, closing_paren_index)) + + if call_opening_and_closing_indices: + updated_script = script + for opening_paren_index, closing_paren_index in sorted( + call_opening_and_closing_indices, + key=lambda open_close: open_close[0], + reverse=True, + ): + updated_script = _inject_kwarg_into_call( + updated_script, + opening_paren_index, + closing_paren_index, + parameter, + formatted_kwarg, + ) + return updated_script + + if "(" in script: + raise octobot_commons.errors.InvalidParametersError( + f"Operator {operator!r} call sites not found or script has unclosed parenthesis: {script!r}" + ) + + if operator_name_pattern.fullmatch(script.strip()) is not None: + return f"{script.strip()}({formatted_kwarg})" + + return f"{script}({formatted_kwarg})" + + +def has_unresolved_parameters(script: str) -> bool: + """ + Check if a DSL script has unresolved parameters. + """ + return octobot_commons.constants.UNRESOLVED_PARAMETER_PLACEHOLDER in script diff --git a/packages/commons/octobot_commons/enums.py b/packages/commons/octobot_commons/enums.py new file mode 100644 index 0000000000..70fcdcfa32 --- /dev/null +++ b/packages/commons/octobot_commons/enums.py @@ -0,0 +1,536 @@ +# pylint: disable=C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + + +class TimeFrames(enum.Enum): + """ + OctoBot supported time frames values + """ + + ONE_MINUTE = "1m" + THREE_MINUTES = "3m" + FIVE_MINUTES = "5m" + FIFTEEN_MINUTES = "15m" + THIRTY_MINUTES = "30m" + ONE_HOUR = "1h" + TWO_HOURS = "2h" + THREE_HOURS = "3h" + FOUR_HOURS = "4h" + SIX_HOURS = "6h" + HEIGHT_HOURS = "8h" + TWELVE_HOURS = "12h" + ONE_DAY = "1d" + THREE_DAYS = "3d" + ONE_WEEK = "1w" + ONE_MONTH = "1M" + ONE_YEAR = "1y" + + +TimeFramesMinutes = { + TimeFrames.ONE_MINUTE: 1, + TimeFrames.THREE_MINUTES: 3, + TimeFrames.FIVE_MINUTES: 5, + TimeFrames.FIFTEEN_MINUTES: 15, + TimeFrames.THIRTY_MINUTES: 30, + TimeFrames.ONE_HOUR: 60, + TimeFrames.TWO_HOURS: 120, + TimeFrames.THREE_HOURS: 180, + TimeFrames.FOUR_HOURS: 240, + TimeFrames.SIX_HOURS: 360, + TimeFrames.HEIGHT_HOURS: 480, + TimeFrames.TWELVE_HOURS: 720, + TimeFrames.ONE_DAY: 1440, + TimeFrames.THREE_DAYS: 4320, + TimeFrames.ONE_WEEK: 10080, + TimeFrames.ONE_MONTH: 43200, + TimeFrames.ONE_YEAR: 524160, +} + + +class PriceIndexes(enum.Enum): + """ + Default candle price index correspondence + """ + + IND_PRICE_TIME = 0 + IND_PRICE_OPEN = 1 + IND_PRICE_HIGH = 2 + IND_PRICE_LOW = 3 + IND_PRICE_CLOSE = 4 + IND_PRICE_VOL = 5 + + +class PriceStrings(enum.Enum): + """ + Default candle price str + """ + + STR_PRICE_TIME = "time" + STR_PRICE_CLOSE = "close" + STR_PRICE_OPEN = "open" + STR_PRICE_HIGH = "high" + STR_PRICE_LOW = "low" + STR_PRICE_VOL = "vol" + + +class OptionTypes(enum.Enum): + """ + Default option type + """ + + PUT = "P" + CALL = "C" + + +class PlatformsName(enum.Enum): + """ + OctoBot supported platforms name + """ + + WINDOWS = "nt" + LINUX = "posix" + MAC = "mac" + + +class OctoBotTypes(enum.Enum): + """ + OctoBot running types + """ + + BINARY = "binary" + PYTHON = "python" + DOCKER = "docker" + + +class MarkdownFormat(enum.Enum): + """ + Markdown formating + """ + + ITALIC = "_" + BOLD = "*" + CODE = "`" + IGNORE = 1 + NONE = 0 + + +class OctoBotChannelSubjects(enum.Enum): + """ + OctoBot Channel subjects + """ + + NOTIFICATION = "notification" + CREATION = "creation" + UPDATE = "update" + DELETION = "deletion" + ERROR = "error" + + +class UserCommands(enum.Enum): + """ + Allowed user commands + """ + + MANUAL_TRIGGER = "manual_trigger" + OPTIMIZE_INITIAL_PORTFOLIO = "optimize_initial_portfolio" + TRIGGER_HEALTH_CHECK = "trigger_health_check" + RELOAD_CONFIG = "reload_config" + RELOAD_SCRIPT = "reload_script" + CLEAR_PLOTTING_CACHE = "clear_plotting_cache" + CLEAR_SIMULATED_ORDERS_CACHE = "clear_simulated_orders_cache" + CLEAR_SIMULATED_TRADES_CACHE = "clear_simulated_trades_cache" + CLEAR_SIMULATED_TRANSACTIONS_CACHE = "clear_simulated_transactions_cache" + + +class MultiprocessingLocks(enum.Enum): + """ + Keys to multiprocessing lock + """ + + DBLock = "db_lock" + + +class CacheDatabaseTables(enum.Enum): + """ + Tables in cache databases + """ + + CACHE = "cache" + METADATA = "metadata" + + +class CacheDatabaseColumns(enum.Enum): + """ + Keys/columns in cache databases tables + """ + + TIMESTAMP = "t" + VALUE = "v" + TYPE = "ty" + TRIGGERED_AFTER_CANDLES_CLOSE = "triggered_after_candles_close" + + +class PlotAttributes(enum.Enum): + KIND = "kind" + X = "x" + Y = "y" + Z = "z" + OPEN = "open" + HIGH = "high" + LOW = "low" + CLOSE = "close" + VOLUME = "volume" + TITLE = "title" + TEXT = "text" + SUB_ELEMENTS = "sub_elements" + ELEMENTS = "elements" + NAME = "name" + DATA = "data" + X_TYPE = "x_type" + Y_TYPE = "y_type" + MODE = "mode" + LINE_SHAPE = "line_shape" + OWN_XAXIS = "own_xaxis" + OWN_YAXIS = "own_yaxis" + SIDE = "side" + VALUE = "value" + CONFIG = "config" + SCHEMA = "schema" + TENTACLE = "tentacle" + TENTACLE_TYPE = "tentacle_type" + COLUMNS = "columns" + ROWS = "rows" + SEARCHES = "searches" + IS_HIDDEN = "is_hidden" + TYPE = "type" + COLOR = "color" + HTML = "html" + SIZE = "size" + SHAPE = "shape" + SYMBOL = "symbol" + + +class BacktestingMetadata(enum.Enum): + ID = "id" + GAINS = "gains" + PERCENT_GAINS = "% gains" + MARKETS_PROFITABILITY = "markets profitability" + END_PORTFOLIO = "end portfolio" + START_PORTFOLIO = "start portfolio" + WIN_RATE = "% win rate" + DRAW_DOWN = "% draw down" + COEFFICIENT_OF_DETERMINATION_MAX_BALANCE = "R² max balance" + COEFFICIENT_OF_DETERMINATION_END_BALANCE = "R² end balance" + SYMBOLS = "symbols" + TIME_FRAMES = "time frames" + START_TIME = "start time" + END_TIME = "end time" + DURATION = "duration" + ENTRIES = "entries" + WINS = "wins" + LOSES = "loses" + TRADES = "trades" + TIMESTAMP = "timestamp" + NAME = "name" + LEVERAGE = "leverage" + OPTIMIZATION_CAMPAIGN = "optimization campaign" + USER_INPUTS = "user inputs" + BACKTESTING_FILES = "backtesting files" + CHILDREN = "children" + OPTIMIZER_ID = "optimizer id" + EXCHANGE = "exchange" + + +class DBRows(enum.Enum): + ID = "id" + REFERENCE_MARKET = "ref_market" + EXCHANGE = "exchange" + EXCHANGES = "exchanges" + FUTURE_CONTRACTS = "future_contracts" + PAIR = "pair" + TIME_FRAME = "time_frame" + VALUE = "value" + START_TIME = "start_time" + END_TIME = "end_time" + TRADING_TYPE = "trading_type" + TRADING_MODE = "trading_mode" + SYMBOL = "symbol" + SYMBOLS = "symbols" + FEES_AMOUNT = "fees_amount" + FEES_CURRENCY = "fees_currency" + + +class PlotCharts(enum.Enum): + MAIN_CHART = "main-chart" + SUB_CHART = "sub-chart" + + +class DisplayedElementTypes(enum.Enum): + CHART = "chart" + INPUT = "input" + TABLE = "table" + VALUE = "value" + DICTIONARY = "dictionary" + + +class DBTables(enum.Enum): + METADATA = "metadata" + INPUTS = "inputs" + PORTFOLIO = "portfolio" + ORDERS = "all_orders" + HISTORICAL_ORDERS_UPDATES = "order_updates" + TRADES = "all_trades" + TRANSACTIONS = "all_transactions" + CANDLES = "candles" + CANDLES_SOURCE = "candles_source" + CACHE_SOURCE = "cache_source" + + +class ActivationTopics(enum.Enum): + """ + Events that can trigger actions + """ + + FULL_CANDLES = "once per bar close" + IN_CONSTRUCTION_CANDLES = "once per second (Live Price)" + RECENT_TRADES = "recent trades" + EVALUATION_CYCLE = "after evaluators" + + +class TriggerSource(enum.Enum): + INITIALIZATION = "initialization" + EVALUATION_MATRIX = "evaluation_matrix" + EVALUATOR_REFRESH = "evaluator_refresh" + OHLCV = "ohlcv" + KLINE = "kline" + ORDER = "order" + TRADE = "trade" + PRICE = "price" + BALANCE = "balance" + POSITION = "position" + CONFIGURATION_UPDATE = "configuration_update" + MANUAL = "manual" + UNDEFINED = "undefined" + + +class DataBaseOrderBy(enum.Enum): + """ + Database orders + """ + + ASC = "ASC" + DESC = "DESC" + + +class DataBaseOperations(enum.Enum): + """ + Database operators + """ + + SUP = ">" + INF = "<" + EQUALS = "=" + INF_EQUALS = f"{INF}{EQUALS}" + SUP_EQUALS = f"{SUP}{EQUALS}" + + +class RunDatabases(enum.Enum): + """ + Database identifiers + """ + + HISTORY = "history" + LIVE = "live" + BACKTESTING = "backtesting" + OPTIMIZER = "optimizer" + OPTIMIZER_RUNS_SCHEDULE_DB = "runs_schedule" + RUN_DATA_DB = "run_data" + PORTFOLIO_VALUE_DB = "portfolio_value" + HISTORICAL_PORTFOLIO_VALUE = "historical_portfolio_value" + ORDERS_DB = "orders" + TRADES_DB = "trades" + TRANSACTIONS_DB = "transactions" + EXCHANGES = "exchanges" + METADATA = "metadata" + + +class LogicalOperators(enum.Enum): + """ + Logical operators + """ + + LOWER_THAN = "lower_than" + HIGHER_THAN = "higher_than" + LOWER_OR_EQUAL_TO = "lower_or_equal_to" + HIGHER_OR_EQUAL_TO = "higher_or_equal_to" + EQUAL_TO = "equal_to" + DIFFERENT_FROM = "different_from" + + +class CommunityFeedAttrs(enum.Enum): + ID = "u" + STREAM_ID = "i" + VALUE = "s" + VERSION = "v" + TIMESTAMP = "d" + CHANNEL_TYPE = "t" + + +class CommunityChannelTypes(enum.Enum): + CONFIGURATION = "cfg" + SIGNAL = "t" + TRADINGVIEW = "tv" + ALERT = "alert" + + +class SignalBundlesAttrs(enum.Enum): + IDENTIFIER = "identifier" + SIGNALS = "signals" + VERSION = "version" + + +class SignalsAttrs(enum.Enum): + TOPIC = "topic" + CONTENT = "content" + DEPENDENCIES = "dependencies" + + +class SignalDependenciesAttrs(enum.Enum): + DEPENDENCY = "dependency" + + +class InitializationEventExchangeTopics(enum.Enum): + POSITIONS = "positions" + BALANCE = "balance" + PROFITABILITY = "profitability" + ORDERS = "orders" + TRADES = "trades" + CONTRACTS = "contracts" + CANDLES = "candles" + PRICE = "price" + ORDER_BOOK = "order_book" + FUNDING = "funding" + MARKETS = "markets" + + +class UserInputTentacleTypes(enum.Enum): + TRADING_MODE = "trading_mode" + EVALUATOR = "evaluator" + EXCHANGE = "exchange" + BLOCKCHAIN_WALLET = "blockchain_wallet" + WEB_PLUGIN = "web_plugin" + AUTOMATION = "automation" + UNDEFINED = "undefined" + + +class UserInputTypes(enum.Enum): + INT = "int" + FLOAT = "float" + BOOLEAN = "boolean" + OPTIONS = "options" + MULTIPLE_OPTIONS = "multiple-options" + TEXT = "text" + OBJECT = "object" + OBJECT_ARRAY = "object_array" + STRING_ARRAY = "string_array" + + +class UserInputEditorOptionsTypes(enum.Enum): + # source for the available options: + # https://github.com/json-editor/json-editor#editor-options + + # If set to true, the editor will start collapsed (works for objects and arrays) + COLLAPSED = "collapsed" + # If set to true, the "add row" button will be hidden (works for arrays) + DISABLE_ARRAY_ADD = "disable_array_add" + # If set to true, all of the "delete" buttons will be hidden (works for arrays) + DISABLE_ARRAY_DELETE = "disable_array_delete" + # If set to true, just the "delete all rows" + # button will be hidden (works for arrays) + DISABLE_ARRAY_DELETE_ALL_ROWS = "disable_array_delete_all_rows" + # If set to true, just the "delete last row" + # buttons will be hidden (works for arrays) + DISABLE_ARRAY_DELETE_LAST_ROW = "disable_array_delete_last_row" + # If set to true, the "move up/down" buttons will be hidden (works for arrays) + DISABLE_ARRAY_REORDER = "disable_array_reorder" + # If set to true, the collapse button will be hidden (works for objects and arrays) + DISABLE_COLLAPSE = "disable_collapse" + # If set to true, the Edit JSON button will be hidden (works for objects) + DISABLE_EDIT_JSON = "disable_edit_json" + # If set to true, the Edit Properties button will be hidden (works for objects) + DISABLE_PROPERTIES = "disable_properties" + # If set to true, array controls (add, delete etc) will be + # displayed at top of list (works for arrays) + ARRAY_CONTROLS_TOP = "array_controls_top" + # See Enum options (https://github.com/json-editor/json-editor#enum-options) + ENUM = "enum" + # An array of display values to use for select box options in the same + # order as defined with the enum keyword. Works with schema using enum values. + ENUM_TITLES = "enum_titles" + # If set to true, the input will auto expand/contract to fit the content. + # Works best with textareas. + EXPAND_HEIGHT = "expand_height" + # Explicitly set the number of grid columns (1-12) for the editor + # if it's within an object using a grid layout. + GRID_COLUMNS = "grid_columns" + # If set to true, the editor will not appear in the UI (works for all types) + HIDDEN = "hidden" + # Explicitly set the height of the input element. Should be a valid CSS + # width string (e.g. "100px"). Works best with textareas. + INPUT_HEIGHT = "input_height" + # Explicitly set the width of the input element. Should be a valid CSS + # width string (e.g. "100px"). Works for string, number, and integer data types. + INPUT_WIDTH = "input_width" + # If set to true for an object, empty object properties + # (i.e. those with falsy values) will not be returned by getValue(). + REMOVE_EMPTY_PROPERTIES = "remove_empty_properties" + + +class UserInputOtherSchemaValuesTypes(enum.Enum): + DESCRIPTION = "description" + DEPENDENCIES = "dependencies" + + +class ProfileComplexity(enum.Enum): + EASY = 1 + MEDIUM = 2 + DIFFICULT = 3 + + +class ProfileRisk(enum.Enum): + LOW = 1 + MODERATE = 2 + HIGH = 3 + + +class ProfileType(enum.Enum): + LIVE = "live" + BACKTESTING = "backtesting" + + +class SignalHistoryTypes(enum.Enum): + GPT = "gpt" + + +class StopReason(enum.Enum): + MISSING_API_KEY_TRADING_RIGHTS = "missing_api_key_trading_rights" + INVALID_EXCHANGE_CREDENTIALS = "invalid_exchange_credentials" + STOP_CONDITION_TRIGGERED = "stop_condition_triggered" + MISSING_MINIMAL_FUNDS = "missing_minimal_funds" + INVALID_CONFIG = "invalid_config" + UNKNOWN = "unknown" diff --git a/packages/commons/octobot_commons/errors.py b/packages/commons/octobot_commons/errors.py new file mode 100644 index 0000000000..ace0dfedb3 --- /dev/null +++ b/packages/commons/octobot_commons/errors.py @@ -0,0 +1,189 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class ConfigError(Exception): + """ + Config related Exception + """ + + +class RemoteConfigError(ConfigError): + """ + Fetched config related Exception + """ + + +class NoProfileError(Exception): + """ + Profile related Exception: raised when the current profile can't be found and default profile can't be loaded + """ + + +class ProfileConflictError(Exception): + """ + Profile related Exception: raised when the current profile can't be renamed as expected + """ + + +class ProfileRemovalError(Exception): + """ + Profile related Exception: raised when the current profile can't be can't be removed + """ + + +class ProfileImportError(Exception): + """ + Profile related Exception: raised when the imported profile is invalid + """ + + +class ProfileDataError(Exception): + """ + Profile related Exception: raised when the profile data is invalid + """ + + +class ConfigEvaluatorError(Exception): + """ + Evaluator config related Exception + """ + + +class ConfigTradingError(Exception): + """ + Trading config related Exception + """ + + +class TentacleNotFound(Exception): + """ + Tentacle not found related Exception + """ + + +class UninitializedCache(Exception): + """ + Raised when a cache is requested but has not yet been initialized + """ + + +class NoCacheValue(Exception): + """ + Raised when a cache value is selected but is not available in database + """ + + +class UncachableValue(Exception): + """ + Raised when a cache value is selected but is not available in database + """ + + +class DatabaseNotFoundError(Exception): + """ + Raised when a database can't be found + """ + + +class MissingDataError(Exception): + """ + Raised when there is not enough available candles + """ + + +class MissingExchangeDataError(Exception): + """ + Raised when there is no available data for this exchange + """ + + +class ExecutionAborted(Exception): + """ + Raised when the current execution should be aborted + """ + + +class LogicalOperatorError(Exception): + """ + Raised when a logical operation is invalid + """ + + +class UnsupportedError(Exception): + """ + Raised when an unsupported message is received + """ + + +class InvalidUserInputError(Exception): + """ + Raised when a user input in invalid + """ + + +class MissingSignalBuilder(Exception): + """ + Raised when a signal builder is not found + """ + + +class DSLInterpreterError(Exception): + """ + Raised when a DSL interpreter error occurs + """ + + +class UnsupportedOperatorError(DSLInterpreterError): + """ + Raised when an unknown operator is encountered + """ + + +class InvalidParametersError(DSLInterpreterError): + """ + Raised when the parameters of an operator are invalid + """ + + +class MissingDefaultValueError(InvalidParametersError): + """ + Raised when a parameter has no default value + """ + + +class InvalidParameterFormatError(InvalidParametersError): + """ + Raised when the format of a parameter is invalid + """ + + +class ResolvedParameterNotFoundError(DSLInterpreterError): + """ + Raised when a resolved parameter is not found in the script + """ + + +class ErrorStatementEncountered(DSLInterpreterError): + """ + Raised when a error statement is encountered when executing a script + """ + + +class MaxAttemptsExceededError(ErrorStatementEncountered): + """ + Raised when a max attempts is exceeded when executing a script + """ diff --git a/packages/commons/octobot_commons/evaluators_util.py b/packages/commons/octobot_commons/evaluators_util.py new file mode 100644 index 0000000000..a8b74c12a8 --- /dev/null +++ b/packages/commons/octobot_commons/evaluators_util.py @@ -0,0 +1,49 @@ +# pylint: disable=R0913 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.constants as constants + +UNSET_EVAL_TYPE = "unset_eval_type_param" + + +def check_valid_eval_note( + eval_note, + eval_type=UNSET_EVAL_TYPE, + expected_eval_type=None, + eval_time=None, + expiry_delay=None, + current_time=None, +): + """ + Will also test evaluation type if if eval_type is provided. + :param eval_note: The evaluation value + :param eval_type: The evaluation type + :param expected_eval_type: The expected type. Default is EVALUATOR_EVAL_DEFAULT_TYPE + :param eval_time: The evaluation time + :param expiry_delay: The allowed evaluation delay + :param current_time: The current time + :return: True when evaluation value is valid + """ + if eval_type != UNSET_EVAL_TYPE and ( + eval_type != expected_eval_type or expected_eval_type is None + ): + return False + return ( + eval_note is not None + and eval_note is not constants.START_PENDING_EVAL_NOTE + and (eval_time is None or eval_time + expiry_delay - current_time > 0) + ) diff --git a/packages/commons/octobot_commons/external_resources_manager.py b/packages/commons/octobot_commons/external_resources_manager.py new file mode 100644 index 0000000000..78b9a25dc6 --- /dev/null +++ b/packages/commons/octobot_commons/external_resources_manager.py @@ -0,0 +1,80 @@ +# pylint: disable=W0703,W3101 +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import requests + +import octobot_commons.logging as logging_util +import octobot_commons.constants as constants + + +def _handle_exception(exception, resource_key, catch_exception, default_response): + """ + Handle exception when fetching external resources + :param exception: the exception + :param resource_key: the resource key + :param catch_exception: if exception should be caught + :param default_response: the default response + :return: the default response if an exception has been caught + """ + if catch_exception: + logging_util.get_logger("ExternalResourcesManager").warning( + f"Exception when calling get_external_resource for {resource_key} key: {exception}" + ) + return default_response + raise exception + + +def get_external_resource( + resource_key, catch_exception=False, default_response="" +) -> object: + """ + Get an external resource + :param resource_key: the resource key + :param catch_exception: if exception should be caught + :param default_response: the default response + :return: the external resource key value + """ + try: + external_resources = json.loads( + requests.get(constants.EXTERNAL_RESOURCE_URL).text + ) + return external_resources[resource_key] + except Exception as global_exception: + return _handle_exception( + global_exception, resource_key, catch_exception, default_response + ) + + +async def async_get_external_resource( + resource_key, aiohttp_session, catch_exception=False, default_response="" +) -> object: + """ + Get an external resource in async way + :param resource_key: the resource key + :param aiohttp_session: the aiohttp session + :param catch_exception: if exception should be caught + :param default_response: the default reponse + :return: the external resource key value + """ + try: + async with aiohttp_session.get(constants.EXTERNAL_RESOURCE_URL) as resp: + external_resources = json.loads(resp.text()) + return external_resources[resource_key] + except Exception as global_exception: + return _handle_exception( + global_exception, resource_key, catch_exception, default_response + ) diff --git a/packages/commons/octobot_commons/html_util.py b/packages/commons/octobot_commons/html_util.py new file mode 100644 index 0000000000..c8ab9627dd --- /dev/null +++ b/packages/commons/octobot_commons/html_util.py @@ -0,0 +1,140 @@ +# pylint: disable=W0718, W1203 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import html.parser +import collections +import typing + +# avoid cyclic imports with commons_logging +import logging + + +_IGNORED_ELEMENTS = ["script", "button"] +DEFAULT_ELEMENT_TEXT_MAX_SIZE = 100 +MAX_RECURSIVE_EXCEPTION_CAUSES_DEPTH = 20 + + +def summarize_page_content( + html_content: str, max_element_text_size: int = DEFAULT_ELEMENT_TEXT_MAX_SIZE +) -> list[tuple[str, str]]: + """ + :return: a list of (tag, content) tuples representing a html page's useful content + """ + parser = _SummarizerHTMLParser(max_element_text_size) + parser.feed(html_content) + return parser.summary + + +def pretty_print_summary(summary: list[tuple[str, str]]) -> str: + """ + :return: a str representing the summary of the page + """ + return "; ".join(f"{element[0]}<{element[1]}>" for element in summary) + + +def is_html_content(html_content: str) -> bool: + """ + :return: True if the given html_content looks like html + """ + return "</html>" in html_content + + +def get_html_summary_if_relevant( + html_content: typing.Any, max_element_text_size: int = DEFAULT_ELEMENT_TEXT_MAX_SIZE +) -> typing.Any: + """ + :return: the str summary of the given html_content if + it is html, the given html_content otherwise + """ + try: + str_html_content = ( + html_content if isinstance(html_content, str) else str(html_content) + ) + if is_html_content(str_html_content): + return pretty_print_summary( + summarize_page_content( + str_html_content, max_element_text_size=max_element_text_size + ) + ) + return str_html_content + except BaseException as err: + logging.getLogger("html_util").error( + f"Error when parsing html_content '{html_content}', " + f"error: {err} ({err.__class__.__name__})" + ) + return html_content + + +def summarize_exception_html_cause_if_relevant(exception: BaseException, depth=0): + """ + Recursively updates args and the __cause__ attribute + of the exception to summarize html content if any + """ + try: + # Optimistic consideration of attributes being available: should always be the case. + # However, if this is not the case, just catch the error not forward it + if exception is not None: + exception.args = tuple( + get_html_summary_if_relevant(arg) for arg in exception.args + ) + # condition should not be necessary but still make sure to avoid infinite recursive loops + if depth < MAX_RECURSIVE_EXCEPTION_CAUSES_DEPTH: + # recursive call to make sure nested causes are also summarized + summarize_exception_html_cause_if_relevant( + exception.__cause__, depth=depth + 1 + ) + except BaseException: + # Can't format html, nothing to do: just stop processing + pass + + +class _SummarizerHTMLParser(html.parser.HTMLParser): + """ + Walks through the give html document and stores its relevant into self.summary + """ + + # from https://docs.python.org/3/library/html.parser.html + + def __init__(self, max_element_text_size: int): + super().__init__() + self.summary: list[tuple[str, str]] = [] + + self._path = collections.deque() + self._max_element_text_size = max_element_text_size + + def handle_starttag(self, tag, attrs): + self._path.append(tag) + + def handle_endtag(self, tag): + self._path.pop() + + def handle_data(self, data): + cleared_data = data.strip() + if len(cleared_data) > self._max_element_text_size: + cleared_data = f"{cleared_data[:self._max_element_text_size]}[...]" + if len(cleared_data) > 1: + try: + element_name = self._path[-1] + if element_name not in _IGNORED_ELEMENTS: + self.summary.append((element_name, cleared_data)) + except IndexError: + # before or after html content + self.summary.append(("message", cleared_data)) + except BaseException as err: + logging.getLogger(self.__class__.__name__).error( + f"Error when parsing element for: '{data}', " + f"error: {err} ({err.__class__.__name__})", + ) diff --git a/packages/commons/octobot_commons/json_util.py b/packages/commons/octobot_commons/json_util.py new file mode 100644 index 0000000000..faddf8eb27 --- /dev/null +++ b/packages/commons/octobot_commons/json_util.py @@ -0,0 +1,193 @@ +# pylint: disable=W0718 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import os.path +import shutil +import decimal +import typing + +import octobot_commons.dataclasses as commons_dataclasses +import octobot_commons.logging +import octobot_commons.constants + +try: + import jsonschema +except ImportError: + if octobot_commons.constants.USE_MINIMAL_LIBS: + # mock jsonschema imports + class JsonschemaImportMock: + def validate( + self, instance, schema + ): # pylint: disable=missing-function-docstring + raise ImportError("jsonschema not installed") + + jsonschema = JsonschemaImportMock() + else: + raise + + +LOGGER_NAME = "json_util" + + +def validate(config, schema_file) -> None: + """ + Validate a config file, raise upon validation error + :param config: the config + :param schema_file: the config schema + :return: None + """ + with open(schema_file) as json_schema: + loaded_schema = json.load(json_schema) + jsonschema.validate(instance=config, schema=loaded_schema) + + +def has_same_content(file_path: str, expected_content: dict) -> bool: + """ + :return: True if the content of the parsed json file at file_path equals the given expected_content + """ + if os.path.isfile(file_path): + content = read_file(file_path, raise_errors=False) + return content == expected_content + return False + + +def read_file( + file_path: str, + raise_errors: bool = True, + on_error_value: dict = None, + open_mode="r", +) -> dict: + """ + Read a load the given file with json.load() + :param file_path: file to read + :param raise_errors: when True will forward errors. Will just log errors otherwise + :param on_error_value: return this value when raise_errors is False and an error occurs + :param open_mode: the file open mode to give to open() + :return: the parsed file or default value on error if possible + """ + try: + with open(file_path, open_mode) as open_file: + return json.load(open_file) + except PermissionError as err: + if raise_errors: + raise + octobot_commons.logging.get_logger(LOGGER_NAME).error( + f"Permission error when reading {file_path} file: {err}." + ) + except Exception as err: + if raise_errors: + raise + octobot_commons.logging.get_logger(LOGGER_NAME).exception( + f"Unexpected error when reading {file_path} file: {err}." + ) + if on_error_value is None: + raise ValueError("on_error_value is unset") + return on_error_value + + +def safe_dump(content: dict, save_path: str) -> None: + """ + Safely dump content into save_path restoring the previous content if writing fails + """ + restore_file = f"{save_path}{octobot_commons.constants.SAFE_DUMP_SUFFIX}" + has_restore_file = False + try: + if os.path.exists(save_path): + if os.path.exists(restore_file): + os.remove(restore_file) + # prepare a restoration file + shutil.copy(save_path, restore_file) + has_restore_file = True + except Exception as err: + # when failing to create restore file + error_details = ( + f"Failed to create {restore_file} backup file. Is the associated " + f"folder accessible ? : {err} ({err.__class__.__name__}) Continuing anyway." + ) + octobot_commons.logging.get_logger(LOGGER_NAME).exception( + err, True, error_details + ) + try: + # create config content as str before opening file not to clear it on json dump exception + str_content = dump_formatted_json(content) + with open(save_path, "w") as write_file: + write_file.write(str_content) + + except Exception as global_exception: + # when failing to save the new file config + octobot_commons.logging.get_logger(LOGGER_NAME).error( + f"File save failed : {global_exception}. " + f"{'restoring previous content' if has_restore_file else 'no previous content to restore'}" + ) + if has_restore_file: + # restore file with previous content + shutil.copy(restore_file, save_path) + raise global_exception + finally: + # remove temporary restore file if any + try: + if os.path.exists(restore_file): + os.remove(restore_file) + except Exception as err: + octobot_commons.logging.get_logger(LOGGER_NAME).exception( + err, True, f"Failed to remove {restore_file} restore file: {err}" + ) + + +def dump_formatted_json(json_data) -> str: + """ + The dumped json data + :param json_data: the json data to be dumped + :return: the dumped json data + """ + return json.dumps(json_data, indent=4, sort_keys=True) + + +def _get_sanitized_value(value: typing.Any) -> typing.Any: + if isinstance(value, (list, dict)): + return sanitize(value) + if isinstance(value, decimal.Decimal): + return float(value) + return value + + +def sanitize(values: typing.Any) -> typing.Any: + """ + Sanitize the given values by converting decimal.Decimal to float and + recursively sanitizing lists, dictionaries and dataclasses. + """ + if isinstance(values, (list, tuple)): + return type(values)( + sanitize(val) + for val in values + ) + if isinstance(values, dict): + for key, val in values.items(): + values[key] = _get_sanitized_value(val) + elif isinstance(values, commons_dataclasses.FlexibleDataclass): + for field in values.get_field_names(): + setattr(values, field, sanitize(getattr(values, field))) + return values + + +def sanitized(f): + """ + Decorator to sanitize the result of an asynchronous function. + """ + async def sanitized_wrapper(*args, **kwargs): + return sanitize(await f(*args, **kwargs)) + return sanitized_wrapper diff --git a/packages/commons/octobot_commons/list_util.py b/packages/commons/octobot_commons/list_util.py new file mode 100644 index 0000000000..4420ee675d --- /dev/null +++ b/packages/commons/octobot_commons/list_util.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import functools + + +def flatten_list(list_to_flatten): + """ + Flatten the list :list_to_flatten: + :param list_to_flatten: the list to flatten + :return: the flattened list + """ + return functools.reduce( + lambda first_level, second_level: first_level + second_level, list_to_flatten + ) + + +def deduplicate(elements: list) -> list: + """ + remove duplicated values from a list while preserving order + """ + # from https://stackoverflow.com/questions/480214/how-do-i-remove-duplicates-from-a-list-while-preserving-order + seen = set() + seen_add = seen.add + return [x for x in elements if not (x in seen or seen_add(x))] diff --git a/packages/commons/octobot_commons/logging/__init__.py b/packages/commons/octobot_commons/logging/__init__.py new file mode 100644 index 0000000000..1186af610c --- /dev/null +++ b/packages/commons/octobot_commons/logging/__init__.py @@ -0,0 +1,77 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.logging import logging_util +from octobot_commons.logging.logging_util import ( + BotLogger, + set_global_logger_level, + get_global_logger_level, + temporary_log_level, + get_logger_level_per_handler, + get_logger, + set_logging_level, + get_backtesting_errors_count, + reset_backtesting_errors, + set_error_publication_enabled, + get_private_minimized_message_if_necessary, + get_private_placeholder_if_necessary, + BACKTESTING_NEW_ERRORS_COUNT, + LOG_DATABASE, + LOG_NEW_ERRORS_COUNT, + logs_database, + error_notifier_callbacks, + LOGS_MAX_COUNT, + add_log, + get_errors_count, + reset_errors_count, + register_error_notifier, + register_log_callback, + set_enable_web_interface_logs, +) +from octobot_commons.logging.context_based_file_handler import ( + add_context_based_file_handler, + ContextBasedFileHandler, +) + + +__all__ = [ + "BotLogger", + "set_global_logger_level", + "get_global_logger_level", + "temporary_log_level", + "get_logger_level_per_handler", + "get_logger", + "set_logging_level", + "get_backtesting_errors_count", + "reset_backtesting_errors", + "set_error_publication_enabled", + "get_private_minimized_message_if_necessary", + "get_private_placeholder_if_necessary", + "BACKTESTING_NEW_ERRORS_COUNT", + "LOG_DATABASE", + "LOG_NEW_ERRORS_COUNT", + "logs_database", + "error_notifier_callbacks", + "LOGS_MAX_COUNT", + "add_log", + "get_errors_count", + "reset_errors_count", + "register_error_notifier", + "register_log_callback", + "set_enable_web_interface_logs", + "add_context_based_file_handler", + "ContextBasedFileHandler", +] diff --git a/packages/commons/octobot_commons/logging/context_based_file_handler.py b/packages/commons/octobot_commons/logging/context_based_file_handler.py new file mode 100644 index 0000000000..4852bfee84 --- /dev/null +++ b/packages/commons/octobot_commons/logging/context_based_file_handler.py @@ -0,0 +1,82 @@ +# pylint: disable=C0415, W0603, W1508, R0913, C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging +import os +import typing + + +MAX_CONTEXT_BASED_FILE_HANDLERS_PER_CATEGORY = 30 +DEFAULT_CONTEXT_BASED_FILE_FORMATTER = "%(asctime)s %(levelname)-8s %(name)-20s %(message)s" + + +def add_context_based_file_handler( + logs_folder: str, + file_name_provider: typing.Callable[[], typing.Optional[str]] +) -> None: + """ + Add the ContextBasedFileHandler to the root logger. Logs will + additionally be written to a file named after the file name provided by the file_name_provider. + """ + logging.getLogger().addHandler( + ContextBasedFileHandler(logs_folder, file_name_provider) + ) + + +class ContextBasedFileHandler(logging.Handler): + """ + Logging handler that writes logs to specific files when the + context is set. The log file name is the file name provided by the file_name_provider. + """ + def __init__( + self, + logs_folder: str, + file_name_provider: typing.Callable[[], typing.Optional[str]], + ): + super().__init__() + self._custom_handlers: dict[str, logging.FileHandler] = {} + self._file_name_provider = file_name_provider + self._logs_folder = logs_folder + os.makedirs(self._logs_folder, exist_ok=True) + + def emit(self, record: logging.LogRecord) -> None: + if file_name := self._file_name_provider(): + if file_name not in self._custom_handlers: + if len(self._custom_handlers) >= MAX_CONTEXT_BASED_FILE_HANDLERS_PER_CATEGORY: + self._remove_oldest_handler() + self._custom_handlers[file_name] = self._create_file_handler(file_name) + self._custom_handlers[file_name].emit(record) + + def _remove_oldest_handler(self) -> None: + oldest_key = next(iter(self._custom_handlers)) + oldest_handler = self._custom_handlers.pop(oldest_key) + logging.getLogger().removeHandler(oldest_handler) + oldest_handler.close() + + def _create_file_handler(self, file_name: str) -> logging.FileHandler: + log_path = os.path.join(self._logs_folder, f"{file_name}.log") + file_handler = logging.FileHandler(log_path, mode="a", encoding="utf-8") + file_handler.setLevel(self.level) + root_logger = logging.getLogger() + for handler in root_logger.handlers: + if isinstance(handler, logging.FileHandler) and handler.formatter: + # reuse the user configured formatter + file_handler.setFormatter(handler.formatter) + break + else: + # default formatter + file_handler.setFormatter(logging.Formatter(DEFAULT_CONTEXT_BASED_FILE_FORMATTER)) + return file_handler diff --git a/packages/commons/octobot_commons/logging/logging_util.py b/packages/commons/octobot_commons/logging/logging_util.py new file mode 100644 index 0000000000..103dcb51fb --- /dev/null +++ b/packages/commons/octobot_commons/logging/logging_util.py @@ -0,0 +1,399 @@ +# pylint: disable=C0415, W0603, W1508, R0913, C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import logging +import typing + +import octobot_commons.constants as constants +import octobot_commons.timestamp_util as timestamp_util +import octobot_commons.html_util as html_util + +LOG_DATABASE = "log_db" +LOG_NEW_ERRORS_COUNT = "log_new_errors_count" + +BACKTESTING_NEW_ERRORS_COUNT: str = "log_backtesting_errors_count" + +logs_database = { + LOG_DATABASE: [], + LOG_NEW_ERRORS_COUNT: 0, + BACKTESTING_NEW_ERRORS_COUNT: 0, +} + +error_notifier_callbacks = [] + +LOGS_MAX_COUNT = 1000 + +STORED_LOG_MIN_LEVEL = logging.WARNING +ENABLE_WEB_INTERFACE_LOGS = True +ERROR_PUBLICATION_ENABLED = True +SHOULD_PUBLISH_LOGS_WHEN_RE_ENABLED = False + + +def _default_callback(*_, **__): + pass + + +_ERROR_CALLBACK = _default_callback +_LOG_CALLBACK: typing.Union[None, typing.Callable[[str], str]] = None + + +def set_global_logger_level(level, handler_levels=None) -> None: + """ + Set the global logger level + :param level: the level to set + """ + logger = logging.getLogger() + logger.setLevel(level) + levels = handler_levels or [level] * len(logger.handlers) + for handler, updated_level in zip(logger.handlers, levels): + handler.setLevel(updated_level) + + +def get_global_logger_level() -> object: + """ + Return the global logger level + :return: the global logger level + """ + return logging.getLogger().getEffectiveLevel() + + +@contextlib.contextmanager +def temporary_log_level(level): + """ + Sets the log level to the given level inside this context + """ + previous_level = get_global_logger_level() + try: + set_global_logger_level(level) + yield + finally: + set_global_logger_level(previous_level) + + +def get_logger_level_per_handler() -> list: + """ + Return the global logger level + :return: order handles logging levels + """ + return [handler.level for handler in logging.getLogger().handlers] + + +def get_logger(logger_name="Anonymous"): + """ + Return the logger from the logger_name + :param logger_name: the logger name + :return: the logger from the logger name + """ + return BotLogger(logger_name) + + +def set_logging_level(logger_names, level) -> None: + """ + Set the logging level for the logger names + :param logger_names: the logger names + :param level: the level to set + """ + for name in logger_names: + logging.getLogger(name).setLevel(level) + + +def get_private_minimized_message_if_necessary(message: typing.Any) -> typing.Any: + """ + :param message: the message to minimize + :return: the private minimized message if necessary + """ + if constants.ALLOW_PRIVATE_DATA_LOGS or message is None: + return message + str_message = message if isinstance(message, str) else str(message) + return ( + str_message[:constants.PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT] + + constants.PRIVATE_MESSAGE_PLACEHOLDER + + str_message[-constants.PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT:] + ) + + +def get_private_placeholder_if_necessary(message: typing.Any) -> str: + """ + :param message: the message replace with a placeholder + :return: the private placeholder if necessary + """ + if constants.ALLOW_PRIVATE_DATA_LOGS: + return message + return constants.PRIVATE_MESSAGE_PLACEHOLDER + + +def add_log(level, source, message, keep_log=True, call_notifiers=True): + """ + Add a log to the log database + :param level: the log level + :param source: the log source + :param message: the log message + :param keep_log: if the log should be stored + :param call_notifiers: if the log should trigger the notifiers + """ + if keep_log: + logs_database[LOG_DATABASE].append( + { + "Time": timestamp_util.get_now_time(), + "Level": logging.getLevelName(level), + "Source": str(source), + "Message": message, + } + ) + if len(logs_database[LOG_DATABASE]) > LOGS_MAX_COUNT: + logs_database[LOG_DATABASE].pop(0) + # do not count this error if keep_log is False + if level >= logging.ERROR: + logs_database[LOG_NEW_ERRORS_COUNT] += 1 + logs_database[BACKTESTING_NEW_ERRORS_COUNT] += 1 + if call_notifiers: + for callback in error_notifier_callbacks: + callback() + + +def get_errors_count(counter=LOG_NEW_ERRORS_COUNT): + """ + Return the error count according to the specified counter + :param counter: the counter to use + :return: the error count + """ + return logs_database[counter] + + +def reset_errors_count(counter=LOG_NEW_ERRORS_COUNT): + """ + Reset the specified counter error count + :param counter: the counter to use + """ + logs_database[counter] = 0 + + +def register_error_notifier(callback): + """ + Register an error notifier + :param callback: the callback to call when the notifier is triggered + """ + error_notifier_callbacks.append(callback) + + +class BotLogger: + """ + The bot logger that manage all OctoBot's logs + """ + + def __init__(self, logger_name): + self.logger_name = logger_name + self.logger = logging.getLogger(logger_name) + + def debug(self, message: str, *args, **kwargs) -> None: + """ + Called for a debug log + :param message: the log message + """ + message = self._process_log_callback(message) + self.logger.debug(message, *args, **kwargs) + self._publish_log_if_necessary(message, logging.DEBUG) + + def info(self, message: str, *args, **kwargs) -> None: + """ + Called for an info log + :param message: the log message + """ + message = self._process_log_callback(message) + self.logger.info(message, *args, **kwargs) + self._publish_log_if_necessary(message, logging.INFO) + + def warning(self, message: str, *args, **kwargs) -> None: + """ + Called for a warning log + :param message: the log message + """ + message = self._process_log_callback(message) + self.logger.warning(message, *args, **kwargs) + self._publish_log_if_necessary(message, logging.WARNING) + + def error(self, message: str, *args, skip_post_callback=False, **kwargs) -> None: + """ + Called for an error log + :param message: the log message + :param skip_post_callback: when True, the error callback wont be called + """ + message = self._process_log_callback(message) + self.logger.error(message, *args, **kwargs) + self._publish_log_if_necessary(message, logging.ERROR) + self._post_callback_if_necessary(None, message, skip_post_callback) + + def exception( + self, + exception: Exception, + publish_error_if_necessary: bool = True, + error_message: str = None, + include_exception_name: bool = True, + skip_post_callback: bool = False, + **kwargs, + ) -> None: + """ + Called for an exception log + :param exception: the log exception + :param publish_error_if_necessary: if the error should be published + :param error_message: the log message + :param include_exception_name: when True adds the __class__.__name__ of the exception at the end of the message + :param skip_post_callback: when True, the error callback won't be called + """ + extra = kwargs.get("extra", {}) + origin_error_message = error_message + error_message = ( + self._process_log_callback(error_message) + if error_message + else error_message + ) + extra[constants.EXCEPTION_DESC] = error_message + html_util.summarize_exception_html_cause_if_relevant(exception) + self.logger.exception(exception, extra=extra, **kwargs) + if publish_error_if_necessary: + message = origin_error_message + if message is None: + message = ( + str(exception) if str(exception) else exception.__class__.__name__ + ) + elif include_exception_name: + message = f"{message} (error: {exception.__class__.__name__})" + self.error( + message, + skip_post_callback=True, + extra={constants.IS_EXCEPTION_DESC: True}, + ) + self._post_callback_if_necessary(exception, error_message, skip_post_callback) + + def critical(self, message: str, *args, **kwargs) -> None: + """ + Called for a critical log + :param message: the log message + """ + message = self._process_log_callback(message) + self.logger.critical(message, *args, **kwargs) + self._publish_log_if_necessary(message, logging.CRITICAL) + + def fatal(self, message: str, *args, **kwargs) -> None: + """ + Called for a fatal log + :param message: the log message + """ + message = self._process_log_callback(message) + self.logger.fatal(message, *args, **kwargs) + self._publish_log_if_necessary(message, logging.FATAL) + + def disable(self, disabled): + """ + Used to disable or enable this logger + :param disabled: True to disable + """ + self.logger.disabled = disabled + + def _process_log_callback(self, message: str) -> str: + if _LOG_CALLBACK is None: + return message + return _LOG_CALLBACK(message) + + def _publish_log_if_necessary(self, message, level) -> None: + """ + Publish the log message if necessary + :param message: the log message + :param level: the log level + """ + if ( + ENABLE_WEB_INTERFACE_LOGS + and STORED_LOG_MIN_LEVEL <= level + and get_global_logger_level() <= level + ): + self._web_interface_publish_log(message, level) + if not ERROR_PUBLICATION_ENABLED and logging.ERROR <= level: + global SHOULD_PUBLISH_LOGS_WHEN_RE_ENABLED + SHOULD_PUBLISH_LOGS_WHEN_RE_ENABLED = True + + def _web_interface_publish_log(self, message, level) -> None: + """ + Publish log to web interface + :param message: the log message + :param level: the log level + """ + add_log( + level, + self.logger_name, + message, + call_notifiers=ERROR_PUBLICATION_ENABLED, + ) + + @staticmethod + def register_error_callback(callback): + """ + :param callback: the callback to be called upon errors and exceptions + Register callback as the ERROR_CALLBACK + """ + global _ERROR_CALLBACK + _ERROR_CALLBACK = callback + + @staticmethod + def _post_callback_if_necessary(exception, error_message, skip_post_callback): + if not skip_post_callback: + _ERROR_CALLBACK(exception, error_message) + + +def register_log_callback(callback: typing.Union[None, typing.Callable[[str], str]]): + """ + :param callback: the callback to be called upon any log of any level + Register callback as the _LOG_CALLBACK + """ + global _LOG_CALLBACK + _LOG_CALLBACK = callback + + +def get_backtesting_errors_count() -> int: + """ + Get backtesting errors count + :return: the backtesting errors count + """ + return get_errors_count(BACKTESTING_NEW_ERRORS_COUNT) + + +def reset_backtesting_errors() -> None: + """ + Reset the backtesting errors count + """ + reset_errors_count(BACKTESTING_NEW_ERRORS_COUNT) + + +def set_error_publication_enabled(enabled) -> None: + """ + Set the error publication enabling + :param enabled: if the error publication is enabled + """ + global ERROR_PUBLICATION_ENABLED + global SHOULD_PUBLISH_LOGS_WHEN_RE_ENABLED + ERROR_PUBLICATION_ENABLED = enabled + if enabled and SHOULD_PUBLISH_LOGS_WHEN_RE_ENABLED: + add_log(logging.ERROR, None, None, keep_log=False, call_notifiers=True) + else: + SHOULD_PUBLISH_LOGS_WHEN_RE_ENABLED = False + + +def set_enable_web_interface_logs(enabled): + """ + Disable or enable errors storage in web interface + """ + global ENABLE_WEB_INTERFACE_LOGS + ENABLE_WEB_INTERFACE_LOGS = enabled diff --git a/packages/commons/octobot_commons/logical_operators.py b/packages/commons/octobot_commons/logical_operators.py new file mode 100644 index 0000000000..c9c273243a --- /dev/null +++ b/packages/commons/octobot_commons/logical_operators.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as enums +import octobot_commons.errors as errors + + +def evaluate_condition(left_operand, right_operand, operator: str) -> bool: + """ + evaluates the given condition + :param left_operand: the left operand of the condition + :param right_operand: the right operand of the condition + :param operator: the operator of the condition + :return: True if the evaluated condition is True, False otherwise + """ + if operator == enums.LogicalOperators.LOWER_THAN.value: + return left_operand < right_operand + if operator == enums.LogicalOperators.HIGHER_THAN.value: + return left_operand > right_operand + if operator == enums.LogicalOperators.LOWER_OR_EQUAL_TO.value: + return left_operand <= right_operand + if operator == enums.LogicalOperators.HIGHER_OR_EQUAL_TO.value: + return left_operand >= right_operand + if operator == enums.LogicalOperators.EQUAL_TO.value: + return left_operand == right_operand + if operator == enums.LogicalOperators.DIFFERENT_FROM.value: + return left_operand != right_operand + raise errors.LogicalOperatorError(f"Unknown operator: {operator}") diff --git a/packages/commons/octobot_commons/monitored_process.py b/packages/commons/octobot_commons/monitored_process.py new file mode 100644 index 0000000000..3d69a2a938 --- /dev/null +++ b/packages/commons/octobot_commons/monitored_process.py @@ -0,0 +1,378 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Monitored Process + +Base class for async context managers that spawn, monitor, and gracefully terminate +external subprocesses. + +Provides: +- Subprocess lifecycle management (start → ready → stop) +- Background stdout/stderr monitoring with error-pattern detection +- Readiness detection via a configurable string match +- Premature-exit detection +- Graceful SIGTERM with forced SIGKILL fallback + +Subclasses must implement ``_get_subprocess_args()`` and set ``READINESS_STRING``. +All other hooks have sensible defaults that can be overridden as needed. + +Example:: + + class MyProcess(MonitoredProcess): + READINESS_STRING = "server ready" + ERROR_PATTERNS = ["FATAL"] + READINESS_TIMEOUT_SECONDS = 30.0 + + def __init__(self, port: int): + super().__init__() + self._port = port + + def _get_subprocess_args(self) -> list[str]: + return ["myserver", "--port", str(self._port)] + + async with MyProcess(port=8080) as proc: + # subprocess is up and the "server ready" line was found in its output + ... +""" + +import asyncio +import logging +import typing + + +class MonitoredProcessError(Exception): + """Base error for all monitored-process failures.""" + + +class MonitoredProcessOutputError(MonitoredProcessError): + """Raised when an error pattern is found in process stdout/stderr.""" + + def __init__( # pylint: disable=too-many-arguments + self, + message: str, + stream: str, + line: str, + std_out_buffer: list[str], + std_err_buffer: list[str], + ): + self.stream = stream + self.line = line + self.std_out_buffer = '\n'.join(std_out_buffer) if std_out_buffer else None + self.std_err_buffer = '\n'.join(std_err_buffer) if std_err_buffer else None + super().__init__( + f"{message} ({stream}): {line}" + f"{chr(10) + 'stdout:' + chr(10) if self.std_out_buffer else ''}{self.std_out_buffer or ''}" + f"{chr(10) + 'stderr:' + chr(10) if self.std_err_buffer else ''}{self.std_err_buffer or ''}" + ) + + +class MonitoredProcessConfigurationError(MonitoredProcessError): + """Raised when the executable is not found or the process cannot be started.""" + + def __init__(self, message: str): + super().__init__(message) + + +class MonitoredProcessReadyTimeoutError(MonitoredProcessError): + """Raised when the readiness string is not detected within the timeout.""" + + def __init__(self, readiness_string: str, timeout_seconds: float): + self.readiness_string = readiness_string + self.timeout_seconds = timeout_seconds + super().__init__( + f"Did not find '{readiness_string}' in process output within {timeout_seconds}s" + ) + + +class MonitoredProcessExitedError(MonitoredProcessError): + """Raised when the process exits prematurely (before the context exits).""" + + def __init__(self, exit_code: typing.Optional[int], stderr: typing.Optional[str] = None): + self.exit_code = exit_code + self.stderr = stderr + msg = f"Process exited prematurely with code {exit_code}" + if stderr: + msg += f"; stderr: {stderr}" + super().__init__(msg) + + +class MonitoredProcess: # pylint: disable=too-many-instance-attributes + """ + Async context manager that spawns and monitors a subprocess. + + Lifecycle + --------- + ``__aenter__``: + 1. Calls ``_get_subprocess_args()`` (and optionally ``_get_subprocess_env()`` / + ``_get_subprocess_cwd()``) to build the subprocess invocation. + 2. Starts background tasks that stream stdout/stderr and watch for process exit. + 3. Blocks until ``READINESS_STRING`` is found in the output (or an error occurs). + + ``__aexit__``: + 1. Cancels the background monitor tasks. + 2. Sends SIGTERM; waits up to ``TERMINATE_TIMEOUT_SECONDS``; sends SIGKILL on timeout. + 3. Re-raises any error captured by the monitors. + + Subclassing + ----------- + **Must override**: + - ``_get_subprocess_args() -> list[str]`` — full command ``[program, arg, ...]`` + - ``READINESS_STRING: str`` — substring to watch for in output + + **May override** (all have sensible defaults): + - ``ERROR_PATTERNS: list[str]`` — substrings that signal a fatal error + - ``TERMINATE_TIMEOUT_SECONDS: float`` — time before forced kill (default 10 s) + - ``READINESS_TIMEOUT_SECONDS: float`` — readiness wait limit (default 60 s) + - ``_get_subprocess_env()`` — environment dict or None (inherit) + - ``_get_subprocess_cwd()`` — working directory or None (inherit) + - ``_make_output_error(...)`` — override to raise a subclass-specific type + - ``_make_exited_error(...)`` — override to raise a subclass-specific type + - ``_make_ready_timeout_error()`` — override to raise a subclass-specific type + - ``_make_configuration_error(message)`` — override to raise a subclass-specific type + """ + + TERMINATE_TIMEOUT_SECONDS: float = 10.0 + READINESS_TIMEOUT_SECONDS: float = 60.0 + + #: Substring that must appear in stdout/stderr for the process to be considered ready. + READINESS_STRING: str = "" + + #: Substrings that indicate a fatal error; triggers ``_make_output_error`` when matched. + ERROR_PATTERNS: list[str] = [] + + def __init__(self) -> None: + self._process: typing.Optional[asyncio.subprocess.Process] = None # pylint: disable=no-member + self._monitor_tasks: list[asyncio.Task] = [] + self._monitor_error: typing.Optional[BaseException] = None + self._ready_event = asyncio.Event() + self._error_event = asyncio.Event() + self._shutting_down = False + self._stderr_buffer: list[str] = [] + self._stdout_buffer: list[str] = [] + self._logger: logging.Logger = logging.getLogger(self.__class__.__name__) + + def _get_subprocess_args(self) -> list[str]: + """Return the full command: ``[program, arg1, arg2, ...]``.""" + raise NotImplementedError( + f"{self.__class__.__name__} must implement _get_subprocess_args()" + ) + + def _get_subprocess_env(self) -> typing.Optional[dict[str, str]]: + """Return the environment mapping for the subprocess, or ``None`` to inherit.""" + return None + + def _get_subprocess_cwd(self) -> typing.Optional[str]: + """Return the working directory for the subprocess, or ``None`` to inherit.""" + return None + + # --- error factories (override to return subclass-specific types) ----- + + def _make_output_error( + self, + stream: str, + line: str, + stdout_buf: list[str], + stderr_buf: list[str], + ) -> BaseException: + return MonitoredProcessOutputError( + f"{self.__class__.__name__} error", stream, line, stdout_buf, stderr_buf + ) + + def _make_exited_error( + self, + exit_code: typing.Optional[int], + output_err: typing.Optional[str], + ) -> BaseException: + return MonitoredProcessExitedError(exit_code, output_err) + + def _make_ready_timeout_error(self) -> BaseException: + return MonitoredProcessReadyTimeoutError( + self.READINESS_STRING, self.READINESS_TIMEOUT_SECONDS + ) + + def _make_configuration_error(self, message: str) -> BaseException: + return MonitoredProcessConfigurationError(message) + + async def __aenter__(self) -> "MonitoredProcess": + """Spawn the subprocess and block until it signals readiness.""" + args = self._get_subprocess_args() + self._logger.info("Spawning %s: %s", self.__class__.__name__, ' '.join(args)) + try: + self._process = await asyncio.create_subprocess_exec( + *args, + stdin=asyncio.subprocess.DEVNULL, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=self._get_subprocess_cwd(), + env=self._get_subprocess_env(), + ) + except FileNotFoundError as e: + raise self._make_configuration_error( + f"Executable '{args[0]}' not found" + ) from e + self._logger.debug( + "Started %s with pid: %s", self.__class__.__name__, self._process.pid + ) + self._start_output_monitor() + await self._wait_for_ready() + self._logger.info("%s is ready", self.__class__.__name__) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: + """Terminate the subprocess and cancel monitors; re-raise any captured error.""" + self._shutting_down = True + + self._logger.debug("Cancelling monitor tasks") + for task in self._monitor_tasks: + task.cancel() + if self._monitor_tasks: + await asyncio.gather(*self._monitor_tasks, return_exceptions=True) + self._monitor_tasks.clear() + self._logger.debug("Monitor tasks cancelled") + + if self._process is None: + if self._monitor_error is not None: + raise self._monitor_error + return + + if self._process.returncode is None: + self._process.terminate() + graceful = True + try: + self._logger.debug( + "Waiting for %s to terminate", self.__class__.__name__ + ) + await asyncio.wait_for( + self._process.wait(), + timeout=self.TERMINATE_TIMEOUT_SECONDS, + ) + except asyncio.TimeoutError: + self._logger.warning( + "Terminate timeout, killing %s", self.__class__.__name__ + ) + self._process.kill() + await self._process.wait() + graceful = False + + self._logger.info( + "%s %s terminated", + self.__class__.__name__, + 'gracefully' if graceful else 'forcibly', + ) + self._process = None + + self._stderr_buffer.clear() + self._stdout_buffer.clear() + + if self._monitor_error is not None: + raise self._monitor_error + + async def _wait_for_ready(self) -> None: + """Block until ``READINESS_STRING`` is found or a monitor error is detected.""" + self._logger.info("Waiting for %s to be ready", self.__class__.__name__) + + async def wait_ready_or_error() -> None: + ready_task = asyncio.create_task(self._ready_event.wait()) + error_task = asyncio.create_task(self._error_event.wait()) + done, pending = await asyncio.wait( + [ready_task, error_task], + return_when=asyncio.FIRST_COMPLETED, + ) + for t in pending: + t.cancel() + if error_task in done or self._monitor_error is not None: + raise self._monitor_error # type: ignore[misc] + + try: + await asyncio.wait_for( + wait_ready_or_error(), + timeout=self.READINESS_TIMEOUT_SECONDS, + ) + except asyncio.TimeoutError: + if self._monitor_error is not None: + raise self._monitor_error + raise self._make_ready_timeout_error() + + def _start_output_monitor(self) -> None: + """ + Spawn three background tasks: + + - ``read_stream(stdout)`` — logs and scans stdout for the readiness string + and error patterns. + - ``read_stream(stderr)`` — same for stderr. + - ``watch_exit`` — detects premature non-zero exits. + + Captured errors are stored in ``_monitor_error`` and re-raised in ``__aexit__``. + """ + if self._process is None: + raise MonitoredProcessError( + "Process not started; call _start_output_monitor() inside the context" + ) + if not self._process.stdout or not self._process.stderr: + raise MonitoredProcessError("stdout/stderr are not PIPE") + if self._monitor_tasks: + return # Already started + + async def read_stream(stream: asyncio.StreamReader, name: str) -> None: + while True: + line = await stream.readline() + if not line: + break + line_str = line.decode("utf-8", errors="replace").rstrip() + self._logger.debug("[%s] %s", name, line_str) + if name == "stderr": + self._stderr_buffer.append(line_str) + else: + self._stdout_buffer.append(line_str) + if self.READINESS_STRING and self.READINESS_STRING in line_str: + self._ready_event.set() + for pat in self.ERROR_PATTERNS: + if pat in line_str and self._monitor_error is None: + self._logger.error( + "[%s] [%s] %s", self.__class__.__name__, name, line_str + ) + self._monitor_error = self._make_output_error( + name, line_str, + self._stdout_buffer, self._stderr_buffer, + ) + self._error_event.set() + break + + async def watch_exit() -> None: + if self._process is None: + return + exit_code = await self._process.wait() + if not self._shutting_down and exit_code != 0 and self._monitor_error is None: + output_err = "\n".join(self._stderr_buffer) if self._stderr_buffer else ( + "\n".join(self._stdout_buffer) if self._stdout_buffer else None + ) + self._monitor_error = self._make_exited_error(exit_code, output_err) + self._error_event.set() + + self._monitor_tasks = [ + asyncio.create_task(read_stream(self._process.stdout, "stdout")), + asyncio.create_task(read_stream(self._process.stderr, "stderr")), + asyncio.create_task(watch_exit()), + ] + + def log_output(self, last_lines: int) -> None: + """Log the stdout and stderr buffers.""" + self._logger.info("%s last %s lines from stdout and stderr outputs:\n", self.__class__.__name__, last_lines) + if self._stdout_buffer: + self._logger.info("stdout:\n%s", '\n'.join(self._stdout_buffer[-last_lines:])) + if self._stderr_buffer: + self._logger.info("stderr:\n%s", '\n'.join(self._stderr_buffer[-last_lines:])) diff --git a/packages/commons/octobot_commons/multiprocessing_util.py b/packages/commons/octobot_commons/multiprocessing_util.py new file mode 100644 index 0000000000..d016a765e4 --- /dev/null +++ b/packages/commons/octobot_commons/multiprocessing_util.py @@ -0,0 +1,74 @@ +# pylint: disable=C0103 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import multiprocessing +import contextlib + + +_LOCKS = {} +_ELEMENTS = {} + + +def register_lock_and_shared_elements( + name: str, lock: multiprocessing.RLock, shared_elements: dict +): + """ + Add elements to the globally available elements + """ + _LOCKS[name] = lock + _ELEMENTS.update(shared_elements) + + +def unregister_lock_and_shared_elements( + name: str, shared_elements=None +) -> multiprocessing.RLock: + """ + Remove elements to the globally available elements + """ + if shared_elements is None: + _ELEMENTS.clear() + else: + for key in shared_elements: + _ELEMENTS.pop(key) + return _LOCKS.pop(name) + + +@contextlib.contextmanager +def registered_lock_and_shared_elements( + name: str, lock: multiprocessing.RLock, shared_elements: dict +): + """ + Add and remove elements to the globally available elements + """ + try: + register_lock_and_shared_elements(name, lock, shared_elements) + yield lock + finally: + unregister_lock_and_shared_elements(name, shared_elements) + + +def get_lock(name: str) -> multiprocessing.RLock: + """ + Returns a shared lock + """ + return _LOCKS[name] + + +def get_shared_element(shared_elements_name: str) -> multiprocessing.RLock: + """ + Returns a shared element + """ + return _ELEMENTS[shared_elements_name] diff --git a/packages/commons/octobot_commons/number_util.py b/packages/commons/octobot_commons/number_util.py new file mode 100644 index 0000000000..a042d8bdf2 --- /dev/null +++ b/packages/commons/octobot_commons/number_util.py @@ -0,0 +1,47 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import math +import typing + + +def round_into_str_with_max_digits(number: float, digits_count: int) -> str: + """ + Round the number with digits_count + :param number: the number to round + :param digits_count: the digit count + :return: the rounded number + """ + return "{:.{}f}".format(round(number, digits_count), digits_count) + + +def round_into_float_with_max_digits(number: float, digits_count: int) -> float: + """ + Round the float number with digits_count + :param number: the number to round + :param digits_count: the digit count + :return: the rounded number + """ + return float( + round_into_str_with_max_digits(number=number, digits_count=digits_count) + ) + + +def get_digits_count(value: typing.Union[float, decimal.Decimal]): + """ + :return: the number of digits in the given number + """ + return round(abs(math.log(value, 10))) diff --git a/packages/commons/octobot_commons/optimization_campaign.py b/packages/commons/octobot_commons/optimization_campaign.py new file mode 100644 index 0000000000..41cf1e1c4e --- /dev/null +++ b/packages/commons/octobot_commons/optimization_campaign.py @@ -0,0 +1,50 @@ +# pylint: disable=C0103,W0603 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as constants + + +class OptimizationCampaign: + def __init__(self, name=None): + self.name = name or self.get_campaign_name() + + @classmethod + def get_campaign_name(cls, *args): + """ + Returns the name of the current optimization campaign + :param args: arguments passed to the optimization_campaign_name_proxy + """ + return _optimization_name_proxy(*args) + + +def _default_optimization_name_proxy(*_): + return constants.DEFAULT_CAMPAIGN + + +_name_proxy = _default_optimization_name_proxy + + +def _optimization_name_proxy(*args): + return _name_proxy(*args) + + +def register_optimization_campaign_name_proxy(new_proxy): + """ + Registers a new campaign name provider as a proxy function + :param new_proxy: the proxy function to be called by OptimizationCampaign.get_campaign_name + """ + global _name_proxy + _name_proxy = new_proxy diff --git a/packages/commons/octobot_commons/os_clock_sync.py b/packages/commons/octobot_commons/os_clock_sync.py new file mode 100644 index 0000000000..e09c726e0a --- /dev/null +++ b/packages/commons/octobot_commons/os_clock_sync.py @@ -0,0 +1,142 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.singleton as singleton +import octobot_commons.logging as logging +import octobot_commons.async_job as async_job +import octobot_commons.os_util as os_util + + +class ClockSynchronizer(singleton.Singleton): + DEFAULT_SYNC_REFRESH_INTERVAL = ( + commons_constants.CLOCK_REFRESH_HOURS_INTERVAL + * commons_constants.HOURS_TO_SECONDS + ) + + def __init__(self): + super().__init__() + self.sync_job = None + self.sync_interval = self.DEFAULT_SYNC_REFRESH_INTERVAL + self.logger = logging.get_logger(self.__class__.__name__) + + def _get_sync_cmd(self): + platform = os_util.get_os() + bot_type = os_util.get_octobot_type() + if bot_type == commons_enums.OctoBotTypes.DOCKER.value: + raise NotImplementedError(bot_type) + if platform is commons_enums.PlatformsName.WINDOWS: + # use 2x w32tm /resync as the 1st one often fails + return "net stop w32time && net start w32time && w32tm /resync & w32tm /resync && w32tm /query /status" + if platform is commons_enums.PlatformsName.LINUX: + return "sudo service ntp stop && sudo ntpd -gq && sudo service ntp start" + if platform is commons_enums.PlatformsName.MAC: + raise NotImplementedError(platform.value) + raise NotImplementedError("Unidentified platform") + + async def _sync_clock(self, raise_not_implemented=False): + # should only be called when the initial _sync_clock worked + try: + proc = await asyncio.create_subprocess_shell( + self._get_sync_cmd(), + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode == 0: + self.logger.info("Successful os clock synchronization") + elif not raise_not_implemented: + self.logger.warning( + f"Warning: Time synchronization command exited with {proc.returncode}] " + f'command: "{self._get_sync_cmd()}"' + ) + if stdout: + self.logger.debug(f"[stdout] {stdout}") + if stderr: + message = f"[stderr] {stderr}" + if raise_not_implemented: + raise NotImplementedError(message) + self.logger.debug(message) + except NotImplementedError as err: + if raise_not_implemented: + raise + # might happen if event loop changed after initial check, in this case stop job + self.logger.exception(err, False) + self.stop() + + async def _ensure_clock_synch_availability(self): + try: + # make sure the command is available on this platform + self._get_sync_cmd() + except NotImplementedError as err: + self.logger.debug(f"Clock synchronizer: not implemented on {err}.") + return False + if not os_util.has_admin_rights(): + self.logger.debug( + "Admin rights are required to synchronize the computer clock" + ) + return False + try: + # make sure the command is usable on this platform + await self._sync_clock(raise_not_implemented=True) + except NotImplementedError as err: + self.logger.debug( + f"Error when synchronizing clock: {err}. Disabling synchronizer." + ) + return False + return True + + async def start(self) -> bool: + """ + Synch the clock and start the clock synchronization loop if possible on this system + :return: True if the loop has been started + """ + if not await self._ensure_clock_synch_availability(): + self.logger.debug("Clock synch loop disabled") + return False + self.logger.debug("Starting clock synchronizer") + self.sync_job = async_job.AsyncJob( + self._sync_clock, + first_execution_delay=self.sync_interval, + execution_interval_delay=self.sync_interval, + ) + await self.sync_job.run() + return True + + def stop(self): + """ + Stop the synchronization loop + """ + if self.sync_job is not None and not self.sync_job.is_stopped(): + self.logger.debug("Stopping clock synchronizer") + self.sync_job.stop() + + +async def start_clock_synchronizer(): + """ + Start the clock synchronization loop if possible on this system + :return: True if the loop has been started + """ + return await ClockSynchronizer.instance().start() + + +async def stop_clock_synchronizer(): + """ + Stop the synchronization loop + """ + return ClockSynchronizer.instance().stop() diff --git a/packages/commons/octobot_commons/os_util.py b/packages/commons/octobot_commons/os_util.py new file mode 100644 index 0000000000..f2cf718a65 --- /dev/null +++ b/packages/commons/octobot_commons/os_util.py @@ -0,0 +1,203 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +import sys +import os +import platform +import ctypes + +import octobot_commons.constants as constants +import octobot_commons.enums as enums + +try: + import psutil +except ImportError: + if constants.USE_MINIMAL_LIBS: + # mock psutil imports + class PsutilImportMock: + class virtual_memory: # pylint: disable=invalid-name + def __init__(self, *args): + raise ImportError("psutil not installed") + + def __getitem__(self, key): + return self[key] + + class Process: + def __init__(self, *args): + raise ImportError("psutil not installed") + + def memory_full_info(self): # pylint: disable=missing-function-docstring + class MemoryInfo: + def __init__(self, *args): # pylint: disable=unused-argument + self.rss = 0 + self.vms = 0 + self.uss = 0 + + return MemoryInfo() + + class cpu_percent: # pylint: disable=invalid-name + def __init__(self, *args): + raise ImportError("psutil not installed") + + psutil = PsutilImportMock() + else: + raise + + +def get_current_platform(): + """ + Return the current platform details + Return examples + For Windows : + >>> 'Windows:10:AMD64' + For Linux : + >>> 'Linux:4.15.0-46-generic:x86_64' + For Raspberry : + >>> 'Linux:4.14.98-v7+:armv7l' + :return: the current platform details + """ + return ( + f"{platform.system()}{constants.PLATFORM_DATA_SEPARATOR}{platform.release()}{constants.PLATFORM_DATA_SEPARATOR}" + f"{platform.machine()}" + ) + + +def get_octobot_type(): + """ + Return OctoBot running type from OctoBotTypes + :return: the OctoBot running type + """ + try: + execution_arg = sys.argv[0] + # sys.argv[0] is always the name of the python script called when using a command "python xyz.py" + if execution_arg.endswith(".py"): + if _is_on_docker(): + return enums.OctoBotTypes.DOCKER.value + return enums.OctoBotTypes.PYTHON.value + # sys.argv[0] is the name of the binary when using a binary version: ends with nothing or .exe" + return enums.OctoBotTypes.BINARY.value + except IndexError: + return enums.OctoBotTypes.BINARY.value + + +def get_os(): + """ + Return the OS name + :return: the OS name + """ + return enums.PlatformsName(os.name) + + +def has_admin_rights() -> bool: + """ + :return: True if the current thread has admin rights + """ + try: + return os.getuid() == 0 + except AttributeError: + return ctypes.windll.shell32.IsUserAnAdmin() + + +def is_machine_64bit() -> bool: + """ + Win: AMD64 + Debian-64: x86_64 + From https://stackoverflow.com/questions/2208828/detect-64bit-os-windows-in-python + :return: True if the machine is 64bit + """ + return platform.machine().endswith("64") + + +def is_arm_machine() -> bool: + """ + Can be armv7l or aarch64 (raspberry, Android smartphone...) + From https://raspberrypi.stackexchange.com/questions/5100/detect-that-a-python-program-is-running-on-the-pi + :return: True if the machine is 64bit + """ + return platform.machine() in ["armv7l", "aarch64"] + + +def is_raspberry_pi_machine() -> bool: + """ + Check if the machine is a Raspberry Pi + Works on bare metal and in Docker containers + :return: True if the machine is a Raspberry Pi + """ + # Try device-tree method (works on bare metal Raspberry Pi) + try: + with open("/proc/device-tree/model", "r") as f: + if "Raspberry Pi" in f.read(): + return True + except (FileNotFoundError, IOError): + pass + + # Fallback to cpuinfo (works in Docker containers) + try: + with open("/proc/cpuinfo", "r") as f: + cpuinfo = f.read() + return "Raspberry Pi" in cpuinfo or "BCM" in cpuinfo + except (FileNotFoundError, IOError): + pass + return False + + +def _is_on_docker(): + """ + Check if the current platform is docker + :return: True if OctoBot is running with docker + """ + file_to_check = "/proc/self/cgroup" + try: + return os.path.exists("/.dockerenv") or ( + os.path.isfile(file_to_check) + and any("docker" in line for line in open(file_to_check)) + ) + except FileNotFoundError: + return False + + +def parse_boolean_environment_var(env_key: str, default_value: str) -> bool: + """ + Parse a boolean environment variable + :param env_key: the environment variable key + :param default_value: the default value + :return: the boolean value + """ + return constants.parse_boolean_environment_var(env_key, default_value) + + +def get_cpu_and_ram_usage( + cpu_watching_seconds: float +) -> tuple[float, float, float, float, float, float]: + """ + WARNING: blocking the current thread for the given cpu_watching_seconds seconds + :return: the CPU usage percent, RAM usaage %, total RAM used, shared RAM used, + virtual RAM used and unique RAM used by this process + """ + mem_ret = psutil.virtual_memory() + mem_info = psutil.Process(os.getpid()).memory_full_info() + process_shared_used_ram = mem_info.rss # Resident Set Size = physical RAM used + process_virtual_used_ram = mem_info.vms # Virtual Memory Size (ram + swap) + process_unique_used_ram = mem_info.uss # Unique Set Size (ram without sharedmemory from other processes) + return ( + psutil.cpu_percent(cpu_watching_seconds), + mem_ret[2], + mem_ret[3] / constants.BYTES_BY_GB, + process_shared_used_ram / constants.BYTES_BY_GB, + process_virtual_used_ram / constants.BYTES_BY_GB, + process_unique_used_ram / constants.BYTES_BY_GB, + ) diff --git a/packages/commons/octobot_commons/pretty_printer.py b/packages/commons/octobot_commons/pretty_printer.py new file mode 100644 index 0000000000..f3c6c8bde8 --- /dev/null +++ b/packages/commons/octobot_commons/pretty_printer.py @@ -0,0 +1,326 @@ +# pylint: disable=C0415 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons +import octobot_commons.enums as enums +import octobot_commons.constants as constants +import octobot_commons.logging as logging_util +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_commons.timestamp_util as timestamp_util +import octobot_commons.number_util as number_util + +ORDER_TIME_FORMAT = "%m-%d %H:%M" +LOGGER = logging_util.get_logger("PrettyPrinter") + + +def open_order_pretty_printer(exchange_name, dict_order, markdown=False) -> str: + """ + Open Order pretty printer + :param exchange_name: the exchange name + :param dict_order: the order dict + :param markdown: if printer use markdown + :return: the order pretty printed + """ + try: + from octobot_trading.enums import ( + ExchangeConstantsOrderColumns, + TraderOrderType, + TradeOrderSide, + ) + from octobot_trading.api.orders import parse_order_type + + _, _, code = get_markers(markdown) + market = symbol_util.parse_symbol( + str(dict_order.get(ExchangeConstantsOrderColumns.SYMBOL.value, "")) + ).quote + quantity_currency = dict_order.get( + ExchangeConstantsOrderColumns.QUANTITY_CURRENCY.value, "" + ) + order_type = parse_order_type(dict_order) + if order_type == TraderOrderType.UNKNOWN: + order_type = TradeOrderSide( + dict_order.get(ExchangeConstantsOrderColumns.SIDE.value) + ) + quantity = dict_order.get(ExchangeConstantsOrderColumns.AMOUNT.value, 0.0) + price = dict_order.get(ExchangeConstantsOrderColumns.PRICE.value, 0.0) + + return ( + f"{code}{order_type.name.replace('_', ' ')}{code}: {code}" + f"{get_min_string_from_number(quantity)} " + f"{quantity_currency}{code} at {code}" + f"{get_min_string_from_number(price)} {market}{code} " + f"on {exchange_name.capitalize()}" + ) + except ImportError: + LOGGER.error( + "open_order_pretty_printer requires OctoBot-Trading package installed" + ) + return "" + + +def trade_pretty_printer(exchange_name, trade, markdown=False) -> str: + """ + Trade pretty printer + :param exchange_name: the exchange name + :param trade: the trade object + :param markdown: if printer use markdown + :return: the trade pretty printed + """ + try: + from octobot_trading.enums import TraderOrderType + + _, _, code = get_markers(markdown) + trade_type = trade.trade_type + if trade_type == TraderOrderType.UNKNOWN: + trade_type = trade.side + + trade_executed_time_str = ( + timestamp_util.convert_timestamp_to_datetime( + trade.executed_time, time_format=ORDER_TIME_FORMAT, local_timezone=True + ) + if trade.executed_time + else "" + ) + return ( + f"{code}{trade_type.name.replace('_', ' ')}{code}: {code}" + f"{get_min_string_from_number(trade.executed_quantity)} " + f"{trade.quantity_currency}{code} at {code}" + f"{get_min_string_from_number(trade.executed_price)} {trade.market}{code} " + f"{exchange_name.capitalize()} " + f"{trade_executed_time_str} " + ) + except ImportError: + LOGGER.error( + "open_order_pretty_printer requires OctoBot-Trading package installed" + ) + return "" + + +def cryptocurrency_alert(result, final_eval) -> (str, str): + """ + Cryptocurrency alert + :param result: the result + :param final_eval: the final eval + :return: alert and the markdown alert + """ + try: + import telegram.helpers + + _, _, code = get_markers(True) + display_result = str(result).split(".")[1].replace("_", " ") + alert = f"Result : {display_result}\n" f"Evaluation : {final_eval}" + alert_markdown = ( + f"Result : {code}{display_result}{code}\n" + f"Evaluation : {code}{telegram.helpers.escape_markdown(str(final_eval))}{code}" + ) + return alert, alert_markdown + except ImportError: + LOGGER.error("cryptocurrency_alert requires Telegram package installed") + return "", "" + + +def _get_row_pretty_portfolio_row(holdings, currency, ref_market, ref_market_value): + """ + :return: the portfolio row adapted for a raw format + """ + str_holdings = get_min_string_from_number(holdings) + if ref_market: + return f"{str_holdings} {currency}{get_min_string_from_number(ref_market_value)} {ref_market}" + return f"{str_holdings} {currency}" + + +def _get_max_digits(number): + abs_number = abs(number) + if abs_number < 0.0001: + return 8 + if abs_number < 0.01: + return 6 + if abs_number < 1: + return 4 + if abs_number < 10000: + return 2 + return 0 + + +def _get_markdown_pretty_portfolio_row( + holdings, currency, ref_market, ref_market_value +): + """ + :return: the portfolio row adapted for a markdown format + """ + str_currency = "{:<4}".format(currency) + str_holdings = "{:<12}".format(get_min_string_from_number(holdings)) + str_ref_market_value = "{:<12}".format("") + if ref_market: + str_ref_market_value = "{:<12}".format( + get_min_string_from_number(ref_market_value) + ) + return f"{str_currency} {str_holdings} {str_ref_market_value}" + + +def global_portfolio_pretty_print( + global_portfolio, + currency_values=None, + ref_market_name=None, + separator="\n", + markdown=False, +) -> str: + """ + Global portfolio pretty printer + :param global_portfolio: the global portfolio + :param currency_values: dict of current currency values {"BTC": 20000, "ETH": 1000 } + :param ref_market_name: current ref market "USD" + :param separator: the printer separator + :param markdown: if printer use markdown + :return: the global portfolio pretty printed + """ + results = [] + currency = "currency" + holdings = "holdings" + value = "value" + for asset, asset_dict in global_portfolio.items(): + if asset_dict[constants.PORTFOLIO_TOTAL] > 0: + holdings_value = 0 + if currency_values and ref_market_name: + if ref_market_name == asset: + holdings_value = asset_dict[constants.PORTFOLIO_TOTAL] + else: + try: + holdings_value = ( + currency_values[asset] + * asset_dict[constants.PORTFOLIO_TOTAL] + ) + except KeyError: + # no currency value + pass + results.append( + { + currency: asset, + holdings: asset_dict[constants.PORTFOLIO_TOTAL], + value: holdings_value, + } + ) + results.sort(key=lambda r: r[value], reverse=True) + if markdown: + # fill lines with empty spaces if necessary + header = ( + f"{'{:<4}'.format('')} " + f"{' {:<9}'.format('Holdings')} " + f"{' {:<7}'.format(ref_market_name or '')}" + ) + header_separator = f"{'-' * 4}|{'-' * 12}|{'-' * 12}" + content = separator.join( + [ + _get_markdown_pretty_portfolio_row( + result[holdings], result[currency], ref_market_name, result[value] + ) + for result in results + ] + ) + return f"{header}\n{header_separator}\n{content}" + return separator.join( + [ + _get_row_pretty_portfolio_row( + result[holdings], result[currency], ref_market_name, result[value] + ) + for result in results + ] + ) + + +def portfolio_profitability_pretty_print( + profitability, profitability_percent, reference +) -> str: + """ + Profitability pretty printer + :param profitability: the profitability + :param profitability_percent: the profitability percent + :param reference: the reference + :return: the profitability pretty printed + """ + difference = ( + f"({get_min_string_from_number(profitability_percent, 5)}%)" + if profitability_percent is not None + else "" + ) + return f"{get_min_string_from_number(profitability, 5)} {reference} {difference}" + + +def pretty_print_dict(dict_content, default="0", markdown=False) -> str: + """ + Dict pretty printer + :param dict_content: the dict to be printed + :param default: the default printed + :param markdown: if printer use markdown + :return: the dict pretty printed + """ + _, _, code = get_markers(markdown) + if dict_content: + result_str = octobot_commons.DICT_BULLET_TOKEN_STR + return ( + f"{result_str}{code}" + f"{octobot_commons.DICT_BULLET_TOKEN_STR.join(f'{value} {key}' for key, value in dict_content.items())}" + f"{code}" + ) + return default + + +def round_with_decimal_count(number, max_digits=8) -> float: + """ + Round a decimal count + :param number: the number to round + :param max_digits: the digits + :return: the rounded number + """ + if number is None: + return 0 + return float(get_min_string_from_number(number, max_digits)) + + +def get_min_string_from_number(number, max_digits=None) -> str: + """ + Get a min string from number + :param number: the number + :param max_digits: the mex digits + :return: the string from number + """ + max_digits = _get_max_digits(number) if max_digits is None else max_digits + if number is None or round(number, max_digits) == 0.0: + return "0" + if number % 1 != 0: + number_str = number_util.round_into_str_with_max_digits(number, max_digits) + # remove post comma trailing 0 + if "." in number_str: + # remove "0" first and only the "." to avoid removing 2x"0" in 10.0 and returning 1 for example. + number_str = number_str.rstrip("0").rstrip(".") + return number_str + return "{:f}".format(number).split(".")[0] + + +# return markers for italic, bold and code +def get_markers(markdown=False) -> (str, str, str): + """ + Get the markdown markers + :param markdown: if printer use markdown + :return: the italic marker, the bold marker, the code marker + """ + if markdown: + return ( + enums.MarkdownFormat.ITALIC.value, + enums.MarkdownFormat.BOLD.value, + enums.MarkdownFormat.CODE.value, + ) + return "", "", "" diff --git a/packages/commons/octobot_commons/profiles/__init__.py b/packages/commons/octobot_commons/profiles/__init__.py new file mode 100644 index 0000000000..8c5b764d06 --- /dev/null +++ b/packages/commons/octobot_commons/profiles/__init__.py @@ -0,0 +1,88 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.profiles import profile + +from octobot_commons.profiles.profile import ( + Profile, +) + +from octobot_commons.profiles import profile_sharing +from octobot_commons.profiles.profile_sharing import ( + export_profile, + install_profile, + import_profile, + import_profile_data_as_profile, + update_profile, + download_profile, + download_and_install_profile, +) + +from octobot_commons.profiles import profile_data + +from octobot_commons.profiles.profile_data import ( + ProfileData, + ExchangeData, + MinimalFund, + OptionsData, +) + +from octobot_commons.profiles import profile_sync + +from octobot_commons.profiles.profile_sync import ( + start_profile_synchronizer, + stop_profile_synchronizer, +) + +from octobot_commons.profiles import exchange_auth_data + +from octobot_commons.profiles.exchange_auth_data import ( + ExchangeAuthData, +) + +from octobot_commons.profiles import tentacles_profile_data_translator + +from octobot_commons.profiles.tentacles_profile_data_translator import ( + TentaclesProfileDataTranslator, +) + +from octobot_commons.profiles import tentacles_profile_data_adapter + +from octobot_commons.profiles.tentacles_profile_data_adapter import ( + TentaclesProfileDataAdapter, +) + + +__all__ = [ + "Profile", + "export_profile", + "install_profile", + "import_profile", + "import_profile_data_as_profile", + "update_profile", + "download_profile", + "download_and_install_profile", + "ProfileData", + "ExchangeData", + "MinimalFund", + "OptionsData", + "start_profile_synchronizer", + "stop_profile_synchronizer", + "TentaclesProfileDataTranslator", + "TentaclesProfileDataAdapter", + "ExchangeAuthData", +] diff --git a/packages/commons/octobot_commons/profiles/exchange_auth_data.py b/packages/commons/octobot_commons/profiles/exchange_auth_data.py new file mode 100644 index 0000000000..af12b9ad3f --- /dev/null +++ b/packages/commons/octobot_commons/profiles/exchange_auth_data.py @@ -0,0 +1,68 @@ +# pylint: disable=C0103,R0902 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses + +import octobot_commons.dataclasses +import octobot_commons.constants + + +@dataclasses.dataclass +class ExchangeAuthData(octobot_commons.dataclasses.FlexibleDataclass): + internal_name: str + exchange_credential_id: str = "" + encrypted: str = "" + api_key: str = "" + api_secret: str = "" + api_password: str = "" + exchange_type: str = octobot_commons.constants.DEFAULT_EXCHANGE_TYPE + sandboxed: bool = False + + def apply_to_exchange_config(self, config): + """ + Updates the given Configuration object to use the local authentication data + :param config: Configuration object to update + """ + applied = False + for exchange, exchange_config in config.config[ + octobot_commons.constants.CONFIG_EXCHANGES + ].items(): + if exchange == self.internal_name: + self._apply_config(exchange_config) + applied = True + break + if not applied: + # exchange doesn't already exist: add it + exchange_config = {octobot_commons.constants.CONFIG_ENABLED_OPTION: True} + self._apply_config(exchange_config) + config.config[octobot_commons.constants.CONFIG_EXCHANGES][ + self.internal_name + ] = exchange_config + + def _apply_config(self, exchange_config: dict): + exchange_config[octobot_commons.constants.CONFIG_EXCHANGE_KEY] = self.api_key + exchange_config[octobot_commons.constants.CONFIG_EXCHANGE_SECRET] = ( + self.api_secret + ) + exchange_config[octobot_commons.constants.CONFIG_EXCHANGE_PASSWORD] = ( + self.api_password + ) + exchange_config[octobot_commons.constants.CONFIG_EXCHANGE_SANDBOXED] = ( + self.sandboxed + ) + exchange_config[octobot_commons.constants.CONFIG_EXCHANGE_TYPE] = ( + self.exchange_type + ) diff --git a/packages/commons/octobot_commons/profiles/profile.py b/packages/commons/octobot_commons/profiles/profile.py new file mode 100644 index 0000000000..98a0b693c1 --- /dev/null +++ b/packages/commons/octobot_commons/profiles/profile.py @@ -0,0 +1,450 @@ +# pylint: disable=R0902, W0703 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import copy +import os +import shutil +import uuid +import octobot_commons.constants as constants +import octobot_commons.enums as enums +import octobot_commons.logging as commons_logging +import octobot_commons.json_util as json_util +import octobot_commons.errors as errors + + +class Profile: + """ + A profile is managing an OctoBot local configuration: activated tentacles, exchanges, currencies and + trading settings. + """ + + FULLY_MANAGED_ELEMENTS = [ + constants.CONFIG_DISTRIBUTION, + constants.CONFIG_CRYPTO_CURRENCIES, + constants.CONFIG_TRADING, + constants.CONFIG_TRADER, + constants.CONFIG_SIMULATOR, + ] + PARTIALLY_MANAGED_ELEMENTS = { + constants.CONFIG_EXCHANGES: { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_ENABLED_OPTION: False, + constants.CONFIG_EXCHANGE_TYPE: constants.DEFAULT_EXCHANGE_TYPE, + } + } + PARTIALLY_MANAGED_ELEMENTS_FORCED_DEFAULT_KEYS = { + constants.CONFIG_EXCHANGES: { + constants.CONFIG_ENABLED_OPTION: False, + } + } + PARTIALLY_MANAGED_ELEMENTS_ALLOWED_KEYS = { + constants.CONFIG_EXCHANGES: [ + constants.CONFIG_ENABLED_OPTION, + constants.CONFIG_EXCHANGE_TYPE, + ] + } + + def __init__(self, profile_path: str, schema_path: str = None): + self.profile_id: str = None + self.path: str = profile_path + self.schema_path: str = schema_path or constants.PROFILE_FILE_SCHEMA + self.name: str = None + self.slug: str = None + self.description: str = None + self.avatar: str = None + self.avatar_path: str = None + self.origin_url: str = None + self.auto_update: bool = False + self.read_only: bool = False + self.hidden: bool = False + self.imported: bool = False + self.complexity: enums.ProfileComplexity = enums.ProfileComplexity.MEDIUM + self.risk: enums.ProfileRisk = enums.ProfileRisk.MODERATE + self.profile_type: enums.ProfileType = enums.ProfileType.LIVE + self.extra_backtesting_time_frames = [] + + self.config: dict = {} + + def read_config(self): + """ + Reads a profile from self.path + :return: self + """ + return self.from_dict(json_util.read_file(self.config_file())) + + def from_dict(self, profile_dict: dict): + """ + Reads a profile from the given dict + :return: self + """ + profile_config = profile_dict.get(constants.CONFIG_PROFILE, {}) + self.profile_id = profile_config.get(constants.CONFIG_ID, str(uuid.uuid4())) + self.name = profile_config.get(constants.CONFIG_NAME, "") + self.slug = profile_config.get(constants.CONFIG_SLUG, "") + self.description = profile_config.get(constants.CONFIG_DESCRIPTION, "") + self.avatar = profile_config.get(constants.CONFIG_AVATAR, "") + self.origin_url = profile_config.get(constants.CONFIG_ORIGIN_URL, None) + self.auto_update = profile_config.get(constants.CONFIG_AUTO_UPDATE, False) + self.read_only = profile_config.get(constants.CONFIG_READ_ONLY, False) + self.hidden = profile_config.get(constants.CONFIG_HIDDEN, False) + self.imported = profile_config.get(constants.CONFIG_IMPORTED, False) + self.complexity = enums.ProfileComplexity( + profile_config.get( + constants.CONFIG_COMPLEXITY, enums.ProfileComplexity.MEDIUM.value + ) + ) + self.risk = enums.ProfileRisk( + profile_config.get(constants.CONFIG_RISK, enums.ProfileRisk.MODERATE.value) + ) + self.profile_type = enums.ProfileType( + profile_config.get(constants.CONFIG_TYPE, enums.ProfileType.LIVE.value) + ) + self.extra_backtesting_time_frames = profile_config.get( + constants.CONFIG_EXTRA_BACKTESTING_TIME_FRAMES, [] + ) + self.config = self.apply_default_values(profile_dict[constants.PROFILE_CONFIG]) + if self.avatar and self.path: + avatar_path = os.path.join(self.path, self.avatar) + if os.path.isfile(avatar_path): + self.avatar_path = avatar_path + return self + + def save_config(self, global_config: dict): + """ + Save this profile config + :param global_config: the bot config containing profile data + :return: None + """ + for element in self.FULLY_MANAGED_ELEMENTS: + if element in global_config: + self.config[element] = global_config[element] + self.sync_partially_managed_elements(global_config) + self.validate_and_save_config() + + def remove_deleted_elements(self, global_config): + """ + Removes elements from self.PARTIALLY_MANAGED_ELEMENTS + that are in profile but not in global config + """ + for element in self.PARTIALLY_MANAGED_ELEMENTS: + if element in global_config and element in self.config: + current_elements = list(self.config[element]) + to_keep_elements = set(global_config[element]) + for key in current_elements: + if key not in to_keep_elements: + self.config[element].pop(key) + + def sync_partially_managed_elements(self, global_config): + """ + Update the partially managed elements of this profile using the given configuration + """ + for element in self.PARTIALLY_MANAGED_ELEMENTS: + if element in global_config: + allowed_keys = self.PARTIALLY_MANAGED_ELEMENTS_ALLOWED_KEYS.get( + element, None + ) + if allowed_keys is not None: + self._filter_fill_elements( + global_config, self.config, element, allowed_keys + ) + + def validate(self): + """ + Validate this profile configuration against self.schema_path + :return: + """ + try: + json_util.validate(self.as_dict(), self.schema_path) + except FileNotFoundError as err: + commons_logging.get_logger("ProfileSaver").warning( + f"Impossible to validate profile: {err} ({err.__class__.__name__})" + ) + + def validate_and_save_config(self) -> None: + """ + JSON validates this profile and then saves its configuration file + :return: None + """ + self.validate() + self.save() + + def save(self) -> None: + """ + Saves the current profile configuration file + :return: None + """ + json_util.safe_dump(self.as_dict(), self.config_file()) + + def rename_folder(self, new_name, should_raise) -> str: + """ + rename the profile folder + :param new_name: name of the new folder + :param should_raise: raises ProfileConflictError if the profile can't be renamed + :return: the new profile path + """ + new_path = os.path.join(os.path.split(self.path)[0], new_name) + if os.path.exists(new_path): + if should_raise: + raise errors.ProfileConflictError( + "Skipping folder renaming: a profile already exists at this path" + ) + return self.path + try: + os.rename(self.path, new_path) + self.path = new_path + except Exception as err: + commons_logging.get_logger("ProfileRenamer").error( + f"Error when renaming profile: {err}" + ) + raise errors.ProfileConflictError from err + return self.path + + def duplicate(self, name: str = None, description: str = None): + """ + Duplicates the current profile and associates it with a new profile_id + :param name: name of the profile to create, uses the original's one by default + :param description: description of the profile to create, uses the original's one by default + :return: the created profile + """ + clone = copy.deepcopy(self) + clone.name = name or clone.name + clone.description = description or clone.description + clone.profile_id = str(uuid.uuid4()) + clone.read_only = False + clone.imported = False + clone.origin_url = None + clone.auto_update = False + try: + clone.path = os.path.join( + os.path.split(self.path)[0], f"{clone.name}_{clone.profile_id}" + ) + shutil.copytree(self.path, clone.path) + except OSError: + # invalid profile name for a filename + clone.path = os.path.join(os.path.split(self.path)[0], clone.profile_id) + shutil.copytree(self.path, clone.path) + clone.save() + return clone + + def get_tentacles_config_path(self) -> str: + """ + :return: The tentacles configurations path + """ + return os.path.join(self.path, constants.CONFIG_TENTACLES_FILE) + + def as_dict(self) -> dict: + """ + :return: A dict representation of this profile configuration + """ + return { + constants.CONFIG_PROFILE: { + constants.CONFIG_ID: self.profile_id, + constants.CONFIG_NAME: self.name, + constants.CONFIG_SLUG: self.slug, + constants.CONFIG_DESCRIPTION: self.description, + constants.CONFIG_AVATAR: self.avatar, + constants.CONFIG_ORIGIN_URL: self.origin_url, + constants.CONFIG_AUTO_UPDATE: self.auto_update, + constants.CONFIG_READ_ONLY: self.read_only, + constants.CONFIG_HIDDEN: self.hidden, + constants.CONFIG_IMPORTED: self.imported, + constants.CONFIG_COMPLEXITY: ( + self.complexity.value if self.complexity else None + ), + constants.CONFIG_RISK: self.risk.value if self.risk else None, + constants.CONFIG_TYPE: ( + self.profile_type.value if self.profile_type else None + ), + constants.CONFIG_EXTRA_BACKTESTING_TIME_FRAMES: self.extra_backtesting_time_frames, + }, + constants.PROFILE_CONFIG: self.config, + } + + def config_file(self): + """ + :return: the path to this profile config file + """ + return os.path.join(self.path, constants.PROFILE_CONFIG_FILE) + + def merge_partially_managed_element_into_config(self, config: dict, element: str): + """ + Merge this profile configuration's partially managed element into the given config + :param config: dict to merge this profile configuration's partially managed element into + :param element: the partially managed element to merge + :return: None + """ + Profile._merge_partially_managed_element( + config, self.config, element, Profile.PARTIALLY_MANAGED_ELEMENTS[element] + ) + + @staticmethod + def _merge_partially_managed_element( + config: dict, profile_config: dict, element: str, template: dict + ): + if element in config: + Profile._merge_profile_values(config, profile_config, element, template) + Profile._apply_forced_default_values(config, profile_config, element) + else: + # use profile value for element + config[element] = { + key: Profile._get_element_from_template(template, val) + for key, val in profile_config[element].items() + } + + @staticmethod + def _merge_profile_values( + config: dict, profile_config: dict, element: str, template: dict + ): + for key, val in profile_config[element].items(): + if key in config[element]: + if isinstance(config[element][key], dict): + # merge profile values for element[key] + Profile._merge_partially_managed_element( + config[element], profile_config[element], key, template + ) + else: + # overwrite element[key] by profile value + config[element][key] = copy.deepcopy(profile_config[element][key]) + else: + # use profile value for element[key] + if isinstance(val, dict): + config[element][key] = Profile._get_element_from_template( + template, val + ) + else: + config[element][key] = val + + @staticmethod + def _apply_forced_default_values(config: dict, profile_config: dict, element: str): + if element in Profile.PARTIALLY_MANAGED_ELEMENTS_FORCED_DEFAULT_KEYS: + for config_key, config_val in config[element].items(): + if config_key not in profile_config[element]: + for config_sub_element in config_val: + if ( + config_sub_element + in Profile.PARTIALLY_MANAGED_ELEMENTS_FORCED_DEFAULT_KEYS[ + element + ] + ): + # item not in profile, it will be added to profile upon save + # use forced default profile value for forced default keys + config[element][config_key][config_sub_element] = ( + Profile.PARTIALLY_MANAGED_ELEMENTS_FORCED_DEFAULT_KEYS[ + element + ][config_sub_element] + ) + + @staticmethod + def _get_element_from_template(template: dict, profile_values: dict) -> dict: + merged_values = copy.deepcopy(template) + merged_values.update(profile_values) + return merged_values + + @staticmethod + def _filter_fill_elements( + config: dict, profile_config: dict, element: str, allowed_keys: list + ): + if element in config: + # reset profile element to avoid saving outdated data + profile_config[element] = {} + for key, value in config[element].items(): + if isinstance(value, dict): + # handle nested elements + Profile._filter_fill_elements( + config[element], profile_config[element], key, allowed_keys + ) + else: + # save allowed keys + if key in allowed_keys: + profile_config[element][key] = value + + @staticmethod + def load_profile(profiles_path, profile_id, schema_path: str = None): + """ + :param profiles_path: the path to look for the profile + :param profile_id: the required profile id + :return: the loaded profile + """ + for profile in Profile.get_all_profiles(profiles_path, schema_path=schema_path): + if profile.profile_id == profile_id: + return profile + raise errors.NoProfileError(f"No profile with id: {profile_id}") + + @staticmethod + def get_all_profiles(profiles_path, ignore: str = None, schema_path: str = None): + """ + Loads profiles found in the given directory + :param profiles_path: Path to a directory containing profiles + :param ignore: A profile path to ignore + :param schema_path: Path to the json schema to pass to the created profile instances + :return: the profile instances list + """ + profiles = [] + ignored_path = None if ignore is None else os.path.normpath(ignore) + for profile_entry in os.scandir(profiles_path): + if ( + ignored_path is None + or os.path.normpath(profile_entry.path) != ignored_path + ): + profile = Profile._load_profile(profile_entry.path, schema_path) + if profile is not None: + profiles.append(profile) + return profiles + + @staticmethod + def _load_profile(profile_path: str, schema_path: str): + logger = commons_logging.get_logger("ProfileExplorer") + profile = Profile(profile_path, schema_path) + try: + if os.path.isfile(profile.config_file()): + profile.read_config() + return profile + logger.debug( + f"Ignored {profile_path} as it does not contain a profile configuration" + ) + except Exception as err: + logger.exception( + err, + True, + f"Ignored profile due to an error upon reading '{profile_path}': {err}", + ) + return None + + @staticmethod + def get_all_profiles_ids(profiles_path, ignore: str = None): + """ + Get ids of profiles found in the given directory + :param profiles_path: Path to a directory containing profiles + :param ignore: A profile path to ignore in ids listing + :return: the profile ids list + """ + return [ + profile.profile_id + for profile in Profile.get_all_profiles(profiles_path, ignore) + ] + + @staticmethod + def apply_default_values(config: dict) -> dict: + """ + Apply default values to the given config + :param config: the config to apply default values to + :return: the config with default values applied + """ + if constants.CONFIG_DISTRIBUTION not in config: + config[constants.CONFIG_DISTRIBUTION] = constants.DEFAULT_DISTRIBUTION + return config diff --git a/packages/commons/octobot_commons/profiles/profile_data.py b/packages/commons/octobot_commons/profiles/profile_data.py new file mode 100644 index 0000000000..8676b557eb --- /dev/null +++ b/packages/commons/octobot_commons/profiles/profile_data.py @@ -0,0 +1,356 @@ +# pylint: disable=C0103,R0902,C0301 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import copy +import dataclasses +import typing + +import octobot_commons.profiles.profile as profile_import +import octobot_commons.dataclasses +import octobot_commons.constants as constants + + +@dataclasses.dataclass +class ProfileDetailsData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + name: str = "" + id: typing.Union[str, None] = None + bot_id: typing.Union[str, None] = None + version: typing.Union[str, None] = None + user_id: typing.Union[str, None] = None + nested_strategy_config_id: typing.Union[str, None] = None + + +@dataclasses.dataclass +class CryptoCurrencyData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + trading_pairs: list[str] = dataclasses.field(default_factory=list) + name: typing.Union[str, None] = None + enabled: bool = True + + +@dataclasses.dataclass +class ExchangeData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + exchange_credential_id: typing.Union[str, None] = ( + None # deprecated, use exchange_account_id instead, exchange_credential_id is updated in exchange_data.auth_details + ) + internal_name: typing.Union[str, None] = None + exchange_type: str = constants.DEFAULT_EXCHANGE_TYPE + exchange_id: typing.Union[str, None] = None + exchange_account_id: typing.Union[str, None] = None + sandboxed: bool = False + + +@dataclasses.dataclass +class FutureSymbolData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + symbol: typing.Optional[str] = None + leverage: typing.Union[float, None] = None + + +@dataclasses.dataclass +class FutureExchangeData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + default_leverage: typing.Union[float, None] = None + symbol_data: list[FutureSymbolData] = dataclasses.field(default_factory=list) + + # pylint: disable=E1134 + def __post_init__(self): + if self.symbol_data and isinstance(self.symbol_data[0], dict): + self.symbol_data = [ + FutureSymbolData.from_dict(symbol_datum) + for symbol_datum in self.symbol_data + ] + + +@dataclasses.dataclass +class TraderData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + enabled: bool = True + + +@dataclasses.dataclass +class TraderSimulatorData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + enabled: bool = False + starting_portfolio: dict[str, float] = dataclasses.field(default_factory=dict) + maker_fees: float = 0.1 + taker_fees: float = 0.1 + + +@dataclasses.dataclass +class MinimalFund( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + asset: typing.Optional[str] = None + available: typing.Optional[float] = None + total: typing.Optional[float] = None + + @classmethod + def from_dict(cls, dict_value: dict): + to_use_dict = copy.copy(dict_value) + if "value" in dict_value: + if "available" not in dict_value: + to_use_dict["available"] = dict_value["value"] + if "total" not in dict_value: + to_use_dict["total"] = dict_value["value"] + return super().from_dict(to_use_dict) + + +@dataclasses.dataclass +class TradingData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + reference_market: str = "" + minimal_funds: list[MinimalFund] = dataclasses.field(default_factory=list) + risk: float = 1.0 + sub_portfolio: dict[str, float] = dataclasses.field(default_factory=dict) + sellable_assets: typing.Optional[list[str]] = None + paused: bool = False + + # pylint: disable=E1134 + def __post_init__(self): + if self.minimal_funds and isinstance(self.minimal_funds[0], dict): + self.minimal_funds = [ + MinimalFund.from_dict(minimal_fund) + for minimal_fund in self.minimal_funds + ] + + +@dataclasses.dataclass +class TentaclesData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + name: typing.Optional[str] = None + config: dict = dataclasses.field(default_factory=dict) + + +@dataclasses.dataclass +class BacktestingContext( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + start_time_delta: float = 0 + update_interval: float = 7 * constants.DAYS_TO_SECONDS + starting_portfolio: dict = dataclasses.field(default_factory=dict) + exchanges: list[str] = dataclasses.field(default_factory=list) + + +@dataclasses.dataclass +class OptionsData( + octobot_commons.dataclasses.FlexibleDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + values: dict = dataclasses.field(default_factory=dict) + + +@dataclasses.dataclass +class ProfileData( + octobot_commons.dataclasses.MinimizableDataclass, + octobot_commons.dataclasses.UpdatableDataclass, +): + profile_details: ProfileDetailsData = dataclasses.field( + default_factory=ProfileDetailsData + ) + crypto_currencies: list[CryptoCurrencyData] = dataclasses.field( + default_factory=list + ) + trading: TradingData = dataclasses.field(default_factory=TradingData) + exchanges: list[ExchangeData] = dataclasses.field(default_factory=list) + future_exchange_data: FutureExchangeData = dataclasses.field( + default_factory=FutureExchangeData + ) + trader: TraderData = dataclasses.field(default_factory=TraderData) + trader_simulator: TraderSimulatorData = dataclasses.field( + default_factory=TraderSimulatorData + ) + tentacles: list[TentaclesData] = dataclasses.field(default_factory=list) + backtesting_context: BacktestingContext = dataclasses.field( + default_factory=BacktestingContext + ) + options: OptionsData = dataclasses.field(default_factory=OptionsData) + distribution: str = constants.DEFAULT_DISTRIBUTION + + # pylint: disable=E1134 + def __post_init__(self): + if self.crypto_currencies and isinstance(self.crypto_currencies[0], dict): + self.crypto_currencies = [ + CryptoCurrencyData.from_dict(crypto_currency) + for crypto_currency in self.crypto_currencies + ] + if self.exchanges and isinstance(self.exchanges[0], dict): + self.exchanges = [ + ExchangeData.from_dict(exchange) for exchange in self.exchanges + ] + if self.tentacles and isinstance(self.tentacles[0], dict): + self.tentacles = ( + [TentaclesData.from_dict(tentacle) for tentacle in self.tentacles] + if self.tentacles + else [] + ) + + @classmethod + def from_profile(cls, profile: profile_import.Profile): + """ + Creates a cls instance from the given profile + """ + profile_dict = profile.as_dict() + content = profile_dict[constants.PROFILE_CONFIG] + return cls.from_dict( + { + "profile_details": { + "id": profile_dict[constants.CONFIG_PROFILE][constants.CONFIG_ID], + "name": profile_dict[constants.CONFIG_PROFILE][ + constants.CONFIG_NAME + ], + }, + "crypto_currencies": [ + { + "trading_pairs": details.get(constants.CONFIG_CRYPTO_PAIRS, []), + "name": currency, + "enabled": details.get(constants.CONFIG_ENABLED_OPTION, True), + } + for currency, details in content[ + constants.CONFIG_CRYPTO_CURRENCIES + ].items() + ], + "trader": { + "enabled": content[constants.CONFIG_TRADER][ + constants.CONFIG_ENABLED_OPTION + ], + }, + "trader_simulator": { + "enabled": content[constants.CONFIG_SIMULATOR][ + constants.CONFIG_ENABLED_OPTION + ], + "starting_portfolio": content[constants.CONFIG_SIMULATOR][ + constants.CONFIG_STARTING_PORTFOLIO + ], + "maker_fees": content[constants.CONFIG_SIMULATOR][ + constants.CONFIG_SIMULATOR_FEES + ].get(constants.CONFIG_SIMULATOR_FEES_MAKER, 0.0), + "taker_fees": content[constants.CONFIG_SIMULATOR][ + constants.CONFIG_SIMULATOR_FEES + ].get(constants.CONFIG_SIMULATOR_FEES_TAKER, 0.0), + }, + "trading": { + "reference_market": content[constants.CONFIG_TRADING][ + constants.CONFIG_TRADER_REFERENCE_MARKET + ], + "risk": content[constants.CONFIG_TRADING][ + constants.CONFIG_TRADER_RISK + ], + }, + "tentacles": [], + } + ) + + def to_profile(self, to_create_profile_path: str) -> profile_import.Profile: + """ + Returns a new Profile from self + """ + profile = profile_import.Profile(to_create_profile_path) + profile.from_dict(self._to_profile_dict()) + return profile + + def set_tentacles_config(self, config_by_tentacle: dict): + """ + Update self.tentacles from the given config_by_tentacle + """ + self.tentacles = [ + TentaclesData(name=tentacle, config=config) + for tentacle, config in config_by_tentacle.items() + ] + + def get_config_by_tentacle(self) -> dict[typing.Optional[str], dict]: + """ + Returns a dictionary of tentacle names and their configurations + """ + return { + tentacle.name: tentacle.config + for tentacle in self.tentacles + } + + def get_traded_symbols(self) -> list[str]: + """ + Returns a list of traded symbols + """ + symbols = [] + for crypto_currency in self.crypto_currencies: + symbols.extend(crypto_currency.trading_pairs) + return symbols + + def _to_profile_dict(self) -> dict: + return { + constants.PROFILE_CONFIG: { + constants.CONFIG_CRYPTO_CURRENCIES: { + crypto_currency.name: { + constants.CONFIG_CRYPTO_PAIRS: crypto_currency.trading_pairs, + constants.CONFIG_ENABLED_OPTION: crypto_currency.enabled, + } + for crypto_currency in self.crypto_currencies + }, + constants.CONFIG_EXCHANGES: { + exchange_details.internal_name: { + constants.CONFIG_ENABLED_OPTION: True, + constants.CONFIG_EXCHANGE_TYPE: exchange_details.exchange_type, + } + for exchange_details in self.exchanges + }, + constants.CONFIG_TRADER: { + constants.CONFIG_ENABLED_OPTION: self.trader.enabled, + constants.CONFIG_LOAD_TRADE_HISTORY: True, + }, + constants.CONFIG_SIMULATOR: { + constants.CONFIG_ENABLED_OPTION: self.trader_simulator.enabled, + constants.CONFIG_STARTING_PORTFOLIO: self.trader_simulator.starting_portfolio + or ( + self.backtesting_context.starting_portfolio + if self.backtesting_context + else {} + ), + constants.CONFIG_SIMULATOR_FEES: { + constants.CONFIG_SIMULATOR_FEES_MAKER: self.trader_simulator.maker_fees, + constants.CONFIG_SIMULATOR_FEES_TAKER: self.trader_simulator.taker_fees, + }, + }, + constants.CONFIG_TRADING: { + constants.CONFIG_TRADER_REFERENCE_MARKET: self.trading.reference_market, + constants.CONFIG_TRADER_RISK: self.trading.risk, + constants.CONFIG_TRADER_PAUSED: self.trading.paused, + }, + constants.CONFIG_DISTRIBUTION: self.distribution, + }, + constants.CONFIG_PROFILE: dataclasses.asdict(self.profile_details), + } diff --git a/packages/commons/octobot_commons/profiles/profile_data_import.py b/packages/commons/octobot_commons/profiles/profile_data_import.py new file mode 100644 index 0000000000..24082d07ce --- /dev/null +++ b/packages/commons/octobot_commons/profiles/profile_data_import.py @@ -0,0 +1,276 @@ +# pylint: disable=R0913,W0718,W0706,C0415 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import copy +import os +import uuid + +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_commons.profiles.profile as profile_import +import octobot_commons.logging as bot_logging +import octobot_commons.json_util as json_util +import octobot_commons.constants as constants +import octobot_commons.aiohttp_util as aiohttp_util +import octobot_commons.enums as enums + +IMPORTED_AVATAR = "avatar" +IMPORTED_PROFILES_DEFAULT_EXTRA_BACKTESTING_TIMEFRAME = ( + enums.TimeFrames.FIFTEEN_MINUTES.value +) + + +def init_profile_directory( + output_path: str, +): + """ + :param output_path: profile folder path + """ + if os.path.exists(output_path): + raise OSError(f"{output_path} already exists") + os.mkdir(output_path) + + +async def convert_profile_data_to_profile_directory( + profile_data: profile_data_import.ProfileData, + output_path: str, + description: str = None, + risk: enums.ProfileRisk = enums.ProfileRisk.MODERATE, + auto_update: bool = False, + slug: str = None, + avatar_url: str = None, + force_simulator: bool = False, + aiohttp_session=None, + profile_to_update: profile_import.Profile = None, + changed: bool = False, +) -> bool: + """ + Creates a profile folder from the given ProfileData + :param profile_data: path to the profile zipped archive + :param description: profile description + :param risk: profile risk + :param slug: slug of the associated strategy + :param auto_update: True if the profile should be kept up-to-date + :param avatar_url: profile avatar_url + :param output_path: profile folder path + :param force_simulator: True if trader simulator should be forced in config + :param aiohttp_session: session to use + :param profile_to_update: profile to update instead of creating a new one + :param changed: if True, profile will be saved even if no change are identified + """ + profile = ( + profile_to_update + if profile_to_update + else _get_profile( + profile_data, + description, + risk, + output_path, + auto_update, + slug, + force_simulator, + ) + ) + # when updating profile, keep existing registered tentacles + import_registered_tentacles = profile_to_update is not None + # tentacles_config.json + tentacles_setup_config = _get_tentacles_setup_config( + profile_data, output_path, import_registered_tentacles + ) + if tentacles_setup_config.save_config(is_config_update=True): + changed = True + # specific_config + if _save_specific_config(profile_data, output_path, bool(profile_to_update)): + changed = True + # avatar file + if avatar_url: + try: + await _download_and_set_avatar( + profile, avatar_url, output_path, aiohttp_session + ) + except Exception as err: + bot_logging.get_logger(__name__).exception( + err, True, f"Error when downloading profile avatar: {err}" + ) + # finish with profile.json to include edits from previous methods + if changed: + profile.save() + return changed + + +def _get_profile( + profile_data: profile_data_import.ProfileData, + description: str, + risk: enums.ProfileRisk, + output_path: str, + auto_update: bool, + slug: str, + force_simulator: bool, +): + profile = profile_data.to_profile(output_path) + if force_simulator: + profile.config[constants.CONFIG_TRADER][constants.CONFIG_ENABLED_OPTION] = False + profile.config[constants.CONFIG_SIMULATOR][ + constants.CONFIG_ENABLED_OPTION + ] = True + profile.description = description + profile.risk = risk + profile.auto_update = auto_update + profile.slug = slug + profile.profile_id = str(uuid.uuid4().hex) + profile.read_only = True + profile.extra_backtesting_time_frames = [ + IMPORTED_PROFILES_DEFAULT_EXTRA_BACKTESTING_TIMEFRAME + ] + return profile + + +def get_updated_profile( + profile_to_update: profile_import.Profile, + profile_data: profile_data_import.ProfileData, +) -> bool: + """ + :param profile_to_update: the profile to be updated + :param profile_data: the profile_data to get the update from + :return: True if something changed in the updated profile + """ + updated_profile = profile_data.to_profile("") + changed = False + # update traded currencies (add new currencies) + origin_currencies = copy.deepcopy( + profile_to_update.config[constants.CONFIG_CRYPTO_CURRENCIES] + ) + profile_to_update.config[constants.CONFIG_CRYPTO_CURRENCIES] = { + **origin_currencies, + **updated_profile.config[constants.CONFIG_CRYPTO_CURRENCIES], + } + if ( + origin_currencies + != profile_to_update.config[constants.CONFIG_CRYPTO_CURRENCIES] + ): + changed = True + # update ref market + origin_ref_market = profile_to_update.config[constants.CONFIG_TRADING][ + constants.CONFIG_TRADER_REFERENCE_MARKET + ] + profile_to_update.config[constants.CONFIG_TRADING][ + constants.CONFIG_TRADER_REFERENCE_MARKET + ] = profile_data.trading.reference_market + if ( + origin_ref_market + != profile_to_update.config[constants.CONFIG_TRADING][ + constants.CONFIG_TRADER_REFERENCE_MARKET + ] + ): + changed = True + # leave other fields as is (tentacles config will be updated) + return changed + + +def _get_tentacles_setup_config( + profile_data: profile_data_import.ProfileData, + output_path: str, + import_registered_tentacles: bool, +): + try: + import octobot_tentacles_manager.api + import octobot_tentacles_manager.constants + + classes = [ + octobot_tentacles_manager.api.get_tentacle_class_from_string( + tentacle_data.name + ).__name__ + for tentacle_data in profile_data.tentacles + if tentacle_data.name not in ( + octobot_tentacles_manager.constants.IGNORED_TENTACLES_NAMES_IN_TENTACLES_SETUP_CONFIG + ) + ] + config_path = os.path.join(output_path, constants.CONFIG_TENTACLES_FILE) + tentacles_setup_config = ( + octobot_tentacles_manager.api.create_tentacles_setup_config_with_tentacles( + *classes, config_path=config_path + ) + ) + use_reference_registered_tentacles = ( + not tentacles_setup_config.registered_tentacles + ) + octobot_tentacles_manager.api.fill_with_installed_tentacles( + tentacles_setup_config, + import_registered_tentacles=import_registered_tentacles, + use_reference_registered_tentacles=use_reference_registered_tentacles, + ) + return tentacles_setup_config + except ImportError: + raise + + +def _save_specific_config( + profile_data: profile_data_import.ProfileData, + output_path: str, + is_config_update: bool, +) -> bool: + changed = False + try: + import octobot_tentacles_manager.constants + + specific_config_dir = os.path.join( + output_path, + octobot_tentacles_manager.constants.TENTACLES_SPECIFIC_CONFIG_FOLDER, + ) + if not os.path.exists(specific_config_dir): + os.mkdir(specific_config_dir) + for tentacle_config in profile_data.tentacles: + file_path = os.path.join( + specific_config_dir, + f"{tentacle_config.name}{octobot_tentacles_manager.constants.CONFIG_EXT}", + ) + if is_config_update and json_util.has_same_content( + file_path, tentacle_config.config + ): + # nothing to do + continue + changed = True + json_util.safe_dump( + tentacle_config.config, + file_path, + ) + except ImportError: + raise + return changed + + +async def _download_and_set_avatar( + profile, avatar_url, output_path: str, aiohttp_session +): + profile.avatar = _get_avatar_name(avatar_url) + try: + import aiofiles + + async with aiofiles.open( + os.path.join(output_path, profile.avatar), "wb+" + ) as downloaded_file: + await aiohttp_util.download_stream_file( + downloaded_file, + avatar_url, + aiohttp_session, + is_aiofiles_output_file=True, + ) + except ImportError: + raise + + +def _get_avatar_name(avatar_url: str): + # remove params and get file name with ext + return avatar_url.split("?")[0].split("/")[-1] diff --git a/packages/commons/octobot_commons/profiles/profile_sharing.py b/packages/commons/octobot_commons/profiles/profile_sharing.py new file mode 100644 index 0000000000..737107580a --- /dev/null +++ b/packages/commons/octobot_commons/profiles/profile_sharing.py @@ -0,0 +1,428 @@ +# pylint: disable=R0913,W0703 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import os +import zipfile +import shutil +import pathlib +import uuid +import time +import requests +import octobot_commons.constants as constants +import octobot_commons.enums as enums +import octobot_commons.logging as bot_logging +import octobot_commons.errors as errors +import octobot_commons.json_util as json_util +import octobot_commons.authentication as authentication + +try: + import jsonschema +except ImportError: + if constants.USE_MINIMAL_LIBS: + # mock jsonschema imports + class JsonschemaImportMock: + class exceptions: # pylint: disable=invalid-name + class ValidationError(Exception): + def __init__(self, *args): + raise ImportError("jsonschema not installed") + + jsonschema = JsonschemaImportMock() + else: + raise +# avoid cyclic import +from octobot_commons.profiles.profile import Profile +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_commons.profiles.profile_data_import as profile_data_importer + + +NON_OVERWRITTEN_PROFILE_FOLDERS = [] +NON_OVERWRITTEN_PROFILE_FILES = [constants.PROFILE_CONFIG_FILE] +try: + import octobot_tentacles_manager.constants as tentacles_manager_constants + + NON_OVERWRITTEN_PROFILE_FOLDERS.append( + tentacles_manager_constants.TENTACLES_SPECIFIC_CONFIG_FOLDER + ) +except ImportError: + pass + + +def export_profile(profile, export_path: str) -> str: + """ + Exports the given profile into export_path, appends ".zip" as a file extension + :param profile: profile to export + :param export_path: export path ending with filename + :return: the exported profile path including file extension + """ + temp_path = f"{export_path}{int(time.time() * 1000)}" + # remove any existing file to prevent any side effect + if os.path.exists(temp_path): + raise OSError(f"Can't export profile, the {temp_path} folder exists") + export_path_with_ext = f"{export_path}.{constants.PROFILE_EXPORT_FORMAT}" + if os.path.isfile(export_path_with_ext): + os.remove(export_path_with_ext) + # copy profile into a temp dir to edit it + shutil.copytree(profile.path, temp_path) + try: + _filter_profile_export(temp_path) + # export the edited profile + shutil.make_archive( + os.path.abspath(export_path), constants.PROFILE_EXPORT_FORMAT, temp_path + ) + finally: + shutil.rmtree(temp_path) + return export_path_with_ext + + +def install_profile( + import_path: str, + profile_name: str, + bot_install_path: str, + replace_if_exists: bool, + is_imported: bool, + origin_url: str = None, + quite: bool = False, + profile_schema: str = None, +) -> Profile: + """ + Installs the given profile export archive into the user's profile directory + :param import_path: path to the profile zipped archive + :param profile_name: name of the profile folder + :param bot_install_path: path to the octobot installation + :param replace_if_exists: when True erase the profile with the same name if it exists + :param is_imported: when True the profile is set as imported + :param origin_url: url the profile is coming from (if relevant) + :param quite: when True, only log errors + :param profile_schema: the schema to validate profile against + :return: The created profile + """ + logger = bot_logging.get_logger("ProfileSharing") + target_import_path = _get_target_import_path( + bot_install_path, profile_name, replace_if_exists + ) + action = "Creat" + if replace_if_exists: + action = "Updat" + if not quite: + logger.info(f"{action}ing {profile_name} profile.") + _import_profile_files(import_path, target_import_path) + profile = Profile(target_import_path, schema_path=profile_schema).read_config() + profile.imported = is_imported + profile.origin_url = origin_url + _ensure_unique_profile_id(profile) + if is_imported: + try: + profile.validate() + except jsonschema.exceptions.ValidationError as err: + shutil.rmtree(target_import_path) + raise errors.ProfileImportError( + f"Invalid imported profile: {err.message} in '{'/'.join(err.absolute_path)}'" + ) from err + profile.save() + if not quite: + logger.info(f"{action}ed {profile.name} ({profile_name}) profile.") + return profile + + +def import_profile( + import_path: str, + profile_schema: str, + name: str = None, + bot_install_path: str = ".", + origin_url: str = None, +) -> Profile: + """ + Imports the given profile export archive into the user's profile directory with the "imported_" prefix + :param import_path: path to the profile zipped archive + :param profile_schema: the schema to validate profile against + :param name: name of the profile folder + :param bot_install_path: path to the octobot installation + :param origin_url: url the profile is coming from + :return: The created profile + """ + temp_profile_name = _get_profile_name(name, import_path) + profile = install_profile( + import_path, + temp_profile_name, + bot_install_path, + False, + True, + origin_url=origin_url, + profile_schema=profile_schema, + ) + if profile.name != temp_profile_name: + profile.rename_folder(_get_unique_profile_folder_from_name(profile), False) + return profile + + +async def import_profile_data_as_profile( + profile_data: profile_data_import.ProfileData, + profile_schema: str, + aiohttp_session, + name: str = None, + description: str = None, + risk: enums.ProfileRisk = enums.ProfileRisk.MODERATE, + bot_install_path: str = ".", + origin_url: str = None, + logo_url: str = None, + auto_update: bool = False, + force_simulator: bool = False, +) -> Profile: + """ + Imports the given ProfileData into the user's profile directory with the "imported_" prefix + :param profile_data: path to the profile zipped archive + :param aiohttp_session: aiohttp session to use to download the profile logo + :param profile_schema: the schema to validate profile against + :param name: name of the profile folder + :param description: description of the profile + :param risk: risk of the profile + :param bot_install_path: path to the octobot installation + :param origin_url: url the profile is coming from + :param logo_url: url the profile avatar + :param auto_update: True if the profile should automatically be kept up-to-date + :param force_simulator: True if trader simulator should be forced in config + :return: The created profile + """ + logger = bot_logging.get_logger("ProfileSharing") + import_path = f"{name}-{uuid.uuid4().hex}" + try: + slug = profile_data.profile_details.name + profile_data.profile_details.name = name + profile_data_importer.init_profile_directory(import_path) + await profile_data_importer.convert_profile_data_to_profile_directory( + profile_data, + import_path, + description=description, + risk=risk, + auto_update=auto_update, + slug=slug, + avatar_url=logo_url, + force_simulator=force_simulator, + aiohttp_session=aiohttp_session, + ) + return import_profile( + import_path=import_path, + profile_schema=profile_schema, + name=name, + bot_install_path=bot_install_path, + origin_url=origin_url, + ) + finally: + try: + if os.path.isdir(import_path): + shutil.rmtree(import_path) + except Exception as err: + logger.exception(err, True, f"Error when removing profile temp dir: {err}") + + +async def update_profile( + profile: Profile, +) -> bool: + """ + :param profile: profile to update + """ + authenticator = authentication.Authenticator.instance() + profile_data = await authenticator.get_strategy_profile_data( + None, product_slug=profile.slug + ) + changed = profile_data_importer.get_updated_profile(profile, profile_data) + if await profile_data_importer.convert_profile_data_to_profile_directory( + profile_data, profile.path, profile_to_update=profile, changed=changed + ): + changed = True + return changed + + +def download_profile(url, target_file, timeout=60): + """ + Downloads a profile from the given url + :param url: profile url + :param target_file: path to save the file + :param timeout: time given to the request before timeout + :return: saved file path + """ + # unauthenticated download + with requests.get(url, stream=True, timeout=timeout) as req: + req.raise_for_status() + with open(target_file, "wb") as write_file: + for chunk in req.iter_content(chunk_size=8192): + write_file.write(chunk) + return target_file + + +def download_and_install_profile(download_url, profile_schema): + """ + :param download_url: profile url + :param profile_schema: the schema to validate profile against + :return: the installed profile, None if an error occurred + """ + logger = bot_logging.get_logger("ProfileSharing") + name = download_url.split("/")[-1] + file_path = None + try: + file_path = download_profile(download_url, name) + profile = import_profile( + file_path, profile_schema, name=name, origin_url=download_url + ) + logger.info( + f"Downloaded and installed {profile.name} from {profile.origin_url}" + ) + return profile + except errors.UnsupportedError as err: + logger.error(f"Error when installing profile: {err}") + return None + except Exception as err: + logger.exception(err, True, f"Error when installing profile: {err}") + return None + finally: + if file_path is not None and os.path.isfile(file_path): + os.remove(file_path) + + +def _get_profile_name(name, import_path): + profile_name = name or ( + f"{constants.IMPORTED_PROFILE_PREFIX}_{os.path.split(import_path)[-1]}" + ) + return profile_name.split(f".{constants.PROFILE_EXPORT_FORMAT}")[0] + + +def _filter_profile_export(profile_path: str): + profile_file = os.path.join(profile_path, constants.PROFILE_CONFIG_FILE) + if os.path.isfile(profile_file): + parsed_profile = json_util.read_file(profile_file) + _filter_disabled(parsed_profile, constants.CONFIG_EXCHANGES) + with open(profile_file, "w") as open_file: + json.dump(parsed_profile, open_file, indent=4, sort_keys=True) + + +def _filter_disabled(profile_config: dict, element): + filtered_exchanges = { + exchange: details + for exchange, details in profile_config[constants.PROFILE_CONFIG][ + element + ].items() + if details.get(constants.CONFIG_ENABLED_OPTION, True) + } + profile_config[constants.PROFILE_CONFIG][element] = filtered_exchanges + + +def _get_target_import_path( + bot_install_path: str, profile_name: str, replace_if_exists: bool +) -> str: + """ + Get the target profile folder path + :param bot_install_path: path to the octobot installation + :param profile_name: name of the profile folder + :param replace_if_exists: when True erase the profile with the same name if it exists + :return: (the final target import path, True if the profile is replaced) + """ + target_import_path = os.path.join( + bot_install_path, constants.USER_PROFILES_FOLDER, profile_name + ) + if replace_if_exists: + return target_import_path + return _get_unique_profile_folder(target_import_path) + + +def _import_profile_files(profile_path: str, target_profile_path: str) -> None: + """ + Copy or extract profile files to destination. Does not override local tentacles configuration + :param profile_path: the current profile path + :param target_profile_path: the target profile path + :return: None + """ + if zipfile.is_zipfile(profile_path): + with zipfile.ZipFile(profile_path) as zipped_profile: + for archive_member in zipped_profile.namelist(): + if _should_profile_file_be_imported( + target_profile_path, archive_member + ): + zipped_profile.extract(archive_member, target_profile_path) + else: + if not os.path.isdir(profile_path): + raise errors.UnsupportedError( + f"Profile format not supported ({profile_path})" + ) + + def _get_ignored_elements(current_dir, sub_elements): + return [ + element + for element in sub_elements + if not _should_profile_file_be_imported( + target_profile_path, + os.path.join(current_dir, element).replace( + f"{profile_path}{os.path.sep}", "" + ), # force local path + ) + ] + + shutil.copytree( + profile_path, + target_profile_path, + ignore=_get_ignored_elements, + dirs_exist_ok=True, + ) + + +def _should_profile_file_be_imported( + target_profile_path: str, profile_file_path: str +) -> bool: + for non_overwritten_element in NON_OVERWRITTEN_PROFILE_FOLDERS: + # ignore files in NON_OVERWRITTEN_PROFILE_FOLDERS that already exist + element_path = pathlib.Path(profile_file_path) + if ( + element_path.name in NON_OVERWRITTEN_PROFILE_FILES + or non_overwritten_element in element_path.parts[:-1] + ) and os.path.isfile(os.path.join(target_profile_path, profile_file_path)): + return False + return True + + +def _get_unique_profile_folder_from_name(profile) -> str: + folder = _get_unique_profile_folder( + os.path.join(os.path.split(profile.path)[0], profile.name) + ) + return os.path.split(folder)[1] + + +def _get_unique_profile_folder(target_import_path: str) -> str: + """ + Creates an unique profile folder name + :param target_import_path: the expected target profile folder name + :return: the unique profile folder name + """ + iteration = 1 + candidate = target_import_path + while os.path.exists(candidate) and iteration < 100: + iteration += 1 + candidate = f"{target_import_path}_{iteration}" + return candidate + + +def _ensure_unique_profile_id(profile) -> None: + """ + Ensure that no other installed profile has the same id + :param profile: the installed profile + :return: None + """ + ids = Profile.get_all_profiles_ids( + pathlib.Path(profile.path).parent, ignore=profile.path + ) + iteration = 1 + while profile.profile_id in ids and iteration < 100: + profile.profile_id = str(uuid.uuid4()) + iteration += 1 diff --git a/packages/commons/octobot_commons/profiles/profile_sync.py b/packages/commons/octobot_commons/profiles/profile_sync.py new file mode 100644 index 0000000000..a6d5b73029 --- /dev/null +++ b/packages/commons/octobot_commons/profiles/profile_sync.py @@ -0,0 +1,103 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.constants as commons_constants +import octobot_commons.singleton as singleton +import octobot_commons.logging as logging +import octobot_commons.async_job as async_job +import octobot_commons.authentication as authentication +import octobot_commons.profiles.profile_sharing as profile_sharing + + +class ProfileSynchronizer(singleton.Singleton): + """ + Async job to maintain the profile associated to the given configuration up-to-date + """ + + DEFAULT_SYNC_REFRESH_INTERVAL = ( + commons_constants.PROFILE_REFRESH_HOURS_INTERVAL + * commons_constants.HOURS_TO_SECONDS + ) + + def __init__(self, current_config, on_profile_change): + super().__init__() + self.current_config = current_config + self._on_profile_change = on_profile_change + self.sync_job = None + self.sync_interval = self.DEFAULT_SYNC_REFRESH_INTERVAL + self.logger = logging.get_logger(self.__class__.__name__) + + async def _sync_profile(self): + if not self.current_config.profile.auto_update: + self.logger.debug("Skipping profile update check: auto_update is False") + return + if not self.current_config.profile.slug: + self.logger.error( + "Impossible to check profile updates: profile slug is unset" + ) + return + self.logger.info(f"Synchronizing {self.current_config.profile.name} profile") + if await profile_sharing.update_profile(self.current_config.profile): + self.logger.info(f"{self.current_config.profile.name} profile updated") + await self._on_profile_change(self.current_config.profile.name) + else: + self.logger.info( + f"{self.current_config.profile.name} profile already up-to-date" + ) + + async def _should_sync_profiles(self): + return ( + await authentication.Authenticator.wait_and_check_has_open_source_package() + ) + + async def start(self) -> bool: + """ + Synch the profile if necessary + """ + if not await self._should_sync_profiles(): + self.logger.debug("Profile synch loop disabled") + return False + self.logger.debug("Starting profile synchronizer") + self.sync_job = async_job.AsyncJob( + self._sync_profile, + first_execution_delay=0, + execution_interval_delay=self.sync_interval, + ) + await self.sync_job.run() + return True + + def stop(self): + """ + Stop the synchronization loop + """ + if self.sync_job is not None and not self.sync_job.is_stopped(): + self.logger.debug("Stopping profile synchronizer") + self.sync_job.stop() + + +async def start_profile_synchronizer(current_config, on_profile_change): + """ + Start the clock synchronization loop if possible on this system + :return: True if the loop has been started + """ + return await ProfileSynchronizer.instance(current_config, on_profile_change).start() + + +async def stop_profile_synchronizer(): + """ + Stop the synchronization loop + """ + return ProfileSynchronizer.instance().stop() diff --git a/packages/commons/octobot_commons/profiles/tentacles_profile_data_adapter.py b/packages/commons/octobot_commons/profiles/tentacles_profile_data_adapter.py new file mode 100644 index 0000000000..385ce3a014 --- /dev/null +++ b/packages/commons/octobot_commons/profiles/tentacles_profile_data_adapter.py @@ -0,0 +1,54 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_commons.profiles.exchange_auth_data as exchange_auth_data_import + + +class TentaclesProfileDataAdapter: + """ + Used to adapt the content of a ProfileData using the given TentaclesData + """ + + def __init__( + self, + tentacles_data: list[profile_data_import.TentaclesData], + additional_data: dict, + authenticator, + auth_key: typing.Optional[str], + ): + self.tentacles_data: list[profile_data_import.TentaclesData] = tentacles_data + self.additional_data: dict = additional_data + self.authenticator = authenticator + self.auth_key = auth_key + + async def adapt( + self, + profile_data: profile_data_import.ProfileData, + auth_data: list[exchange_auth_data_import.ExchangeAuthData], + ) -> None: + """ + Use self.tentacles_data to adapt the given profile_data + """ + raise NotImplementedError("adapt is not implemented") + + @classmethod + def get_tentacle_name(cls) -> str: + """ + :return: the name of the adapter + """ + raise NotImplementedError("get_tentacle_name is not implemented") diff --git a/packages/commons/octobot_commons/profiles/tentacles_profile_data_translator.py b/packages/commons/octobot_commons/profiles/tentacles_profile_data_translator.py new file mode 100644 index 0000000000..c606b063c8 --- /dev/null +++ b/packages/commons/octobot_commons/profiles/tentacles_profile_data_translator.py @@ -0,0 +1,76 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.tentacles_management.class_inspector as class_inspector +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_commons.profiles.exchange_auth_data as exchange_auth_data_import +import octobot_commons.profiles.tentacles_profile_data_adapter as tentacles_profile_data_adapter + + +class TentaclesProfileDataTranslator: + """ + Translates a tentacle-specific configuration into a ProfileData + """ + + def __init__( + self, + profile_data: profile_data_import.ProfileData, + auth_data: list[exchange_auth_data_import.ExchangeAuthData], + ): + self.profile_data: profile_data_import.ProfileData = profile_data + self.auth_data: list[exchange_auth_data_import.ExchangeAuthData] = auth_data + + async def translate( + self, + tentacles_data: list[profile_data_import.TentaclesData], + additional_data: dict, + authenticator, + auth_key: typing.Optional[str], + ) -> None: + """ + updates self.profile_data by applying the given tentacles_data and + additional_data configuration + :param tentacles_data: the tentacles data to use + :param additional_data: other data that can be useful in translation + :param authenticator: authenticator to fetch data from if necessary + :param auth_key: auth key to used if necessary + :return: + """ + adapter = self._get_adapter(tentacles_data) + await adapter(tentacles_data, additional_data, authenticator, auth_key).adapt( + self.profile_data, self.auth_data + ) + + @classmethod + def _get_adapter(cls, tentacles_data: list[profile_data_import.TentaclesData]): + """ + :return: the first adapter matching a TentaclesData name + """ + adapters = cls._get_adapters() + for tentacles_data_element in tentacles_data: + if adapter := adapters.get(tentacles_data_element.name): + return adapter + raise KeyError("TentaclesData adapter not found") + + @classmethod + def _get_adapters(cls) -> dict: + return { + adapter.get_tentacle_name(): adapter + for adapter in class_inspector.get_all_classes_from_parent( + tentacles_profile_data_adapter.TentaclesProfileDataAdapter + ) + } diff --git a/packages/commons/octobot_commons/proxy_config.py b/packages/commons/octobot_commons/proxy_config.py new file mode 100644 index 0000000000..27825c9906 --- /dev/null +++ b/packages/commons/octobot_commons/proxy_config.py @@ -0,0 +1,152 @@ +# pylint: disable=too-many-instance-attributes +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import dataclasses +SOCKS_PROXY_AVAILABLE = False # pylint: disable=invalid-name +try: + import aiohttp_socks + SOCKS_PROXY_AVAILABLE = True # pylint: disable=invalid-name +except ImportError: + pass + + +DEFAULT_PROXY_HOST = "DEFAULT PROXY HOST" + + +def parse_socks_proxy_url_for_connector(proxy_url: str) -> typing.Tuple[bool, str]: + """ + Split a SOCKS proxy URL into reverse-DNS flag and URL for aiohttp_socks.ProxyConnector.from_url. + + socks5h:// means the proxy resolves hostnames remotely (reverse DNS). aiohttp_socks expects + socks5:// with rdns=True in that case. + """ + reverse_dns = proxy_url.startswith('socks5h://') + selected_proxy_url = ( + proxy_url if not reverse_dns else proxy_url.replace('socks5h://', 'socks5://') + ) + return reverse_dns, selected_proxy_url + + +@dataclasses.dataclass +class ProxyConfig: + """ + Proxy configuration class. + """ + # REST proxy + http_proxy: typing.Optional[str] = None + http_proxy_callback: typing.Optional[ + typing.Callable[[str, str, dict, typing.Any], typing.Optional[str]] + ] = None + https_proxy: typing.Optional[str] = None + https_proxy_callback: typing.Optional[ + typing.Callable[[str, str, dict, typing.Any], typing.Optional[str]] + ] = None + socks_proxy : typing.Optional[str] = None + socks_proxy_callback: typing.Optional[ + typing.Callable[[str, str, dict, typing.Any], typing.Optional[str]] + ] = None + # Websocket proxy + ws_proxy: typing.Optional[str] = None + wss_proxy: typing.Optional[str] = None + ws_socks_proxy: typing.Optional[str] = None + # if set, returns the last url given to a callback method that return "True", + # meaning the last url that used a proxy + get_last_proxied_request_url: typing.Optional[ + typing.Callable[[], typing.Optional[str]] + ] = None + get_proxy_url: typing.Optional[typing.Callable[[], str]] = None + # the host of this proxy, used to identify proxy connexion errors + proxy_host: str = DEFAULT_PROXY_HOST + _last_proxied_request_url: typing.Optional[str] = None + + def has_rest_proxy(self) -> bool: + """ + Returns True if any rest proxy is set. + """ + return bool( + self.http_proxy or self.https_proxy or self.socks_proxy or + self.http_proxy_callback or self.https_proxy_callback or self.socks_proxy_callback + ) + + def has_websocket_proxy(self) -> bool: + """ + Returns True if any websocket proxy is set. + """ + return bool(self.ws_proxy or self.wss_proxy or self.ws_socks_proxy) + + def has_proxy(self) -> bool: + """ + Returns True if any proxy is set. + """ + return self.has_rest_proxy() or self.has_websocket_proxy() + + def get_rest_proxy_url(self) -> typing.Optional[str]: + """ + Returns the first rest proxy url that is set. + Prioritizes https proxy, then http proxy. + """ + return self.https_proxy or self.http_proxy + + def get_rest_socks_proxy_connector(self) -> "aiohttp_socks.ProxyConnector": + """ + Returns the socks proxy connector that is set. + """ + return self._socks_proxy_factory(self.socks_proxy, "socks_proxy") + + def get_websocket_proxy_url(self) -> typing.Optional[str]: + """ + Returns the first websocket proxy url that is set. + Prioritizes wss proxy, then ws proxy. + """ + return self.wss_proxy or self.ws_proxy + + def get_websocket_proxy_connector(self) -> "aiohttp_socks.ProxyConnector": + """ + Returns the wss proxy connector that is set. + """ + return self._socks_proxy_factory(self.wss_proxy, "wss_proxy") + + def _socks_proxy_factory( + self, proxy_url: typing.Optional[str], + proxy_type: str + ) -> "aiohttp_socks.ProxyConnector": + """ + Returns the socks proxy connector that is set. + """ + if not SOCKS_PROXY_AVAILABLE: + raise ImportError("aiohttp_socks is not available") + if proxy_url is None: + raise ValueError(f"{proxy_type} proxy url is not set") + reverse_dns, selected_proxy_url = parse_socks_proxy_url_for_connector(proxy_url) + return aiohttp_socks.ProxyConnector.from_url( + selected_proxy_url, + rdns=reverse_dns if reverse_dns else None, + ) + + def get_aiohttp_session_proxy_args(self) -> dict: + """ + Returns the arguments for aiohttp.ClientSession to use the proxy. + """ + if self.socks_proxy: + return { + "connector": self.get_rest_socks_proxy_connector(), + } + if self.has_rest_proxy(): + return { + "proxy": self.get_rest_proxy_url(), + } + return {} diff --git a/packages/commons/octobot_commons/signals/__init__.py b/packages/commons/octobot_commons/signals/__init__.py new file mode 100644 index 0000000000..ed58d67827 --- /dev/null +++ b/packages/commons/octobot_commons/signals/__init__.py @@ -0,0 +1,79 @@ +# Copyright +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from octobot_commons.signals import signal + +from octobot_commons.signals.signal import ( + Signal, +) + + +from octobot_commons.signals import signal_dependencies + +from octobot_commons.signals.signal_dependencies import ( + SignalDependencies, +) + +from octobot_commons.signals import signal_bundle + +from octobot_commons.signals.signal_bundle import ( + SignalBundle, +) + +from octobot_commons.signals import signal_bundle_builder + +from octobot_commons.signals.signal_bundle_builder import ( + SignalBundleBuilder, +) + +from octobot_commons.signals import signal_factory + +from octobot_commons.signals.signal_factory import ( + create_signal_bundle, + create_signal, +) + +from octobot_commons.signals import signals_emitter + +from octobot_commons.signals.signals_emitter import ( + emit_signal_bundle, +) + +from octobot_commons.signals import signal_builder_wrapper + +from octobot_commons.signals.signal_builder_wrapper import ( + SignalBuilderWrapper, +) + +from octobot_commons.signals import signal_publisher + +from octobot_commons.signals.signal_publisher import ( + SignalPublisher, +) + + +__all__ = [ + "Signal", + "SignalBundle", + "create_signal_bundle", + "create_signal", + "emit_signal_bundle", + "SignalBuilderWrapper", + "SignalPublisher", + "SignalDependencies", +] diff --git a/packages/commons/octobot_commons/signals/signal.py b/packages/commons/octobot_commons/signals/signal.py new file mode 100644 index 0000000000..4239d6c4e9 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal.py @@ -0,0 +1,47 @@ +# pylint: disable=C0116 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import octobot_commons.enums + +import octobot_commons.signals.signal_dependencies as signal_dependencies + + +class Signal: + def __init__( + self, + topic: str, + content: dict, + dependencies: typing.Optional[signal_dependencies.SignalDependencies] = None, + **_, + ): + self.topic: str = topic + self.content: dict = content + self.dependencies: typing.Optional[signal_dependencies.SignalDependencies] = ( + dependencies + ) + + def to_dict(self) -> dict: + return { + octobot_commons.enums.SignalsAttrs.TOPIC.value: self.topic, + octobot_commons.enums.SignalsAttrs.CONTENT.value: self.content, + octobot_commons.enums.SignalsAttrs.DEPENDENCIES.value: ( + self.dependencies.to_dict() if self.dependencies else None + ), + } + + def __str__(self): + return f"{self.to_dict()}" diff --git a/packages/commons/octobot_commons/signals/signal_builder_wrapper.py b/packages/commons/octobot_commons/signals/signal_builder_wrapper.py new file mode 100644 index 0000000000..432df960e7 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal_builder_wrapper.py @@ -0,0 +1,52 @@ +# pylint: disable=C0116 +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import time + +import octobot_commons.signals.signal_bundle_builder as signal_bundle_builder + + +class SignalBuilderWrapper: + NO_TIMEOUT_VALUE = -1 + + def __init__( + self, + identifier: str, + signal_builder_class=signal_bundle_builder.SignalBundleBuilder, + timeout: float = NO_TIMEOUT_VALUE, + builder_args: tuple = None, + ): + self.signal_builder_class = signal_builder_class + self.signal_bundle_builder = ( + signal_builder_class(identifier, *builder_args) + if builder_args + else signal_builder_class(identifier) + ) + self.is_being_emitted = False + self.timeout = timeout + self.timeout_event = asyncio.Event() + self.signal_emit_time = time.time() + timeout + self._users_count = 0 + + def register_user(self): + self._users_count += 1 + + def unregister_user(self): + self._users_count -= 1 + + def has_single_user(self): + return self._users_count == 1 diff --git a/packages/commons/octobot_commons/signals/signal_bundle.py b/packages/commons/octobot_commons/signals/signal_bundle.py new file mode 100644 index 0000000000..88e209fd05 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal_bundle.py @@ -0,0 +1,45 @@ +# pylint: disable=C0116 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums + + +class SignalBundle: + def __init__(self, identifier: str, signals=None, version=None): + self.identifier: str = identifier + self.signals: list = signals or [] + self.version: str = version or self._get_version() + + def to_dict(self) -> dict: + return { + octobot_commons.enums.SignalBundlesAttrs.IDENTIFIER.value: self.identifier, + octobot_commons.enums.SignalBundlesAttrs.SIGNALS.value: [ + signal.to_dict() for signal in self.signals + ], + octobot_commons.enums.SignalBundlesAttrs.VERSION.value: self.version, + } + + def __str__(self): + return f"{self.to_dict()}" + + # pylint: disable=C0415 + def _get_version(self) -> str: + try: + import octobot.constants + + return octobot.constants.COMMUNITY_FEED_CURRENT_MINIMUM_VERSION + except ImportError: + return "1.0.0" diff --git a/packages/commons/octobot_commons/signals/signal_bundle_builder.py b/packages/commons/octobot_commons/signals/signal_bundle_builder.py new file mode 100644 index 0000000000..c18ef2e8a1 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal_bundle_builder.py @@ -0,0 +1,87 @@ +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.signals.signal_bundle as signal_bundle +import octobot_commons.signals.signal as signal +import octobot_commons.signals.signal_dependencies as signal_dependencies +import octobot_commons.logging as logging + + +class SignalBundleBuilder: + DEFAULT_SIGNAL_CLASS = signal.Signal + + def __init__(self, identifier: str): + self.signals: list = [] + self.identifier: str = identifier + self.version: str = None + self.signal_class = self.__class__.DEFAULT_SIGNAL_CLASS + self.logger = logging.get_logger(self.__class__.__name__) + self.reset() + + def register_signal( + self, + topic: str, + content: dict, + dependencies: typing.Optional[signal_dependencies.SignalDependencies] = None, + **kwargs + ): + """ + Store a signal to be packed on build call + """ + self.signals.append( + self.create_signal(topic, content, dependencies=dependencies, **kwargs) + ) + + def create_signal( + self, + topic: str, + content: dict, + dependencies: typing.Optional[signal_dependencies.SignalDependencies] = None, + **kwargs + ): + """ + Create a signal from self.signal_class + """ + return self.signal_class(topic, content, dependencies=dependencies, **kwargs) + + def is_empty(self) -> bool: + """ + Return True when no signal are to be built + """ + return not self.signals + + def build(self) -> signal_bundle.SignalBundle: + """ + Create a signal_bundle.SignalBundle from registered signals + """ + return signal_bundle.SignalBundle( + self.identifier, + signals=self.signals, + version=self.version, + ) + + def sort_signals(self): + """ + Implement if necessary + """ + return self + + def reset(self): + """ + Remove all registered signals + """ + self.signals = [] diff --git a/packages/commons/octobot_commons/signals/signal_dependencies.py b/packages/commons/octobot_commons/signals/signal_dependencies.py new file mode 100644 index 0000000000..f1ee761c45 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal_dependencies.py @@ -0,0 +1,69 @@ +# pylint: disable=C0116 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.enums + + +class SignalDependencies: + def __init__(self, dependencies: typing.Optional[list[dict]] = None): + self.dependencies: list[dict] = dependencies if dependencies else [] + + def extend(self, dependencies: "SignalDependencies"): + self.dependencies.extend(dependencies.dependencies) + + def is_filled_by(self, filled_dependencies: "SignalDependencies") -> bool: + for dependency in self.dependencies: + has_missing_dependency = True + for filled_dependency in filled_dependencies.dependencies: + has_missing_dependency = bool( + { + key: value + for key, value in dependency.items() + if key not in filled_dependency + or filled_dependency[key] != value + } + ) + if not has_missing_dependency: + break + # iterated over all filled_dependency and did not break, + # this dependency is not filled + if has_missing_dependency: + return False + # all dependencies are filled + return True + + def to_dict(self) -> dict: + return { + octobot_commons.enums.SignalDependenciesAttrs.DEPENDENCY.value: self.dependencies, + } + + def __str__(self): + return f"{self.to_dict()}" + + def __repr__(self): + return self.__str__() + + def __eq__(self, other: "SignalDependencies") -> bool: + if self is other: + return True + if not isinstance(other, SignalDependencies): + return False + return self.dependencies == other.dependencies + + def __bool__(self) -> bool: + return bool(self.dependencies) diff --git a/packages/commons/octobot_commons/signals/signal_factory.py b/packages/commons/octobot_commons/signals/signal_factory.py new file mode 100644 index 0000000000..1b0b847c86 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal_factory.py @@ -0,0 +1,52 @@ +# pylint: disable=C0116 +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.signals.signal as signal +import octobot_commons.signals.signal_bundle as signal_bundle +import octobot_commons.signals.signal_dependencies as signal_dependencies +import octobot_commons.enums as commons_enums + + +def create_signal_bundle(signal_bundle_dict: dict) -> signal_bundle.SignalBundle: + signal_bundle_value = signal_bundle_dict[ + commons_enums.CommunityFeedAttrs.VALUE.value + ] + return signal_bundle.SignalBundle( + signal_bundle_value.get(commons_enums.SignalBundlesAttrs.IDENTIFIER.value), + signals=[ + create_signal(s) + for s in signal_bundle_value.get( + commons_enums.SignalBundlesAttrs.SIGNALS.value, [] + ) + ], + version=signal_bundle_value.get(commons_enums.SignalBundlesAttrs.VERSION.value), + ) + + +def create_signal(signal_dict: dict) -> signal.Signal: + return signal.Signal( + signal_dict.get(commons_enums.SignalsAttrs.TOPIC.value), + signal_dict.get(commons_enums.SignalsAttrs.CONTENT.value), + ( + signal_dependencies.SignalDependencies( + signal_dict.get(commons_enums.SignalsAttrs.DEPENDENCIES.value).get( + commons_enums.SignalDependenciesAttrs.DEPENDENCY.value + ) + ) + if signal_dict.get(commons_enums.SignalsAttrs.DEPENDENCIES.value) + else None + ), + ) diff --git a/packages/commons/octobot_commons/signals/signal_publisher.py b/packages/commons/octobot_commons/signals/signal_publisher.py new file mode 100644 index 0000000000..db5e503e0f --- /dev/null +++ b/packages/commons/octobot_commons/signals/signal_publisher.py @@ -0,0 +1,149 @@ +# pylint: disable=R0913, W0718 +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import contextlib +import time + +import octobot_commons.singleton as singleton +import octobot_commons.logging as logging +import octobot_commons.errors as errors +import octobot_commons.signals.signals_emitter as signals_emitter +import octobot_commons.signals.signal_bundle_builder as signal_bundle_builder +import octobot_commons.signals.signal_builder_wrapper as signal_builder_wrapper + + +class SignalPublisher(singleton.Singleton): + DEFAULT_SIGNAL_BUILDER_CLASS = signal_bundle_builder.SignalBundleBuilder + + def __init__(self): + self._signal_builder_wrappers = {} + self._timeout_watcher_tasks = {} + + def get_signal_bundle_builder( + self, wrapper_key: str + ) -> signal_builder_wrapper.SignalBuilderWrapper: + """ + Return the SignalBuilderWrapper registered under the given key + """ + try: + return self._signal_builder_wrappers[wrapper_key].signal_bundle_builder + except KeyError as err: + raise errors.MissingSignalBuilder from err + + @contextlib.asynccontextmanager + async def remote_signal_bundle_builder( + self, + wrapper_key: str, + identifier: str, + timeout: float = signal_builder_wrapper.SignalBuilderWrapper.NO_TIMEOUT_VALUE, + signal_builder_class=DEFAULT_SIGNAL_BUILDER_CLASS, + builder_args=None, + ): + """ + Context manager ensuring that any signal under the given key is buildable and sent + when context manager is closing. + Use signal_builder_class to specify signal builders to create + """ + signal_builder_wrap = None + try: + signal_builder_wrap = self._create_or_get_signal_builder_wrapper( + wrapper_key, identifier, timeout, signal_builder_class, builder_args + ) + signal_builder_wrap.register_user() + self._register_timeout_if_any(wrapper_key) + yield signal_builder_wrap.signal_bundle_builder + # send the full signal when the last user is done + if signal_builder_wrap.has_single_user(): + await self._emit_signal_if_necessary(signal_builder_wrap) + finally: + if signal_builder_wrap is not None: + if signal_builder_wrap.has_single_user(): + self._unregister_timeout(wrapper_key) + self._signal_builder_wrappers.pop(wrapper_key, None) + else: + signal_builder_wrap.unregister_user() + + def stop(self): + """ + Stop all timeout tasks and clear any remaining registered wrapper + :return: + """ + logging.get_logger(self.__class__.__name__).debug("Stopping ...") + for task in self._timeout_watcher_tasks.values(): + task.cancel() + self._timeout_watcher_tasks = {} + self._signal_builder_wrappers = {} + logging.get_logger(self.__class__.__name__).debug("Stopped ...") + + def _create_or_get_signal_builder_wrapper( + self, + wrapper_key: str, + identifier: str, + timeout: float, + signal_builder_class, + builder_args: tuple, + ) -> signal_builder_wrapper.SignalBuilderWrapper: + if wrapper_key in self._signal_builder_wrappers: + return self._signal_builder_wrappers[wrapper_key] + self._signal_builder_wrappers[wrapper_key] = ( + signal_builder_wrapper.SignalBuilderWrapper( + identifier, signal_builder_class, timeout, builder_args + ) + ) + return self._signal_builder_wrappers[wrapper_key] + + async def _emit_signal_if_necessary(self, signal_builder_wrap): + # check has_single_user in case the same builder is used multiple times at once + if ( + not signal_builder_wrap.signal_bundle_builder.is_empty() + and not signal_builder_wrap.is_being_emitted + ): + try: + signal_builder_wrap.is_being_emitted = True + await signals_emitter.emit_signal_bundle( + signal_builder_wrap.signal_bundle_builder.build() + ) + except Exception as err: + logging.get_logger(self.__class__.__name__).exception( + err, True, f"Unexpected error when emitting signal: {err}" + ) + finally: + # always reset builder after emitting to avoid emitting the same signal twice + signal_builder_wrap.signal_bundle_builder.reset() + signal_builder_wrap.is_being_emitted = False + + async def _schedule_signal_auto_emit(self, wrapper_key, delay): + while wrapper_key in self._signal_builder_wrappers: + await asyncio.sleep(delay) + if wrapper_key not in self._signal_builder_wrappers: + # signal should not be emitted anymore + break + wrapper = self._signal_builder_wrappers[wrapper_key] + if time.time() >= wrapper.signal_emit_time: + await self._emit_signal_if_necessary(wrapper) + wrapper.signal_emit_time = time.time() + delay + + def _register_timeout_if_any(self, wrapper_key): + wrapper = self._signal_builder_wrappers[wrapper_key] + if wrapper.timeout != wrapper.NO_TIMEOUT_VALUE: + self._timeout_watcher_tasks[wrapper_key] = asyncio.create_task( + self._schedule_signal_auto_emit(wrapper_key, wrapper.timeout) + ) + + def _unregister_timeout(self, wrapper_key): + if task := self._timeout_watcher_tasks.pop(wrapper_key, None): + task.cancel() diff --git a/packages/commons/octobot_commons/signals/signals_emitter.py b/packages/commons/octobot_commons/signals/signals_emitter.py new file mode 100644 index 0000000000..aa0d9b49e8 --- /dev/null +++ b/packages/commons/octobot_commons/signals/signals_emitter.py @@ -0,0 +1,29 @@ +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as commons_enums +import octobot_commons.authentication as authentication +import octobot_commons.signals.signal_bundle as signal_bundle + + +async def emit_signal_bundle(to_send_signal_bundle: signal_bundle.SignalBundle): + """ + Emits a signal bundle + """ + await authentication.Authenticator.instance().send( + to_send_signal_bundle.to_dict(), + commons_enums.CommunityChannelTypes.SIGNAL, + identifier=to_send_signal_bundle.identifier, + ) diff --git a/packages/commons/octobot_commons/singleton/__init__.py b/packages/commons/octobot_commons/singleton/__init__.py new file mode 100644 index 0000000000..3452b2c82c --- /dev/null +++ b/packages/commons/octobot_commons/singleton/__init__.py @@ -0,0 +1,23 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.singleton import singleton_class + +from octobot_commons.singleton.singleton_class import Singleton + +__all__ = [ + "Singleton", +] diff --git a/packages/commons/octobot_commons/singleton/singleton_class.py b/packages/commons/octobot_commons/singleton/singleton_class.py new file mode 100644 index 0000000000..ef56ef5985 --- /dev/null +++ b/packages/commons/octobot_commons/singleton/singleton_class.py @@ -0,0 +1,48 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class Singleton: + """ + From https://stackoverflow.com/questions/51245056/singleton-is-not-working-in-cython + """ + + _instances = {} + + @classmethod + def instance(cls, *args, **kwargs): + """ + Create the instance if not already created + Return the class instance + :param args: the constructor arguments + :param kwargs: the constructor optional arguments + :return: the class only instance + """ + if cls not in cls._instances: + cls._instances[cls] = cls(*args, **kwargs) + return cls._instances[cls] + + @classmethod + def get_instance_if_exists(cls): + """ + Return the instance if it exist + Return the class instance if it exist + :return: the class only instance if it exist otherwise None + """ + try: + return cls._instances[cls] + except KeyError: + return None diff --git a/packages/commons/octobot_commons/str_util.py b/packages/commons/octobot_commons/str_util.py new file mode 100644 index 0000000000..1ca53b5620 --- /dev/null +++ b/packages/commons/octobot_commons/str_util.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import re + + +def camel_to_snake(name: str) -> str: + """Convert CamelCase to snake_case (e.g. for DSL operator names).""" + return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower() diff --git a/packages/commons/octobot_commons/support.py b/packages/commons/octobot_commons/support.py new file mode 100644 index 0000000000..1534f87e3d --- /dev/null +++ b/packages/commons/octobot_commons/support.py @@ -0,0 +1,29 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc + + +class Support: + """ + Abstract class to be implemented when using supports + """ + + @abc.abstractmethod + def is_supporting(self) -> bool: + """ + Return True when supporting + """ + raise NotImplementedError diff --git a/packages/commons/octobot_commons/symbols/__init__.py b/packages/commons/octobot_commons/symbols/__init__.py new file mode 100644 index 0000000000..b7445cbb68 --- /dev/null +++ b/packages/commons/octobot_commons/symbols/__init__.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.symbols import symbol_util + +from octobot_commons.symbols.symbol_util import ( + parse_symbol, + merge_symbol, + merge_currencies, + convert_symbol, + is_symbol, + is_usd_like_coin, + get_most_common_usd_like_symbol, +) + +from octobot_commons.symbols import symbol + +from octobot_commons.symbols.symbol import ( + Symbol, +) + + +__all__ = [ + "parse_symbol", + "merge_symbol", + "merge_currencies", + "convert_symbol", + "is_symbol", + "is_usd_like_coin", + "get_most_common_usd_like_symbol", + "Symbol", +] diff --git a/packages/commons/octobot_commons/symbols/symbol.py b/packages/commons/octobot_commons/symbols/symbol.py new file mode 100644 index 0000000000..96cc9d6127 --- /dev/null +++ b/packages/commons/octobot_commons/symbols/symbol.py @@ -0,0 +1,231 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import re +import typing + +import octobot_commons + +_FULL_SYMBOL_GROUPS_REGEX = r"([^//]*)\/([^:]*):?([^-]*)-?([^-]*)-?([^-]*)-?([^-]*)" + + +# pylint: disable=R0902 +class Symbol: + # base / quote : settlement-identifier-strike price-type + # Inspired from CCXT https://docs.ccxt.com/en/latest/manual.html#option: + # // + # // base asset or currency + # // ↓ + # // ↓ quote asset or currency + # // ↓ ↓ + # // ↓ ↓ settlement asset or currency + # // ↓ ↓ ↓ + # // ↓ ↓ ↓ identifier (settlement date) + # // ↓ ↓ ↓ ↓ + # // ↓ ↓ ↓ ↓ strike price + # // ↓ ↓ ↓ ↓ ↓ + # // ↓ ↓ ↓ ↓ ↓ type, put (P) or call (C) + # // ↓ ↓ ↓ ↓ ↓ ↓ + # 'BTC/USDT:BTC-211225-60000-P' // BTC/USDT put option contract strike price 60000 USDT settled in BTC (inverse) + # on 2021-12-25 + # 'ETH/USDT:USDT-211225-40000-C' // BTC/USDT call option contract strike price 40000 USDT settled in USDT (linear, + # vanilla) on 2021-12-25 + # 'ETH/USDT:ETH-210625-5000-P' // ETH/USDT put option contract strike price 5000 USDT settled in ETH (inverse) + # on 2021-06-25 + # 'ETH/USDT:USDT-210625-5000-C' // ETH/USDT call option contract strike price 5000 USDT settled in USDT (linear, + # vanilla) on 2021-06-25 + + def __init__( + self, + symbol_str: str, + market_separator: str = octobot_commons.MARKET_SEPARATOR, + settlement_separator: str = octobot_commons.SETTLEMENT_ASSET_SEPARATOR, + option_separator: str = octobot_commons.OPTION_SEPARATOR, + ): + self.symbol_str: str = symbol_str + self.base: typing.Optional[str] = None + self.quote: typing.Optional[str] = None + self.settlement_asset: typing.Optional[str] = None + self.identifier: typing.Optional[str] = None + self.strike_price: typing.Optional[str] = None + self.option_type: typing.Optional[str] = None + self.market_separator: str = market_separator + self.settlement_separator: str = settlement_separator + self.option_separator: str = option_separator + self.parse_symbol(self.symbol_str) + + def parse_symbol(self, symbol_str: str): + """ + Parse the specified symbol + :param symbol_str: the symbol to parse + """ + if self.settlement_separator in symbol_str: + ( + self.base, + self.quote, + self.settlement_asset, + self.identifier, + self.strike_price, + self.option_type, + ) = _parse_symbol_full(_FULL_SYMBOL_GROUPS_REGEX, symbol_str) + self.option_type = _parse_option_type(self.option_type) + else: + # simple (probably spot) pair, use str.split as it is much faster + self.base, self.quote = _parse_spot_symbol( + self.market_separator, symbol_str + ) + self.settlement_asset = self.identifier = self.strike_price = "" + self.option_type = None + + def base_and_quote(self) -> typing.Tuple[str, str]: + """ + return a tuple made of this symbol's base and quote assets + """ + return self.base, self.quote + + def merged_str_symbol( + self, + market_separator: str = octobot_commons.MARKET_SEPARATOR, + settlement_separator: str = octobot_commons.SETTLEMENT_ASSET_SEPARATOR, + option_separator: str = octobot_commons.OPTION_SEPARATOR, + ) -> str: + """ + return the base/quote representation of this symbol. includes settlement asset if set + """ + merged_symbol = f"{self.base}{market_separator}{self.quote}" + if self.settlement_asset: + merged_symbol = ( + f"{merged_symbol}{settlement_separator}{self.settlement_asset}" + ) + if self.strike_price and self.identifier and self.option_type: + details = [ + "", + self.identifier, + str(self.strike_price), + _parse_option_type(self.option_type), + ] + merged_symbol = f"{merged_symbol}{option_separator.join(details)}" + return merged_symbol + + def merged_str_base_and_quote_only_symbol( + self, + market_separator: str = octobot_commons.MARKET_SEPARATOR, + ): + """ + return the base/quote representation of this symbol. includes settlement asset if set + """ + return f"{self.base}{market_separator}{self.quote}" + + def is_perpetual_future(self): + """ + return True when this symbol is related to a perpetual future contract + """ + return self.settlement_asset and not ( + self.identifier or self.strike_price or self.option_type + ) + + def is_spot(self): + """ + return True when this symbol is related to a spot asset + """ + return self.base and self.quote and not self.settlement_asset + + def is_future(self): + """ + return True when this symbol is related to a non-perpetual future contract + """ + return bool( + self.settlement_asset and not (self.strike_price or self.option_type) + ) + + def does_expire(self): + """ + return True when this symbol is related to a contract that expires + """ + return bool(self.settlement_asset and self.identifier) + + def is_put_option(self): + """ + return True when this symbol is related to a put option contract + """ + return self.option_type == octobot_commons.enums.OptionTypes.PUT.value + + def is_call_option(self): + """ + return True when this symbol is related to a call option contract + """ + return self.option_type == octobot_commons.enums.OptionTypes.CALL.value + + def is_option(self): + """ + return True when this symbol is related to an option contract + """ + return bool( + self.settlement_asset + and self.identifier + and self.strike_price + and self.option_type + ) + + def is_linear(self): + """ + return True when this symbol is related to a linear contract based on the settlement_asset + """ + return self.quote == self.settlement_asset if self.settlement_asset else True + + def is_inverse(self): + """ + return True when this symbol is related to an inverse contract based on the settlement_asset + """ + return self.base == self.settlement_asset if self.settlement_asset else False + + def is_same_base_and_quote(self, other): + """* + :return: True if the given symbol has the same base and quote as self + """ + return self.base == other.base and self.quote == other.quote + + def __eq__(self, other): + return self is other or ( + isinstance(other, Symbol) + and self.symbol_str == other.symbol_str + and self.base == other.base + and self.quote == other.quote + and self.settlement_asset == other.settlement_asset + and self.identifier == other.identifier + and self.strike_price == other.strike_price + and self.option_type == other.option_type + ) + + def __str__(self): + return self.symbol_str + + def __repr__(self): + return str(self) + + +def _parse_symbol_full(full_symbol_regex, symbol_str): + return re.search(full_symbol_regex, symbol_str).groups() + + +def _parse_spot_symbol(separator, symbol_str): + split_result = symbol_str.split(separator) + if len(split_result) < 2: + return symbol_str, None + return split_result[0], split_result[1] + + +def _parse_option_type(option_type_str: typing.Optional[str]) -> typing.Optional[str]: + return option_type_str.upper() if option_type_str else None diff --git a/packages/commons/octobot_commons/symbols/symbol_util.py b/packages/commons/octobot_commons/symbols/symbol_util.py new file mode 100644 index 0000000000..7076376364 --- /dev/null +++ b/packages/commons/octobot_commons/symbols/symbol_util.py @@ -0,0 +1,149 @@ +# pylint: disable=R0913 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import functools +import typing +import collections + +import octobot_commons +import octobot_commons.constants as constants +import octobot_commons.symbols.symbol + + +_MAX_PARSED_SYMBOLS_CACHE_SIZE = 2048 + + +@functools.lru_cache(maxsize=_MAX_PARSED_SYMBOLS_CACHE_SIZE) +def parse_symbol(symbol: str): + """ + Parse the specified symbol into a Symbol object + :param symbol: the symbol to parse + :return: Symbol object + """ + return octobot_commons.symbols.symbol.Symbol(symbol) + + +def merge_symbol(symbol: str) -> str: + """ + Return merged currency and market without / + :param symbol: the specified symbol + :return: merged currency and market without / + """ + return symbol.replace(octobot_commons.MARKET_SEPARATOR, "").replace( + octobot_commons.SETTLEMENT_ASSET_SEPARATOR, "_" + ) + + +def is_symbol(value: str, separator: str = octobot_commons.MARKET_SEPARATOR) -> bool: + """ + Check if the given string is a symbol or a coin based on the separator + :param value: the string to check + :param separator: the separator to use for checking (e.g., "/" for "BTC/USDT") + :return: True if the string is a symbol (contains the separator), False if it's a coin (single currency) + """ + return separator in value + + +def merge_currencies( + currency: str, + market: str, + settlement_asset: typing.Optional[str] = None, + identifier: typing.Optional[str] = None, + strike_price: typing.Optional[str] = None, + option_type: typing.Optional[str] = None, + market_separator: str = octobot_commons.MARKET_SEPARATOR, + settlement_separator: str = octobot_commons.SETTLEMENT_ASSET_SEPARATOR, + option_separator: str = octobot_commons.OPTION_SEPARATOR, +) -> str: + """ + Merge currency and market + :param currency: the base currency + :param market: the quote currency + :param settlement_asset: the settlement asset currency (unused for spot trading) + :param strike_price: the strike price or time (used for options) + :param market_separator: the separator between currency and market + :param settlement_separator: the separator between the pair and reference market + :param option_separator: the separator between the option details + :return: currency and market merged + """ + symbol = octobot_commons.symbols.symbol.Symbol( + f"{currency}{market_separator}{market}", market_separator=market_separator + ) + if settlement_asset is not None: + symbol.settlement_asset = settlement_asset + if strike_price: + symbol.strike_price = strike_price + if identifier: + symbol.identifier = identifier + if option_type: + symbol.option_type = option_type + return symbol.merged_str_symbol( + market_separator=market_separator, + settlement_separator=settlement_separator, + option_separator=option_separator, + ) + + +def convert_symbol( + symbol: str, + symbol_separator: str, + new_symbol_separator: str = octobot_commons.MARKET_SEPARATOR, + should_uppercase: bool = False, + should_lowercase: bool = False, + base_and_quote_only: bool = False, +) -> str: + """ + Convert symbol according to parameter + :param symbol: the symbol to convert + :param symbol_separator: the symbol separator + :param new_symbol_separator: the new symbol separator + :param should_uppercase: if it should be concerted to uppercase + :param should_lowercase: if it should be concerted to lowercase + :param base_and_quote_only: if it should only contain base and quote from the given symbol + :return: + """ + if base_and_quote_only: + symbol = symbol.split(octobot_commons.SETTLEMENT_ASSET_SEPARATOR)[0] + if should_uppercase: + return symbol.replace(symbol_separator, new_symbol_separator).upper() + if should_lowercase: + return symbol.replace(symbol_separator, new_symbol_separator).lower() + return symbol.replace(symbol_separator, new_symbol_separator) + + +def is_usd_like_coin(coin: str) -> bool: + """ + :return: True if the given coin is a USD-like coin + """ + return coin in constants.USD_LIKE_COINS + + +def get_most_common_usd_like_symbol(pairs: list[str]) -> str: + """ + :return: The most common USD like symbol from the given pairs + """ + if not pairs: + raise ValueError("Pairs cannot be empty") + symbols = [] + for pair in pairs: + parsed = octobot_commons.symbols.symbol.Symbol(pair) + symbols.append(parsed.quote) + symbols.append(parsed.base) + counter = collections.Counter(symbols) + for symbol, _ in counter.most_common(): + if is_usd_like_coin(symbol): + return symbol + raise ValueError("Pairs cannot be empty") diff --git a/packages/commons/octobot_commons/system_resources_watcher.py b/packages/commons/octobot_commons/system_resources_watcher.py new file mode 100644 index 0000000000..2109ea4603 --- /dev/null +++ b/packages/commons/octobot_commons/system_resources_watcher.py @@ -0,0 +1,218 @@ +# pylint: disable=W0703,R0902 +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import threading +import tracemalloc +import csv +import gc + +import octobot_commons.constants as commons_constants +import octobot_commons.singleton as singleton +import octobot_commons.timestamp_util as timestamp_util +import octobot_commons.logging as logging +import octobot_commons.async_job as async_job +import octobot_commons.os_util as os_util + + +class SystemResourcesWatcher(singleton.Singleton): + DEFAULT_WATCHER_INTERVAL = ( + commons_constants.RESOURCES_WATCHER_MINUTES_INTERVAL + * commons_constants.MINUTE_TO_SECONDS + ) + CPU_WATCHING_SECONDS = 2 + + def __init__(self, dump_resources, watch_ram, output_file, dump_snapshot=False): + super().__init__() + self.watcher_job = None + self.watcher_interval: float = self.DEFAULT_WATCHER_INTERVAL + self.logger = logging.get_logger(self.__class__.__name__) + self.watch_ram = watch_ram + self.dump_resources = dump_resources + self.output_file = output_file + self.dump_snapshot = dump_snapshot + self.snapshot_file = f"{output_file}_snapshot.dump" + self.initialized_output = False + self.first_memory_snapshot = None + self.largest_peak = 0 + + def _log_memory(self): + self.logger.debug("Memory snapshot:") + # see https://docs.python.org/3/library/tracemalloc.html + snapshot = tracemalloc.take_snapshot() + if not self.first_memory_snapshot: + self.first_memory_snapshot = snapshot + + limit = 20 + top_stats = snapshot.compare_to(self.first_memory_snapshot, "lineno") + + # Summary + changes + print(f"[ Top {limit} differences ]") + for stat in top_stats[:limit]: + print(stat) + + # Top RAM users context + top_stats = snapshot.statistics("traceback") + print("Top %s lines" % limit) + for index, stat in enumerate(top_stats[:limit], 1): + frame = stat.traceback[0] + print( + "#%s: %s:%s: %.1f KiB" + % (index, frame.filename, frame.lineno, stat.size / 1024) + ) + for _line in stat.traceback.format(): + print(" %s" % _line) + + # Other stats + other = top_stats[limit:] + if other: + size = sum(stat.size for stat in other) + print("%s other: %.1f KiB" % (len(other), size / 1024)) + total = sum(stat.size for stat in top_stats) + print("Total allocated size: %.1f KiB" % (total / 1024)) + + latest_size, latest_peak = tracemalloc.get_traced_memory() + tracemalloc.reset_peak() + + # Memory peaks + self.largest_peak = max(latest_peak, self.largest_peak) + print( + f"{latest_size=}, latest_peak={latest_peak/1024} largest_peak={self.largest_peak/1024}" + ) + + def _exec_log_used_resources(self): + try: + # trigger garbage collector to get a fresh memory picture + gc.collect() + # warning: blocking to monitor CPU usage, to be used in a thread + cpu, percent_ram, ram, process_ram, virtual_ram, unique_ram = os_util.get_cpu_and_ram_usage( + self.CPU_WATCHING_SECONDS + ) + self.logger.debug( + f"Used system resources: {cpu}% CPU, {ram:,.3f} GB in RAM ({percent_ram}% of total) " + f"Process memory usage: {unique_ram:,.3f} unique (without shared), {process_ram:,.3f} " + f"total non-swapped, {virtual_ram:,.3f} total including swap - in GB." + ) + if self.dump_resources: + self._dump_resources(cpu, percent_ram, ram, process_ram) + if self.watch_ram: + self._log_memory() + if self.dump_snapshot: + snapshot = tracemalloc.take_snapshot() + snapshot.dump(self.snapshot_file) + self.logger.debug(f"Memory snapshot dumped to {self.snapshot_file}") + except Exception as err: + self.logger.exception(err, False) + self.logger.debug(f"Error when checking system resources: {err}") + + async def _log_used_resources(self): + threading.Thread( + target=self._exec_log_used_resources, + daemon=True, + name=f"{self.__class__.__name__}-_exec_log_used_resources", + ).start() + + def _dump_resources(self, cpu, percent_ram, ram, process_ram): + reset_file = not self.initialized_output + self.initialized_output = True + mode = "w" if reset_file else "a" + row = ( + str(element).replace(".", ",") + for element in ( + timestamp_util.get_now_time(), + process_ram, + cpu, + percent_ram, + ram, + ) + ) + with open(self.output_file, mode, newline="") as csv_file: + writer = csv.writer(csv_file, delimiter=";") + if reset_file: + writer.writerow( + [ + "TIME", + "PROCESS USED RAM", + "% USED CPU", + "% USED RAM", + "TOTAL USED RAM", + ] + ) + writer.writerow(row) + + async def start(self): + """ + Synch the clock and start the clock synchronization loop if possible on this system + """ + self.logger.debug("Starting system resources watcher") + self.watcher_job = async_job.AsyncJob( + self._log_used_resources, + execution_interval_delay=self.watcher_interval, + ) + await self.watcher_job.run() + if self.watch_ram or self.dump_snapshot: + self.logger.debug("RAM watched enabled") + stored_frames = 5 + tracemalloc.start(stored_frames) + + def stop(self): + """ + Stop the synchronization loop + """ + if self.watcher_job is not None and not self.watcher_job.is_stopped(): + self.logger.debug("Stopping system resources watcher") + self.watcher_job.stop() + if self.watch_ram or self.dump_snapshot: + self.logger.debug("Stopping RAM watcher") + tracemalloc.stop() + + +async def start_system_resources_watcher( + dump_resources, + watch_ram, + output_file, + dump_snapshot=False +): + """ + Start the resources watcher loop + """ + await SystemResourcesWatcher.instance( + dump_resources, watch_ram, output_file, dump_snapshot + ).start() + + +async def stop_system_resources_watcher(): + """ + Stop the watcher loop + """ + return SystemResourcesWatcher.instance().stop() + + +def analyze_dump(filename): + """ + Analyze a memory snapshot dump file and print the top lines by memory consumption + """ + try: + snapshot = tracemalloc.Snapshot.load(filename) + stats = snapshot.statistics('lineno') + print("Top 10 lines by memory consumption:") + for index, stat in enumerate(stats[:10]): + print(f"#{index+1}: {stat.size / 1024:.2f} KiB in {stat.count} blocks: {stat.traceback}") + except FileNotFoundError: + print(f"Error: Dump file '{filename}' not found.") + except Exception as e: + print(f"An error occurred while loading or analyzing the snapshot: {e}") + +#analyze_dump("system_resources.csv_snapshot.dump") diff --git a/packages/commons/octobot_commons/tentacles_management/__init__.py b/packages/commons/octobot_commons/tentacles_management/__init__.py new file mode 100644 index 0000000000..c7d5896615 --- /dev/null +++ b/packages/commons/octobot_commons/tentacles_management/__init__.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.tentacles_management import abstract_tentacle +from octobot_commons.tentacles_management import class_inspector + +from octobot_commons.tentacles_management.abstract_tentacle import AbstractTentacle +from octobot_commons.tentacles_management.class_inspector import ( + default_parent_inspection, + default_parents_inspection, + evaluator_parent_inspection, + trading_mode_parent_inspection, + get_class_from_parent_subclasses, + get_deep_class_from_parent_subclasses, + get_class_from_string, + is_abstract_using_inspection_and_class_naming, + get_all_classes_from_parent, + get_single_deepest_child_class, +) + +__all__ = [ + "AbstractTentacle", + "default_parent_inspection", + "default_parents_inspection", + "evaluator_parent_inspection", + "trading_mode_parent_inspection", + "get_class_from_parent_subclasses", + "get_deep_class_from_parent_subclasses", + "get_class_from_string", + "is_abstract_using_inspection_and_class_naming", + "get_all_classes_from_parent", + "get_single_deepest_child_class", +] diff --git a/packages/commons/octobot_commons/tentacles_management/abstract_tentacle.py b/packages/commons/octobot_commons/tentacles_management/abstract_tentacle.py new file mode 100644 index 0000000000..7764f42c06 --- /dev/null +++ b/packages/commons/octobot_commons/tentacles_management/abstract_tentacle.py @@ -0,0 +1,169 @@ +# pylint: disable=C0103,W0703,C0415 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import copy + + +import octobot_commons.enums as commons_enums +import octobot_commons.configuration.user_inputs as user_inputs +import octobot_commons.configuration as configuration + + +class AbstractTentacle: + """ + The parent class of any OctoBot tentacle + """ + + __metaclass__ = abc.ABCMeta + + ALLOW_SUPER_CLASS_CONFIG = ( + False # when True, the given tentacle can read its parent class configuration + ) + USER_INPUT_TENTACLE_TYPE = ( + commons_enums.UserInputTentacleTypes.UNDEFINED + ) # tentacle type, saved in user inputs + HISTORIZE_USER_INPUT_CONFIG = ( + False # when True, user input values can be saved and read from the run data db + ) + CLASS_UI = user_inputs.UserInputFactory( + commons_enums.UserInputTentacleTypes.UNDEFINED + ) # class-level user input factory. Used when initializing user inputs with classmethods + + def __init__(self): + self.logger = None + self.UI: user_inputs.UserInputFactory = user_inputs.UserInputFactory( + self.USER_INPUT_TENTACLE_TYPE + ) + self.UI.set_tentacle_class(self.__class__).set_tentacle_config_proxy( + self.get_local_config + ) + + @classmethod + def get_name(cls) -> str: + """ + Tentacle name based on class name + :return: the tentacle name + """ + return cls.__name__ + + @classmethod + def get_all_subclasses(cls) -> list: + """ + Return all subclasses of this tentacle + :return: the subclasses + """ + subclasses_list = cls.__subclasses__() + if cls.__subclasses__(): + for subclass in copy.deepcopy(subclasses_list): + subclasses_list += subclass.get_all_subclasses() + return subclasses_list + + @classmethod + def is_configurable(cls): + """ + Override if the tentacle is allowed to be configured + """ + return True + + @classmethod + def get_user_commands(cls) -> dict: + """ + Return the dict of user commands for this tentacle + :return: the commands dict + """ + return {} + + def get_local_config(self): + """ + Implementation required if cls.HISTORIZE_USER_INPUT_CONFIG is True + :return: the config of the tentacle + """ + raise NotImplementedError + + @classmethod + def create_local_instance(cls, config, tentacles_setup_config, tentacle_config): + """ + Implementation required if cls.HISTORIZE_USER_INPUT_CONFIG is True + :param config: the global configuration to give to the tentacle + :param tentacles_setup_config: the global tentacles setup configuration to give to the tentacle + :param tentacle_config: the tentacle configuration to give to the tentacle + :return: a local, aimed to be short-lived, tentacle instance + """ + raise NotImplementedError + + def init_user_inputs(self, inputs: dict) -> None: + """ + instance method API for user inputs. Used by load_and_save_user_inputs + Override if this tentacle has user inputs that should be initialized on a specific instance + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + + @classmethod + def init_user_inputs_from_class(cls, inputs: dict) -> None: + """ + classmethod API for user inputs. Used by init_user_inputs_from_class + Override if this tentacle has user inputs that can be initialized on a class level + Called by load_user_inputs_from_class, should define all the tentacle user inputs. + """ + + async def load_and_save_user_inputs(self, bot_id: str) -> dict: + """ + instance method API for user inputs + Initialize and save the tentacle user inputs in run data + :return: the filled user input configuration + """ + return await configuration.load_and_save_user_inputs(self, bot_id) + + @classmethod + def load_user_inputs_from_class( + cls, tentacles_setup_config, tentacle_config + ) -> dict: + """ + classmethod API for user inputs + Initialize the tentacle user inputs + Called by get_raw_config_and_user_inputs + """ + return configuration.load_user_inputs_from_class( + cls, tentacles_setup_config, tentacle_config + ) + + @classmethod + async def get_raw_config_and_user_inputs( + cls, config, tentacles_setup_config, bot_id + ): + """ + :return: the tentacle configuration and its list of user inputs + """ + if not cls.HISTORIZE_USER_INPUT_CONFIG: + return configuration.get_raw_config_and_user_inputs_from_class( + cls, tentacles_setup_config + ) + return await configuration.get_raw_config_and_user_inputs( + cls, config, tentacles_setup_config, bot_id + ) + + @classmethod + def get_tentacle_config_traded_symbols( + cls, config: dict, reference_market: str + ) -> list: + """ + :return: the traded symbols of the tentacle according to its tentacle configuration + """ + raise NotImplementedError( + "get_tentacle_config_traded_symbols is not implemented" + ) diff --git a/packages/commons/octobot_commons/tentacles_management/class_inspector.py b/packages/commons/octobot_commons/tentacles_management/class_inspector.py new file mode 100644 index 0000000000..1108225dff --- /dev/null +++ b/packages/commons/octobot_commons/tentacles_management/class_inspector.py @@ -0,0 +1,167 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import inspect + +import octobot_commons.logging as logging_util + + +def default_parent_inspection(element, parent): + """ + Check if the element bases has the specified parent + :param element: the element to check + :param parent: the expected parent + :return: the check result + """ + return parent in element.__bases__ + + +def default_parents_inspection(element, parent): + """ + Check if the element has the specified parent + :param element: the element to check + :param parent: the expected parent + :return: the check result + """ + return parent in element.mro() + + +def evaluator_parent_inspection(element, parent): + """ + Recursively check if the evaluator class has the specified parent + :param element: the element to check + :param parent: the expected parent + :return: the check result + """ + return hasattr( + element, "get_parent_evaluator_classes" + ) and element.get_parent_evaluator_classes(parent) + + +def trading_mode_parent_inspection(element, parent): + """ + Check if the trading class has the specified parent + :param element: the element to check + :param parent: the expected parent + :return: the check result + """ + return hasattr( + element, "get_parent_trading_mode_classes" + ) and element.get_parent_trading_mode_classes(parent) + + +def get_class_from_parent_subclasses(class_string, parent): + """ + Search the class string in parent subclasses + :param class_string: the class name to search + :param parent: the parent + :return: the class if found else None + """ + for found in parent.__subclasses__(): + if found.__name__ == class_string: + return found + return None + + +def get_deep_class_from_parent_subclasses(class_string, parent): + """ + Search for a class in parent subclasses "deeply" + :param class_string: the class name to search + :param parent: the expected parent + :return: the class if found else None + """ + found = get_class_from_parent_subclasses(class_string, parent) + if found is not None: + return found + + for parent_class in parent.__subclasses__(): + found = get_deep_class_from_parent_subclasses(class_string, parent_class) + if found is not None: + return found + return None + + +def get_class_from_string( + class_string: str, + parent, + module, + parent_inspection=default_parent_inspection, + error_when_not_found: bool = False, +): + """ + Search a class from a class string in a specified module for a specified parent + :param class_string: the class name to search + :param parent: the class expected parent + :param module: the class expected module + :param parent_inspection: the parent inspection + :param error_when_not_found: if errors should be raised + :return: the class if found else None + """ + if tentacle_class_by_name := { + m[0]: m[1] + for m in inspect.getmembers(module) + if (m[0] == class_string) + and hasattr(m[1], "__bases__") + and parent_inspection(m[1], parent) + }: + return tentacle_class_by_name[class_string] + if error_when_not_found: + raise ModuleNotFoundError(f"Cant find {class_string} module") + return None # no class found + + +def is_abstract_using_inspection_and_class_naming(clazz): + """ + Check if a class is abstract + :param clazz: the class to check + :return: the check result + """ + return inspect.isabstract(clazz) or "abstract" in clazz.__name__.lower() + + +def get_all_classes_from_parent(parent_class) -> list: + """ + Get all sub classes from parent including multi level sub-classes + :param parent_class: the parent class + :return: the class from parent + """ + classes = [] + for subclass in parent_class.__subclasses__(): + if subclass.__subclasses__(): + # append this subclass + classes.append(subclass) + + # and all its subclasses + classes += get_all_classes_from_parent(subclass) + else: + classes.append(subclass) + return classes + + +def get_single_deepest_child_class(clazz) -> object: + """ + Get the single deepest child class + :param clazz: the class + :return: the single deepest child class + """ + children_classes = clazz.__subclasses__() + if len(children_classes) == 0: + return clazz + if len(children_classes) > 1: + logging_util.get_logger(__name__).error( + f"More than one child class of {clazz}, expecting one, " + f"using {children_classes[0]}" + ) + return get_single_deepest_child_class(children_classes[0]) diff --git a/packages/commons/octobot_commons/tests/__init__.py b/packages/commons/octobot_commons/tests/__init__.py new file mode 100644 index 0000000000..f299f2ac11 --- /dev/null +++ b/packages/commons/octobot_commons/tests/__init__.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.tests import test_config + +from octobot_commons.tests.test_config import ( + get_test_config, + init_config_time_frame_for_tests, + load_test_config, + TEST_CONFIG_FOLDER, +) + +__all__ = [ + "get_test_config", + "init_config_time_frame_for_tests", + "load_test_config", + "TEST_CONFIG_FOLDER", +] diff --git a/packages/commons/octobot_commons/tests/test_config.py b/packages/commons/octobot_commons/tests/test_config.py new file mode 100644 index 0000000000..2bd7c70289 --- /dev/null +++ b/packages/commons/octobot_commons/tests/test_config.py @@ -0,0 +1,66 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +import octobot_commons.configuration as configuration +import octobot_commons.constants as constants +import octobot_commons.enums as enums + +TEST_FOLDER = "tests" +STATIC_FOLDER = "static" +TEST_CONFIG_FOLDER = f"{TEST_FOLDER}/static" + + +def get_test_config(test_folder=TEST_FOLDER): + """ + Return test default config + :return: test default config + """ + return os.path.join(test_folder, STATIC_FOLDER, constants.CONFIG_FILE) + + +def get_test_profile(test_folder=TEST_FOLDER): + """ + Return test default config + :return: test default config + """ + return test_folder + + +def init_config_time_frame_for_tests(config): + """ + Append time frames to config for tests + :param config: the test config + :return: the test config with time frames + """ + result = [] + for time_frame in config[constants.CONFIG_TIME_FRAME]: + result.append(enums.TimeFrames(time_frame)) + config[constants.CONFIG_TIME_FRAME] = result + + +def load_test_config(dict_only=True, test_folder=TEST_FOLDER): + """ + Return the complete default test configs + :return: the complete default test config + """ + config = configuration.Configuration( + get_test_config(test_folder=test_folder), + get_test_profile(test_folder=test_folder), + ) + config.read() + init_config_time_frame_for_tests(config.config) + return config.config if dict_only else config diff --git a/packages/commons/octobot_commons/thread_util.py b/packages/commons/octobot_commons/thread_util.py new file mode 100644 index 0000000000..fa1a8ac7f3 --- /dev/null +++ b/packages/commons/octobot_commons/thread_util.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import sys +import concurrent.futures as futures + + +# pylint: disable=W0212 +def stop_thread_pool_executor_non_gracefully(executor: futures.ThreadPoolExecutor): + """ + Should only be used in python 3.8 + From https://gist.github.com/clchiou/f2608cbe54403edb0b13 + Non graceful and non clean but only way to shutdown a ThreadPoolExecutor + :param executor: the ThreadPoolExecutor to stop + """ + if sys.version_info.minor >= 9: + executor.shutdown(True) + else: + executor.shutdown(False) + executor._threads.clear() + futures.thread._threads_queues.clear() diff --git a/packages/commons/octobot_commons/time_frame_manager.py b/packages/commons/octobot_commons/time_frame_manager.py new file mode 100644 index 0000000000..9cb4fe973a --- /dev/null +++ b/packages/commons/octobot_commons/time_frame_manager.py @@ -0,0 +1,176 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.constants as constants +import octobot_commons.logging as logging_util +import octobot_commons.enums as enums + +LOGGER_TAG = "TimeFrameManager" + + +def _sort_time_frames(time_frames, reverse=False): + if time_frames: + time_frames = ( + time_frames + if isinstance(time_frames[0], enums.TimeFrames) + else [enums.TimeFrames(tf) for tf in time_frames] + ) + return sorted( + time_frames, key=enums.TimeFramesMinutes.__getitem__, reverse=reverse + ) + return time_frames + + +TimeFramesRank = _sort_time_frames(list(enums.TimeFramesMinutes)) + + +def get_config_time_frame(config) -> list: + """ + Get the time frame config list + Warning: requires EvaluatorCreator.init_time_frames_from_strategies(self.config) to be called previously + :param config: the config + :return: the time frame config list + """ + return config[constants.CONFIG_TIME_FRAME] + + +def sort_time_frames(time_frames, reverse=False) -> list: + """ + Sort a time frame list, shorted to longest + :param time_frames: the time frames to sort + :param reverse: if the sort should be reversed + :return: the time frame list sorted + """ + return _sort_time_frames(time_frames, reverse) + + +def sort_config_time_frames(config) -> None: + """ + Sort the time frame config and save it in config + :param config: the config + """ + config[constants.CONFIG_TIME_FRAME] = sort_time_frames( + config[constants.CONFIG_TIME_FRAME] + ) + + +def get_display_time_frame(config, default_display_time_frame): + """ + Get display time frame + :param config: the config + :param default_display_time_frame: the default time frame display + :return: the time frame display + """ + if default_display_time_frame in get_config_time_frame(config): + return default_display_time_frame + # else: return largest time frame + return config[constants.CONFIG_TIME_FRAME][-1] + + +def get_previous_time_frame(config_time_frames, time_frame, origin_time_frame): + """ + Get the previous time frame + :param config_time_frames: the time frame config + :param time_frame: the specified time frame + :param origin_time_frame: the origin time frame list + :return: the previous time frame of the specified time frame + """ + current_time_frame_index = TimeFramesRank.index(time_frame) + + if current_time_frame_index > 0: + previous = TimeFramesRank[current_time_frame_index - 1] + if previous in config_time_frames: + return previous + return get_previous_time_frame(config_time_frames, previous, origin_time_frame) + if time_frame in config_time_frames: + return time_frame + return origin_time_frame + + +def find_min_time_frame( + time_frames: list[typing.Union[str, enums.TimeFrames]], + min_time_frame: typing.Optional[str] = None +) -> enums.TimeFrames: + """ + Find the minimum time frame + :param time_frames: the time frame list + :param min_time_frame: the min time frame + :return: the minimal time frame + """ + time_frame_list = time_frames + if time_frames and isinstance(next(iter(time_frames)), enums.TimeFrames): + time_frame_list = [t.value for t in time_frames] + + if ( + not time_frame_list + ): # if exchange has no time frame list, returns minimal time frame + return TimeFramesRank[0] + + min_index = 0 + if min_time_frame: + min_index = TimeFramesRank.index(min_time_frame) + # TimeFramesRank is the ordered list of timeframes + for index, time_frame in enumerate(TimeFramesRank): + tf_val = time_frame.value + if index >= min_index and tf_val in time_frame_list: + try: + return enums.TimeFrames(tf_val) + except ValueError: + pass + return min_time_frame + + +def parse_time_frames(time_frames_string_list): + """ + Parse a time frame list as string + :param time_frames_string_list: the time frame list as string + :return: the parsed time frame list + """ + result_list = [] + for time_frame_string in time_frames_string_list: + try: + result_list.append(enums.TimeFrames(time_frame_string)) + except ValueError: + logging_util.get_logger(LOGGER_TAG).error( + "No time frame available for: '{0}'. Available time " + "frames are: {1}. '{0}' time frame requirement " + "ignored.".format( + time_frame_string, [t.value for t in enums.TimeFrames] + ) + ) + return result_list + + +def is_time_frame(value): + """ + :return: True if the value represents a TimeFrame + """ + try: + enums.TimeFrames(value) + return True + except ValueError: + return False + + +def get_last_timeframe_time( + time_frame: enums.TimeFrames, base_timestamp: float +) -> float: + """ + :return: the exact timestamp of the last give time_frame tick relatively to the given base_timestamp + """ + tf_seconds = enums.TimeFramesMinutes[time_frame] * constants.MINUTE_TO_SECONDS + return base_timestamp - (base_timestamp % tf_seconds) diff --git a/packages/commons/octobot_commons/timestamp_util.py b/packages/commons/octobot_commons/timestamp_util.py new file mode 100644 index 0000000000..b13244e252 --- /dev/null +++ b/packages/commons/octobot_commons/timestamp_util.py @@ -0,0 +1,111 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from datetime import datetime, timezone + + +LOCAL_TIMEZONE = datetime.now().astimezone().tzinfo + + +def convert_timestamp_to_datetime( + timestamp: float, time_format: str = "%d/%m/%y %H:%M", local_timezone: bool = False +) -> str: + """ + Convert a timestamp to a human readable string + :param timestamp: the timestamp to convert + :param time_format: the time format + :param local_timezone: if the local timezone should be used + :return: the created readable string + """ + return datetime.fromtimestamp( + timestamp, tz=(LOCAL_TIMEZONE if local_timezone else timezone.utc) + ).strftime(time_format) + + +def convert_timestamps_to_datetime( + timestamps: list[float], + time_format: str = "%d/%m/%y %H:%M", + local_timezone: bool = False, +) -> list[str]: + """ + Convert multiple timestamps to datetime objects + :param timestamps: the timestamp to convert list + :param time_format: the time format + :param local_timezone: if the local timezone should be used + :return: the created datetime objects + """ + return [ + convert_timestamp_to_datetime( + timestamp, time_format=time_format, local_timezone=local_timezone + ) + for timestamp in timestamps + ] + + +def is_valid_timestamp(timestamp: float) -> bool: + """ + Check if the timestamp is valid + :param timestamp: the timestamp to check + :return: the check result + """ + if timestamp: + try: + datetime.fromtimestamp(timestamp) + except (OSError, ValueError, OverflowError, TypeError): + return False + return True + + +def get_now_time( + time_format: str = "%Y-%m-%d %H:%M:%S", local_timezone: bool = True +) -> str: + """ + Get the current time + :param time_format: the time format + :return: the current timestamp + """ + return datetime.now( + tz=(LOCAL_TIMEZONE if local_timezone else timezone.utc) + ).strftime(time_format) + + +def datetime_to_timestamp( + date_time_str: str, date_time_format: str, local_timezone: bool = True +) -> float: + """ + Convert a datetime to timestamp + :param date_time_str: the datetime string + :param date_time_format: the datetime format + :return: the timestamp + """ + return create_datetime_from_string( + date_time_str, date_time_format, local_timezone=local_timezone + ).timestamp() + + +def create_datetime_from_string( + date_time_str: str, date_time_format: str, local_timezone: bool = True +) -> datetime: + """ + Convert a string to datetime + :param date_time_str: the datetime string + :param date_time_format: the datetime format + :return: the converted datetime + """ + # force local timezone or parsing might fail + return datetime.strptime(date_time_str, date_time_format).replace( + tzinfo=LOCAL_TIMEZONE if local_timezone else timezone.utc + ) diff --git a/packages/commons/octobot_commons/tree/__init__.py b/packages/commons/octobot_commons/tree/__init__.py new file mode 100644 index 0000000000..9579b5b867 --- /dev/null +++ b/packages/commons/octobot_commons/tree/__init__.py @@ -0,0 +1,48 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.tree import base_tree + +from octobot_commons.tree.base_tree import ( + BaseTree, + BaseTreeNode, + NodeExistsError, +) + +from octobot_commons.tree import event_tree + +from octobot_commons.tree.event_tree import ( + EventTreeNode, + EventTree, +) + +from octobot_commons.tree import event_provider + +from octobot_commons.tree.event_provider import ( + EventProvider, + get_exchange_path, +) + + +__all__ = [ + "BaseTree", + "BaseTreeNode", + "NodeExistsError", + "EventTreeNode", + "EventTree", + "EventProvider", + "get_exchange_path", +] diff --git a/packages/commons/octobot_commons/tree/base_tree.py b/packages/commons/octobot_commons/tree/base_tree.py new file mode 100644 index 0000000000..5f5d347d7a --- /dev/null +++ b/packages/commons/octobot_commons/tree/base_tree.py @@ -0,0 +1,279 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class BaseTreeNode: + """ + Node element of a BaseTree + """ + + __slots__ = [ + "node_value", + "node_value_time", + "node_type", + "node_description", + "node_metadata", + "children", + ] + + def __init__(self, node_value, node_type, **_): + self.node_value = node_value + self.node_value_time = None + self.node_type = node_type + self.node_description = None + self.node_metadata = {} + self.children = {} + + def set_child(self, key, child): + """ + Set a child at the given key + """ + self.children[key] = child + + def pop_child(self, key, default): + """ + Pop the child the given key + """ + return self.children.pop(key, default) + + +class NodeExistsError(Exception): + """ + Node doesn't exist error + """ + + +class BaseTree: + """ + Tree based on BaseTreeNode + """ + + TREE_NODE_CLASS = BaseTreeNode + __slots__ = ["root"] + + def __init__(self): + """ + Init the root node + """ + self.root = self.TREE_NODE_CLASS(None, None) + + def set_node(self, value, node_type, node, timestamp=0): + """ + Set the node attributes + Can raise an exception if the node doesn't exists + :param value: the node 'node_value' attribute to set + :param node_type: the node 'node_type' attribute to set + :param node: the node to update + :param timestamp: the value modification timestamp. + """ + self._set_node(node, value, node_type, timestamp=timestamp) + + # pylint: disable=too-many-arguments + def set_node_at_path( + self, value, node_type, path, timestamp=0, description=None, metadata=None + ): + """ + Set the node attributes + Creates the node if it doesn't exists + :param value: the node 'node_value' attribute to set + :param node_type: the node 'node_type' attribute to set + :param path: the node path (as a list of string) + :param timestamp: the value modification timestamp. + :param description: the node 'node_description' attribute to set + :param metadata: the node 'node_metadata' attribute to set + For example: + - If you created a first node with the path ["my-parent-node"] + - You can create a child node of my-parent-node by using ["my-parent-node", "my-new-child-node"] as `path` + :return: void + """ + self._set_node( + self.get_or_create_node(path), + value, + node_type, + timestamp=timestamp, + description=description, + metadata=metadata, + ) + + def get_node(self, path, starting_node=None): + """ + Get the node at the specified path + :param path: the node path (as a list of string). + For example: + - If you created a first node with the path ["my-parent-node"] + - You can create a child node of my-parent-node by using ["my-parent-node", "my-new-child-node"] as `path` + :param starting_node: the node to start the relative path + :return: the node instance or raise a NodeExistsError if the node doesn't exists + """ + try: + return self._get_node(path, starting_node=starting_node) + except KeyError: + raise NodeExistsError + + def clear(self): + """ + Clears the whole tree + """ + self.root = self.TREE_NODE_CLASS(None, None) + + def delete_node(self, path, starting_node=None): + """ + Delete the node at the specified path + :param path: the node path (as a list of string). + For example: + - If you created a first node with the path ["my-parent-node"] + - You can create a child node of my-parent-node by using ["my-parent-node", "my-new-child-node"] as `path` + :param starting_node: the node to start the relative path + :return: the deleted node or raise a NodeExistsError if the node doesn't exists + """ + try: + deleted_node = self._delete_node(path, starting_node=starting_node) + if deleted_node is None: + raise NodeExistsError + return deleted_node + except KeyError: + raise NodeExistsError + + def get_or_create_node(self, path, starting_node=None, **kwargs): + """ + Get the node at the specified path + Creates the node if it doesn't exists + :param path: the node path (as a list of string). + For example: + - If you created a first node with the path ["my-parent-node"] + - You can create a child node of my-parent-node by using ["my-parent-node", "my-new-child-node"] as `path` + :param starting_node: the node to start the relative path + :return: the node instance + """ + try: + return self._get_node(path, starting_node=starting_node) + except KeyError: + return self._create_node_path(path, starting_node=starting_node, **kwargs) + + def get_nested_children_with_path(self, path=None, select_leaves_only=True): + """ + Returns a generator iterating over the nodes children, including nested children. Children are yielded + together with their node path using a depth-first search (the most nested children are returned first) + :param path: the path (as a list of string) to the node + :param select_leaves_only: when True (default), only nodes that don't have children are returned + :return: a generator of (node, path) tuples + """ + path = path or [] + return self._get_nested_children_with_path(path, select_leaves_only) + + def _get_nested_children_with_path(self, parent_path, select_leaves_only): + children_keys = self.get_children_keys(parent_path) + node = self.get_node(parent_path) + if not children_keys or not select_leaves_only: + yield node, parent_path + for key in children_keys: + path = list(parent_path) + path.append(key) + yield from self._get_nested_children_with_path(path, select_leaves_only) + + def get_children_keys(self, path): + """ + Return the node's children keys + Can raise a KeyError if the path does not exists + :param path: the path (as a list of string) to the node + :return: children keys as a list + """ + return list(self.get_node(path).children) + + def _get_node(self, path, starting_node=None): + """ + Return the node corresponding to the path + Can raise a KeyError if the path does not exists + :param path: the path (as a list of string) to the node + :param starting_node: the node to start the path, root if None + :return: BaseTreeNode at path + """ + current_node = self.root if starting_node is None else starting_node + for key in path: + current_node = current_node.children[key] + return current_node + + def _delete_node(self, path, starting_node=None): + """ + Return the node corresponding to the path + Can raise a KeyError if the path does not exists + :param path: the path (as a list of string) to the node + :param starting_node: the node to start the path, root if None + :return: BaseTreeNode at path + """ + current_node = self.root if starting_node is None else starting_node + for key in path[:-1]: + current_node = current_node.children[key] + return current_node.pop_child(path[-1], None) + + def _create_node_path(self, path, starting_node=None, **kwargs): + """ + Expensive method that creates the path to the selected node + :param path: path (as a list of string) to the selected node + :param starting_node: the node to start the path, root if None + :return: the created node path + """ + current_node = self.root if starting_node is None else starting_node + for key in path: + try: + current_node = current_node.children[key] + except KeyError: + # create a new node as the current node child + # us it as the new node + current_node = self.child_factory(current_node, key, **kwargs) + + return current_node + + def child_factory(self, node, key, **kwargs): + """ + Create a new child an associate it to the given key + """ + node.set_child(key, self.TREE_NODE_CLASS(None, None, **kwargs)) + return node.children[key] + + # pylint: disable=too-many-arguments + def _set_node( + self, + node, + value=None, + node_type=None, + timestamp=0, + description=None, + metadata=None, + ): + """ + Sets the node attributes + :param node: the node instance to update + :param value: the node instance 'node_value' attribute to set (is ignored if None) + :param node_type: the node instance 'node_type' attribute to set (is ignored if None) + :param timestamp: the value modification timestamp. + :param description: the node instance 'node_description' attribute to set (is ignored if None) + :param metadata: the node instance 'node_metadata' attribute to set (is ignored if None) + """ + if value is not None: + node.node_value = value + + if node_type is not None: + node.node_type = node_type + + # set the node value modification timestamp + node.node_value_time = timestamp + + if description is not None: + node.node_description = description + + if metadata is not None: + node.node_metadata = metadata diff --git a/packages/commons/octobot_commons/tree/event_provider.py b/packages/commons/octobot_commons/tree/event_provider.py new file mode 100644 index 0000000000..9a74502d14 --- /dev/null +++ b/packages/commons/octobot_commons/tree/event_provider.py @@ -0,0 +1,119 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import concurrent.futures + +import octobot_commons.singleton as singleton +import octobot_commons.logging as logging +import octobot_commons.tree.event_tree as event_tree +import octobot_commons.tree.base_tree as base_tree + + +def _create_tree_if_missing(func): + """ + Create the required node if missing and then recalls the function + """ + + def wrapper(self, bot_id, *args, **kwargs): + try: + return func(self, bot_id, *args, **kwargs) + except KeyError: + self.create_event_tree(bot_id) + return func(self, bot_id, *args, **kwargs) + + return wrapper + + +class EventProvider(singleton.Singleton): + def __init__(self): + self.logger = logging.get_logger(self.__class__.__name__) + self._event_tree_by_bot_id = {} + + @_create_tree_if_missing + def get_or_create_event(self, bot_id, path, allow_creation=True): + """ + Return the event at the given path for the given bot_id (or create it) + """ + try: + return self._event_tree_by_bot_id[bot_id].get_node(path) + except base_tree.NodeExistsError: + if allow_creation: + self.create_event_at_path(bot_id, path, triggered=False) + return self._event_tree_by_bot_id[bot_id].get_node(path) + raise + + @_create_tree_if_missing + def trigger_event(self, bot_id, path, allow_creation=True): + """ + Trigger the event at the given path for the given bot_id (or create it) + """ + try: + self._event_tree_by_bot_id[bot_id].get_node(path).trigger() + except base_tree.NodeExistsError: + if allow_creation: + self.create_event_at_path(bot_id, path, triggered=True) + self._event_tree_by_bot_id[bot_id].get_node(path).trigger() + + @_create_tree_if_missing + def create_event_at_path(self, bot_id, path, triggered=False): + """ + Create a new event at the given path for the given bot_id + """ + return self._event_tree_by_bot_id[bot_id].create_node_at_path(path, triggered) + + def create_event_tree(self, bot_id): + """ + Create a new event tree for the given bot_id + """ + self._event_tree_by_bot_id[bot_id] = event_tree.EventTree() + + def remove_event_tree(self, bot_id): + """ + Removes the event tree for the given bot_id + """ + self._event_tree_by_bot_id.pop(bot_id, None) + + async def wait_for_event(self, bot_id, path, timeout) -> bool: + """ + Wait for the event at the given path for the given bot_id. + Returns instantly if the path doesn't lead to an event or if timeout is 0 + :return: False if the event is not triggered after timeout + """ + try: + event = self.get_or_create_event(bot_id, path, allow_creation=False) + if timeout == 0: + return event.is_triggered() + if not event.is_triggered(): + await asyncio.wait_for(event.wait(), timeout) + except base_tree.NodeExistsError: + # nothing to wait for + pass + except (asyncio.TimeoutError, concurrent.futures.TimeoutError): + return False + return True + + +def get_exchange_path(exchange, topic, symbol=None, time_frame=None): + """ + Return a path associated to the given exchange and topic + as well as symbol and timeframe if provided + """ + node_path = [exchange, topic] + if symbol is not None: + node_path.append(symbol) + if time_frame is not None: + node_path.append(time_frame) + return node_path diff --git a/packages/commons/octobot_commons/tree/event_tree.py b/packages/commons/octobot_commons/tree/event_tree.py new file mode 100644 index 0000000000..d852039e9e --- /dev/null +++ b/packages/commons/octobot_commons/tree/event_tree.py @@ -0,0 +1,213 @@ +# pylint: disable=R1725,W0221 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio + +import octobot_commons.tree.base_tree as base_tree +import octobot_commons.logging as logging + + +class EventTreeNode(base_tree.BaseTreeNode): + """ + Node element of an EventTreeNode. self.node_value is an asyncio.Event() that is triggered when all of its children + are triggered or is manually triggered. Adding an unset child will clear self. Children updates will overwrite + any manual trigger + """ + + __slots__ = [ + "_parent", + "_logger", + ] + + def __init__(self, *_, triggered=False, **__): + super().__init__(asyncio.Event(), asyncio.Event) + if triggered: + self._trigger() + self._parent = None + self._logger = logging.get_logger(self.__class__.__name__) + + def bind_parent(self, parent): + """ + Set the parent node and propagate the local state to the parent node + """ + self._parent = parent + self._propagate() + + def is_triggered(self): + """ + Return True if the local event is set + """ + return self.node_value.is_set() + + async def wait(self): + """ + Wait till the local event is triggered + """ + await self.node_value.wait() + + def trigger(self): + """ + Trigger the local event, propagate to the parent if any change + """ + if not self.is_triggered(): + self._trigger_and_log() + self._propagate() + + def _trigger_and_log(self): + """ + Set the event and log + """ + self._trigger() + path_to_root = self.get_path_to_root() + if path_to_root: + self._logger.debug(f"Event triggered for {'|'.join(path_to_root)}") + + def _trigger(self): + """ + Set the event and log + """ + self.node_value.set() + + def _clear(self): + """ + Clear the event and log + """ + self.node_value.clear() + path_to_root = self.get_path_to_root() + if path_to_root: + self._logger.debug(f"Event cleared for {'|'.join(path_to_root)}") + + def get_parent(self): + """ + Return self._parent + """ + return self._parent + + def get_path_to_root(self): + """ + Return the path to self from the root event + """ + node = self + path = [] + while node.get_parent() is not None: + parent = node.get_parent() + try: + path = [parent.get_child_key(node)] + path + except KeyError: + return path + node = parent + return path + + def get_child_key(self, child_to_find): + """ + Return the key of the given child in the current children + """ + for key, child in self.children.items(): + if child is child_to_find: + return key + raise KeyError(child_to_find) + + def clear(self): + """ + Clear the local event, propagate to the parent if any change + """ + if self.is_triggered(): + self._clear() + self._propagate() + + def set_child(self, key, child): + """ + Set a child at the given key + """ + super(EventTreeNode, self).set_child(key, child) + self.on_child_change() + + def pop_child(self, key, default): + """ + Pop the child the given key + """ + node = super(EventTreeNode, self).pop_child(key, default) + self.on_child_change() + return node + + def _untriggered_children(self): + """ + Return the list of children events that are not triggered + """ + return [key for key, child in self.children.items() if not child.is_triggered()] + + def on_child_change(self): + """ + Trigger or clear the local event depending on children states + then propagate to the parent if any change + """ + if not self.children: + # do not change event when no children + return + should_be_triggered = True + untriggered_children = self._untriggered_children() + if untriggered_children: + should_be_triggered = False + if not self.is_triggered(): + self._logger.debug( + f"Waiting children trigger for {'|'.join(self.get_path_to_root())}. " + f"Untriggered children: {untriggered_children}" + ) + if should_be_triggered != self.is_triggered(): + if self.is_triggered(): + self._clear() + else: + self._trigger_and_log() + self._propagate() + + def _propagate(self): + """ + Calls parent's on_child_change + """ + if self._parent is not None: + self._parent.on_child_change() + + +class EventTree(base_tree.BaseTree): + """ + Tree based on EventTreeNode where each node's event is synchronized with its children to be triggered when all + of its children are triggered. Adding an untriggered child will untrigger the parent node. + """ + + TREE_NODE_CLASS = EventTreeNode + + def create_node_at_path(self, path, triggered): + """ + Set the node attributes + Creates the node if it doesn't exist + :param path: the node path (as a list of string) + :param triggered: if the created node event should be initially triggered + For example: + - If you created a first node with the path ["my-parent-node"] + - You can create a child node of my-parent-node by using ["my-parent-node", "my-new-child-node"] as `path` + :return: void + """ + self._set_node(self.get_or_create_node(path, triggered=triggered)) + + def child_factory(self, node, key, triggered=False, **kwargs): + """ + Create a new child an associate it to the given key + """ + new_child = super(EventTree, self).child_factory( + node, key, triggered=triggered, **kwargs + ) + new_child.bind_parent(node) + return new_child diff --git a/packages/commons/requirements.txt b/packages/commons/requirements.txt new file mode 100644 index 0000000000..415c95e56f --- /dev/null +++ b/packages/commons/requirements.txt @@ -0,0 +1,10 @@ +# Setup requirements +numpy==2.4.2 + +# Commons requirements +cryptography +sortedcontainers +requests +pydantic + +aiohttp>=3.9.5 diff --git a/packages/commons/standard.rc b/packages/commons/standard.rc new file mode 100644 index 0000000000..978ec7d18b --- /dev/null +++ b/packages/commons/standard.rc @@ -0,0 +1,500 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,tests + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levelaiohttp_util. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=too-few-public-methods, + missing-module-docstring, + missing-class-docstring, + raise-missing-from, + consider-using-from-import, + use-maxsplit-arg, + unspecified-encoding, + consider-using-f-string, + too-many-positional-arguments + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[LOGGING] + +# Format style used to check logging format string. `old` means using % +# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[STRING] + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning on implicit string concatenation in sequences defined over +# several lines. +check-str-concat-over-line-jumps=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=120 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/packages/commons/tests/__init__.py b/packages/commons/tests/__init__.py new file mode 100644 index 0000000000..673f0d94da --- /dev/null +++ b/packages/commons/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/commons/tests/configuration/__init__.py b/packages/commons/tests/configuration/__init__.py new file mode 100644 index 0000000000..673f0d94da --- /dev/null +++ b/packages/commons/tests/configuration/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/commons/tests/configuration/test_configuration.py b/packages/commons/tests/configuration/test_configuration.py new file mode 100644 index 0000000000..f18ff01395 --- /dev/null +++ b/packages/commons/tests/configuration/test_configuration.py @@ -0,0 +1,337 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import shutil +import json +import copy +import pytest +import mock +import octobot_commons.errors as errors +import octobot_commons.json_util +import octobot_commons.configuration as configuration +import octobot_commons.profiles as profiles +import octobot_commons.constants as constants +import octobot_commons.tests.test_config as test_config +from ..profiles import get_profiles_path + +DEFAULT_CONFIG = os.path.join(test_config.TEST_CONFIG_FOLDER, f"default_{constants.CONFIG_FILE}") + + +def get_fake_config_path(): + return os.path.join(test_config.TEST_CONFIG_FOLDER, f"test_{constants.CONFIG_FILE}") + + +def get_profile_path(): + return test_config.TEST_CONFIG_FOLDER + + +@pytest.fixture +def config(): + return configuration.Configuration(get_fake_config_path(), get_profile_path()) + + +@pytest.fixture +def default_config(): + return configuration.Configuration(DEFAULT_CONFIG, get_profile_path()) + + +def test_load_config(): + assert test_config.load_test_config() + + +def test_validate(config): + config.profile = profiles.Profile(config.profiles_path) + config._read_config = {} + with mock.patch.object(octobot_commons.json_util, "validate", mock.Mock()) as validate_mock: + config.validate() + assert validate_mock.mock_calls[0].args == (config._read_config, config.config_schema_path) + assert validate_mock.mock_calls[1].args == (config.profile.as_dict(), config.profile.schema_path) + + +def test_read(default_config): + with mock.patch.object(default_config, "load_profiles", mock.Mock()) as load_profiles_mock, \ + mock.patch.object(default_config, "_get_selected_profile", mock.Mock()) as _select_mock, \ + mock.patch.object(default_config, "select_profile", + mock.Mock()) as select_profile_mock: + default_config.read() + assert isinstance(default_config._read_config, dict) + assert isinstance(default_config.config, dict) + load_profiles_mock.assert_called_once() + _select_mock.assert_called_once() + select_profile_mock.assert_called_once() + + +def test_select_profile(config): + with mock.patch.object(config, "_generate_config_from_user_config_and_profile", + mock.Mock()) as _generate_config_from_user_config_and_profile_mock: + config.profile_by_id = { + "1": profiles.Profile("plop"), + "hoo": profiles.Profile("ah") + } + config.profile_by_id["1"].name = "ploup" + config.config = {} + config.select_profile("1") + assert config.config[constants.CONFIG_PROFILE] == "1" + assert config.profile is config.profile_by_id["1"] + + +def test_remove_profile(config): + config.profile = profiles.Profile(get_profile_path(), config.profile_schema_path) + config.profile.read_config() + config.profile.read_only = True + config.profile_by_id[config.profile.profile_id] = config.profile + # id not in loaded profiles + with pytest.raises(KeyError): + config.remove_profile("random_id") + # read only profile + with pytest.raises(errors.ProfileRemovalError): + config.remove_profile("default") + assert os.path.isdir(config.profile.path) + # valid profile removal + other_profile = profiles.Profile("path", config.profile_schema_path) + other_profile.profile_id = "profile_id" + config.profile_by_id[other_profile.profile_id] = other_profile + with mock.patch.object(shutil, "rmtree", mock.Mock()) as rmtree_mock: + config.remove_profile("profile_id") + rmtree_mock.assert_called_once_with("path") + assert "profile_id" not in config.profile_by_id + + +def test_generate_config_from_user_config_and_profile(config): + with open(DEFAULT_CONFIG) as config_file: + config._read_config = json.load(config_file) + config.profile = profiles.Profile(get_profile_path(), config.profile_schema_path) + config.profile.read_config() + for key in config.profile.FULLY_MANAGED_ELEMENTS: + assert key not in config._read_config + for key in config.profile.PARTIALLY_MANAGED_ELEMENTS: + assert key in config._read_config + config.config = copy.deepcopy(config._read_config) + config._generate_config_from_user_config_and_profile() + for key in config.profile.FULLY_MANAGED_ELEMENTS: + assert key in config.config + for key in config.profile.PARTIALLY_MANAGED_ELEMENTS: + assert key in config.config + assert config.config is not config._read_config + + +def test_save(config): + save_file = "saved_config.json" + config.config_path = save_file + if os.path.isfile(save_file): + os.remove(save_file) + # used as a restore file + shutil.copy(DEFAULT_CONFIG, save_file) + try: + with open(DEFAULT_CONFIG) as config_file: + config._read_config = json.load(config_file) + # add profile data + config.profile = profiles.Profile(get_profile_path(), config.profile_schema_path) + config.profile.read_config() + with mock.patch.object(config, "_get_config_without_profile_elements", + mock.Mock(return_value=config._read_config)) as _filter_mock, \ + mock.patch.object(config.profile, "save_config", mock.Mock()) as _save_profile_mock: + config.save() + assert os.path.isfile(save_file) + with open(save_file) as config_file: + saved_config = json.load(config_file) + assert saved_config == config._read_config + finally: + if os.path.isfile(save_file): + os.remove(save_file) + + +def test_is_loaded(config): + assert not config.is_loaded() + config.config = "" + assert config.is_loaded() + + +def test_is_config_empty_or_missing(config): + if os.path.isfile(get_fake_config_path()): + os.remove(get_fake_config_path()) + + assert config.is_config_file_empty_or_missing() + shutil.copy(os.path.join(test_config.TEST_CONFIG_FOLDER, constants.DEFAULT_CONFIG_FILE), get_fake_config_path()) + assert not config.is_config_file_empty_or_missing() + + if os.path.isfile(get_fake_config_path()): + os.remove(get_fake_config_path()) + + +def test_get_tentacles_config_path(config): + config.profile = profiles.Profile(get_profile_path(), config.profile_schema_path) + assert config.get_tentacles_config_path() == os.path.join(test_config.TEST_CONFIG_FOLDER, + constants.CONFIG_TENTACLES_FILE) + + +def test_get_metrics_enabled(config): + config.config = {} + assert config.get_metrics_enabled() is True + config.config = { + constants.CONFIG_METRICS: {} + } + assert config.get_metrics_enabled() is True + config.config = { + constants.CONFIG_METRICS: { + constants.CONFIG_ENABLED_OPTION: True + } + } + assert config.get_metrics_enabled() is True + config.config = { + constants.CONFIG_METRICS: { + constants.CONFIG_ENABLED_OPTION: False + } + } + assert config.get_metrics_enabled() is False + + +def test_accepted_terms(config): + config.config = {} + assert config.accepted_terms() is False + config.config = { + constants.CONFIG_ACCEPTED_TERMS: False + } + assert config.accepted_terms() is False + config.config = { + constants.CONFIG_ACCEPTED_TERMS: True + } + assert config.accepted_terms() is True + + +def test_update_config_fields(config): + config.config = {} + separator = "_" + with mock.patch.object(config, "save", mock.Mock()) as save_mock: + to_update_fields = {'crypto-currencies_01coin_pairs': ['dqd/dd']} + config.update_config_fields(to_update_fields, False, separator) + assert config.config == { + "crypto-currencies": { + "01coin": { + "pairs": ["dqd/dd"] + } + } + } + save_mock.assert_called_once() + save_mock.reset_mock() + to_update_fields = { + 'crypto-currencies_plop_p': ['dqd/dd', '111'], + 'rfzr_r_r': True + } + # no crypto-currencies update since in_backtesting = True + config.update_config_fields(to_update_fields, True, separator) + assert config.config == { + "crypto-currencies": { + "01coin": { + "pairs": ["dqd/dd"] + } + }, + "rfzr": { + "r": { + "r": True + } + } + } + save_mock.assert_called_once() + save_mock.reset_mock() + to_update_fields = { + 'crypto-currencies_plop_p': ['dqd/dd', '111'] + } + # change separator + config.update_config_fields(to_update_fields, False, "-") + assert config.config == { + "crypto-currencies": { + "01coin": { + "pairs": ["dqd/dd"] + } + }, + "crypto": { + "currencies_plop_p": ['dqd/dd', '111'] + }, + "rfzr": { + "r": { + "r": True + } + } + } + save_mock.assert_called_once() + save_mock.reset_mock() + # delete + config.update_config_fields(to_update_fields, False, "-", delete=True) + assert config.config == { + "crypto-currencies": { + "01coin": { + "pairs": ["dqd/dd"] + } + }, + "crypto": {}, + "rfzr": { + "r": { + "r": True + } + } + } + save_mock.assert_called_once() + + +def test_get_selected_profile(config): + config.profile_by_id = { + "55": "123", + "default": "456", + } + config._read_config = {} + # missing profile key + assert config._get_selected_profile() == "default" + # normal case + config._read_config[constants.CONFIG_PROFILE] = "55" + assert config._get_selected_profile() == "55" + # missing profile + config._read_config[constants.CONFIG_PROFILE] = "66" + assert config._get_selected_profile() == "default" + # no default + config.profile_by_id.pop("default") + config._read_config[constants.CONFIG_PROFILE] = "66" + with pytest.raises(errors.NoProfileError): + assert config._get_selected_profile() == "default" + config._read_config.pop(constants.CONFIG_PROFILE) + with pytest.raises(errors.NoProfileError): + assert config._get_selected_profile() == "default" + + +def test_load_profiles(config): + config.profiles_path = get_profiles_path() + nb_profiles = 1 + config.load_profiles() + assert len(config.profile_by_id) == nb_profiles + loaded_profile = config.profile_by_id["default"] + # reload profile, keep loaded ones + config.load_profiles() + assert config.profile_by_id["default"] is loaded_profile + + +def test_get_config_without_profile_elements(config): + config.profile = profiles.Profile(config.profiles_path) + config.config = { + "plop": 1, + "plip": True, + profiles.Profile.FULLY_MANAGED_ELEMENTS[0]: "dd", + next(iter(profiles.Profile.PARTIALLY_MANAGED_ELEMENTS)): "tt" + } + assert config._get_config_without_profile_elements() == { + "plop": 1, + "plip": True, + next(iter(profiles.Profile.PARTIALLY_MANAGED_ELEMENTS)): "tt" + } diff --git a/packages/commons/tests/configuration/test_fields_util.py b/packages/commons/tests/configuration/test_fields_util.py new file mode 100644 index 0000000000..e4cea454da --- /dev/null +++ b/packages/commons/tests/configuration/test_fields_util.py @@ -0,0 +1,28 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import uuid +import octobot_commons.configuration as configuration + + +def test_get_password_hash(): + assert len(configuration.get_password_hash("")) == 64 + assert len(configuration.get_password_hash("1")) == 64 + assert len(configuration.get_password_hash("1a")) == 64 + for _ in range(100): + rand_password = str(uuid.uuid4()) + hashed_password = configuration.get_password_hash(rand_password) + assert len(hashed_password) == 64 + assert not hashed_password == rand_password diff --git a/packages/commons/tests/configuration/test_historical_configuration.py b/packages/commons/tests/configuration/test_historical_configuration.py new file mode 100644 index 0000000000..ec29d49e47 --- /dev/null +++ b/packages/commons/tests/configuration/test_historical_configuration.py @@ -0,0 +1,207 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import octobot_commons.constants as constants +import octobot_commons.configuration as configuration + + +class TestHistoricalConfiguration: + """Test class for historical configuration functions""" + + def setup_method(self): + """Set up test fixtures""" + self.empty_master_config = {} + self.master_config_with_historical = {} + for ts, config in ( + [1000.0, {"param1": "value1", "param2": "value2"}], + [2000.0, {"param1": "value3", "param2": "value4"}], + [3000.0, {"param1": "value5", "param2": "value6"}], + ): + # add config using add_historical_tentacle_config to maintain order + configuration.add_historical_tentacle_config(self.master_config_with_historical, ts, config) + + self.test_config = {"test_param": "test_value"} + self.test_start_time = 1500.0 + + def test_add_historical_tentacle_config_empty_master_config(self): + assert constants.CONFIG_HISTORICAL_CONFIGURATION not in self.empty_master_config + """Test adding historical config to empty master config""" + configuration.add_historical_tentacle_config( + self.empty_master_config, self.test_start_time, self.test_config + ) + + assert constants.CONFIG_HISTORICAL_CONFIGURATION in self.empty_master_config + assert len(self.empty_master_config[constants.CONFIG_HISTORICAL_CONFIGURATION]) == 1 + assert self.empty_master_config[constants.CONFIG_HISTORICAL_CONFIGURATION][0] == [ + self.test_start_time, self.test_config + ] + + def test_add_historical_tentacle_config_existing_master_config(self): + """Test adding historical config to existing master config""" + original_length = len(self.master_config_with_historical[constants.CONFIG_HISTORICAL_CONFIGURATION]) + + configuration.add_historical_tentacle_config( + self.master_config_with_historical, self.test_start_time, self.test_config + ) + + # Check that the new config was added + assert len(self.master_config_with_historical[constants.CONFIG_HISTORICAL_CONFIGURATION]) == original_length + 1 + + # Check that the list is sorted by timestamp in descending order (most recent first) + timestamps = [config[0] for config in self.master_config_with_historical[constants.CONFIG_HISTORICAL_CONFIGURATION]] + assert timestamps == sorted(timestamps, reverse=True) + + # Check that our new config is in the list + config_found = False + for config in self.master_config_with_historical[constants.CONFIG_HISTORICAL_CONFIGURATION]: + if config[0] == self.test_start_time and config[1] == self.test_config: + config_found = True + break + assert config_found + + def test_add_historical_tentacle_config_multiple_additions(self): + """Test adding multiple historical configs maintains sorting""" + configuration.add_historical_tentacle_config(self.empty_master_config, 1000.0, {"config": "old"}) + configuration.add_historical_tentacle_config(self.empty_master_config, 3000.0, {"config": "new"}) + configuration.add_historical_tentacle_config(self.empty_master_config, 2000.0, {"config": "middle"}) + + timestamps = [config[0] for config in self.empty_master_config[constants.CONFIG_HISTORICAL_CONFIGURATION]] + assert timestamps == [3000.0, 2000.0, 1000.0] + + def test_has_any_historical_tentacle_config_empty(self): + """Test has_any_historical_tentacle_config with empty config""" + assert not configuration.has_any_historical_tentacle_config(self.empty_master_config) + + def test_has_any_historical_tentacle_config_with_historical(self): + """Test has_any_historical_tentacle_config with existing historical config""" + assert configuration.has_any_historical_tentacle_config(self.master_config_with_historical) + + def test_has_any_historical_tentacle_config_other_keys(self): + """Test has_any_historical_tentacle_config with other keys but not historical""" + config_with_other_keys = {"other_key": "value", "another_key": "value2"} + assert not configuration.has_any_historical_tentacle_config(config_with_other_keys) + + def test_get_historical_tentacle_config_exact_match(self): + """Test getting historical config with exact time match""" + result = configuration.get_historical_tentacle_config(self.master_config_with_historical, 2000.0) + assert result == {"param1": "value3", "param2": "value4"} + + def test_get_historical_tentacle_config_between_times(self): + """Test getting historical config with time between existing configs""" + result = configuration.get_historical_tentacle_config(self.master_config_with_historical, 1500.0) + assert result == {"param1": "value1", "param2": "value2"} + + def test_get_historical_tentacle_config_before_oldest(self): + """Test getting historical config with time before oldest config""" + result = configuration.get_historical_tentacle_config(self.master_config_with_historical, 500.0) + # Should return the oldest config (last in the list) + assert result == {"param1": "value1", "param2": "value2"} + + def test_get_historical_tentacle_config_after_newest(self): + """Test getting historical config with time after newest config""" + result = configuration.get_historical_tentacle_config(self.master_config_with_historical, 4000.0) + # Should return the newest config (first in the list) + assert result == {"param1": "value5", "param2": "value6"} + + def test_get_historical_tentacle_config_key_error(self): + """Test get_historical_tentacle_config raises KeyError when key not found""" + with pytest.raises(KeyError) as exc_info: + configuration.get_historical_tentacle_config(self.empty_master_config, 1000.0) + assert constants.CONFIG_HISTORICAL_CONFIGURATION in str(exc_info.value) + + def test_get_historical_tentacle_configs_empty_interval(self): + """Test getting historical configs with empty time interval""" + result = configuration.get_historical_tentacle_configs(self.master_config_with_historical, 5000.0, 6000.0) + assert result == [] + + def test_get_historical_tentacle_configs_full_interval(self): + """Test getting historical configs with full time interval""" + result = configuration.get_historical_tentacle_configs(self.master_config_with_historical, 500.0, 4000.0) + assert len(result) == 3 + # Should be ordered by most recent first + assert result[0] == {"param1": "value5", "param2": "value6"} + assert result[1] == {"param1": "value3", "param2": "value4"} + assert result[2] == {"param1": "value1", "param2": "value2"} + + def test_get_historical_tentacle_configs_partial_interval(self): + """Test getting historical configs with partial time interval""" + result = configuration.get_historical_tentacle_configs(self.master_config_with_historical, 1500.0, 2500.0) + assert len(result) == 1 + assert result[0] == {"param1": "value3", "param2": "value4"} + + def test_get_historical_tentacle_configs_exact_boundaries(self): + """Test getting historical configs with exact boundary times""" + result = configuration.get_historical_tentacle_configs(self.master_config_with_historical, 1000.0, 3000.0) + assert len(result) == 3 + + def test_get_historical_tentacle_configs_key_error(self): + """Test get_historical_tentacle_configs raises KeyError when key not found""" + with pytest.raises(KeyError) as exc_info: + configuration.get_historical_tentacle_configs(self.empty_master_config, 1000.0, 2000.0) + assert constants.CONFIG_HISTORICAL_CONFIGURATION in str(exc_info.value) + + def test_get_oldest_historical_tentacle_config_time_success(self): + """Test getting oldest historical config time successfully""" + result = configuration.get_oldest_historical_tentacle_config_time(self.master_config_with_historical) + assert result == 1000.0 + + def test_get_oldest_historical_tentacle_config_time_empty_list(self): + """Test getting oldest historical config time with empty list""" + config_with_empty_list = {constants.CONFIG_HISTORICAL_CONFIGURATION: []} + with pytest.raises(ValueError) as exc_info: + configuration.get_oldest_historical_tentacle_config_time(config_with_empty_list) + assert "No historical configuration found" in str(exc_info.value) + + def test_get_oldest_historical_tentacle_config_time_missing_key(self): + """Test getting oldest historical config time with missing key""" + # Should return the minimum of an empty list, which raises ValueError + with pytest.raises(ValueError) as exc_info: + configuration.get_oldest_historical_tentacle_config_time(self.empty_master_config) + assert "No historical configuration found" in str(exc_info.value) + + def test_get_oldest_historical_tentacle_config_time_single_config(self): + """Test getting oldest historical config time with single config""" + single_config = { + constants.CONFIG_HISTORICAL_CONFIGURATION: [[1500.0, {"test": "config"}]] + } + result = configuration.get_oldest_historical_tentacle_config_time(single_config) + assert result == 1500.0 + + def test_integration_scenario(self): + """Test integration scenario with multiple operations""" + # Start with empty config + master_config = {} + + # Add multiple configs + configuration.add_historical_tentacle_config(master_config, 1000.0, {"version": "1.0"}) + configuration.add_historical_tentacle_config(master_config, 2000.0, {"version": "2.0"}) + configuration.add_historical_tentacle_config(master_config, 1500.0, {"version": "1.5"}) + + # Check that historical config exists + assert configuration.has_any_historical_tentacle_config(master_config) + + # Get config for different times + assert configuration.get_historical_tentacle_config(master_config, 500.0) == {"version": "1.0"} + assert configuration.get_historical_tentacle_config(master_config, 1200.0) == {"version": "1.0"} + assert configuration.get_historical_tentacle_config(master_config, 1600.0) == {"version": "1.5"} + assert configuration.get_historical_tentacle_config(master_config, 2500.0) == {"version": "2.0"} + + # Get configs for time intervals + configs_1000_2000 = configuration.get_historical_tentacle_configs(master_config, 1000.0, 2000.0) + assert len(configs_1000_2000) == 3 + + # Get oldest time + assert configuration.get_oldest_historical_tentacle_config_time(master_config) == 1000.0 diff --git a/packages/commons/tests/cryptography/test_encryption.py b/packages/commons/tests/cryptography/test_encryption.py new file mode 100644 index 0000000000..450066bb88 --- /dev/null +++ b/packages/commons/tests/cryptography/test_encryption.py @@ -0,0 +1,538 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.cryptography as cryptography + + +def test_generate_rsa_key_pair_default_size(): + """Test RSA key pair generation with default size (4096 bits).""" + private_key_pem, public_key_pem = cryptography.generate_rsa_key_pair() + + assert isinstance(private_key_pem, bytes) + assert isinstance(public_key_pem, bytes) + assert b"BEGIN RSA PRIVATE KEY" in private_key_pem or b"BEGIN PRIVATE KEY" in private_key_pem + assert b"BEGIN PUBLIC KEY" in public_key_pem + assert b"END RSA PRIVATE KEY" in private_key_pem or b"END PRIVATE KEY" in private_key_pem + assert b"END PUBLIC KEY" in public_key_pem + + +def test_generate_rsa_key_pair_custom_size(): + """Test RSA key pair generation with custom key size.""" + private_key_pem, public_key_pem = cryptography.generate_rsa_key_pair(key_size=2048) + + assert isinstance(private_key_pem, bytes) + assert isinstance(public_key_pem, bytes) + assert b"BEGIN RSA PRIVATE KEY" in private_key_pem or b"BEGIN PRIVATE KEY" in private_key_pem + assert b"BEGIN PUBLIC KEY" in public_key_pem + + +def test_generate_rsa_key_pair_different_keys(): + """Test that generating multiple key pairs produces different keys.""" + private_key_1, public_key_1 = cryptography.generate_rsa_key_pair() + private_key_2, public_key_2 = cryptography.generate_rsa_key_pair() + + assert private_key_1 != private_key_2 + assert public_key_1 != public_key_2 + + +def test_generate_rsa_key_pair_invalid_size(): + """Test that generating RSA key with invalid size raises ValueError.""" + with pytest.raises(ValueError, match="RSA key size must be at least 2048 bits"): + cryptography.generate_rsa_key_pair(key_size=1024) + + +def test_generate_aes_key_default_size(): + """Test AES key generation with default size (32 bytes).""" + aes_key = cryptography.generate_aes_key() + + assert isinstance(aes_key, bytes) + assert len(aes_key) == 32 + + +def test_generate_aes_key_custom_size(): + """Test AES key generation with custom size.""" + aes_key = cryptography.generate_aes_key(key_size=16) + + assert isinstance(aes_key, bytes) + assert len(aes_key) == 16 + + +def test_generate_aes_key_different_keys(): + """Test that generating multiple AES keys produces different keys.""" + aes_key_1 = cryptography.generate_aes_key() + aes_key_2 = cryptography.generate_aes_key() + + assert aes_key_1 != aes_key_2 + + +def test_generate_aes_key_invalid_size(): + """Test that generating AES key with invalid size raises ValueError.""" + with pytest.raises(ValueError, match="Key size must be at least 1 byte"): + cryptography.generate_aes_key(key_size=0) + + +def test_generate_iv_default_size(): + """Test IV generation with default size (12 bytes).""" + iv = cryptography.generate_iv() + + assert isinstance(iv, bytes) + assert len(iv) == 12 + + +def test_generate_iv_custom_size(): + """Test IV generation with custom size.""" + iv = cryptography.generate_iv(iv_size=16) + + assert isinstance(iv, bytes) + assert len(iv) == 16 + + +def test_generate_iv_different_ivs(): + """Test that generating multiple IVs produces different values.""" + iv_1 = cryptography.generate_iv() + iv_2 = cryptography.generate_iv() + + assert iv_1 != iv_2 + + +def test_generate_iv_invalid_size(): + """Test that generating IV with invalid size raises ValueError.""" + with pytest.raises(ValueError, match="IV size must be at least 1 byte"): + cryptography.generate_iv(iv_size=0) + + +def test_rsa_encrypt_decrypt_aes_key(): + """Test RSA encryption and decryption of AES key round trip.""" + private_key_pem, public_key_pem = cryptography.generate_rsa_key_pair() + aes_key = cryptography.generate_aes_key() + + encrypted_key = cryptography.rsa_encrypt_aes_key(aes_key, public_key_pem) + decrypted_key = cryptography.rsa_decrypt_aes_key(encrypted_key, private_key_pem) + + assert decrypted_key == aes_key + assert encrypted_key != aes_key + + +def test_rsa_encrypt_aes_key_different_encryptions(): + """Test that encrypting the same AES key multiple times produces different ciphertexts.""" + _, public_key_pem = cryptography.generate_rsa_key_pair() + aes_key = cryptography.generate_aes_key() + + encrypted_key_1 = cryptography.rsa_encrypt_aes_key(aes_key, public_key_pem) + encrypted_key_2 = cryptography.rsa_encrypt_aes_key(aes_key, public_key_pem) + + # RSA OAEP uses random padding, so same plaintext produces different ciphertexts + assert encrypted_key_1 != encrypted_key_2 + + +def test_rsa_encrypt_aes_key_invalid_public_key(): + """Test that encrypting with invalid public key raises ValueError.""" + invalid_key = b"-----BEGIN PUBLIC KEY-----\nInvalid\n-----END PUBLIC KEY-----" + aes_key = cryptography.generate_aes_key() + + with pytest.raises(ValueError): + cryptography.rsa_encrypt_aes_key(aes_key, invalid_key) + + +def test_rsa_decrypt_aes_key_invalid_private_key(): + """Test that decrypting with invalid private key raises ValueError.""" + private_key_pem, public_key_pem = cryptography.generate_rsa_key_pair() + aes_key = cryptography.generate_aes_key() + encrypted_key = cryptography.rsa_encrypt_aes_key(aes_key, public_key_pem) + + invalid_key = b"-----BEGIN PRIVATE KEY-----\nInvalid\n-----END PRIVATE KEY-----" + + with pytest.raises(ValueError): + cryptography.rsa_decrypt_aes_key(encrypted_key, invalid_key) + + +def test_rsa_decrypt_aes_key_wrong_key(): + """Test that decrypting with wrong private key fails.""" + private_key_pem_1, public_key_pem_1 = cryptography.generate_rsa_key_pair() + private_key_pem_2, _ = cryptography.generate_rsa_key_pair() + aes_key = cryptography.generate_aes_key() + + encrypted_key = cryptography.rsa_encrypt_aes_key(aes_key, public_key_pem_1) + + with pytest.raises(Exception): # Should raise decryption error + cryptography.rsa_decrypt_aes_key(encrypted_key, private_key_pem_2) + + +def test_rsa_encrypt_aes_key_with_ecdsa_key_raises_error(): + """Test that encrypting with an ECDSA key (not RSA) raises ValueError.""" + ecdsa_private_key, ecdsa_public_key = cryptography.generate_ecdsa_key_pair() + aes_key = cryptography.generate_aes_key() + + with pytest.raises(ValueError, match="Public key must be an RSA key"): + cryptography.rsa_encrypt_aes_key(aes_key, ecdsa_public_key) + + +def test_rsa_decrypt_aes_key_with_ecdsa_key_raises_error(): + """Test that decrypting with an ECDSA key (not RSA) raises ValueError.""" + ecdsa_private_key, _ = cryptography.generate_ecdsa_key_pair() + encrypted_key = b"fake encrypted data" + + with pytest.raises(ValueError, match="Private key must be an RSA key"): + cryptography.rsa_decrypt_aes_key(encrypted_key, ecdsa_private_key) + + +def test_aes_gcm_encrypt_decrypt_round_trip(): + """Test AES-GCM encryption and decryption round trip.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + plaintext = b"Hello, World! This is test data." + + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv) + decrypted_data = cryptography.aes_gcm_decrypt(encrypted_data, aes_key, iv) + + assert decrypted_data == plaintext + assert encrypted_data != plaintext + assert len(encrypted_data) == len(plaintext) + 16 # ciphertext + 16-byte tag + + +def test_aes_gcm_encrypt_decrypt_with_associated_data(): + """Test AES-GCM encryption and decryption with associated data.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + plaintext = b"Test data" + associated_data = b"Additional authenticated data" + + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv, associated_data) + decrypted_data = cryptography.aes_gcm_decrypt(encrypted_data, aes_key, iv, associated_data) + + assert decrypted_data == plaintext + + +def test_aes_gcm_decrypt_wrong_associated_data(): + """Test that decrypting with wrong associated data fails.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + plaintext = b"Test data" + associated_data = b"Correct AAD" + wrong_aad = b"Wrong AAD" + + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv, associated_data) + + with pytest.raises(Exception): # Should raise authentication error + cryptography.aes_gcm_decrypt(encrypted_data, aes_key, iv, wrong_aad) + + +def test_aes_gcm_encrypt_decrypt_various_data_sizes(): + """Test AES-GCM encryption/decryption with various data sizes.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + + test_cases = [ + b"", + b"a", + b"Short message", + b"Medium length message with some content", + b"Very long message " * 100, + ] + + for plaintext in test_cases: + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv) + decrypted_data = cryptography.aes_gcm_decrypt(encrypted_data, aes_key, iv) + assert decrypted_data == plaintext, f"Failed for data: {plaintext[:50]}" + + +def test_aes_gcm_encrypt_different_plaintexts(): + """Test that encrypting different plaintexts produces different ciphertexts.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + + plaintext_1 = b"First message" + plaintext_2 = b"Second message" + + encrypted_1 = cryptography.aes_gcm_encrypt(plaintext_1, aes_key, iv) + encrypted_2 = cryptography.aes_gcm_encrypt(plaintext_2, aes_key, iv) + + assert encrypted_1 != encrypted_2 + + +def test_aes_gcm_encrypt_same_plaintext_different_ivs(): + """Test that encrypting same plaintext with different IVs produces different ciphertexts.""" + aes_key = cryptography.generate_aes_key() + iv_1 = cryptography.generate_iv() + iv_2 = cryptography.generate_iv() + plaintext = b"Same message" + + encrypted_1 = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv_1) + encrypted_2 = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv_2) + + assert encrypted_1 != encrypted_2 + + +def test_aes_gcm_decrypt_wrong_key(): + """Test that decrypting with wrong key fails.""" + aes_key_1 = cryptography.generate_aes_key() + aes_key_2 = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + plaintext = b"Test data" + + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key_1, iv) + + with pytest.raises(Exception): # Should raise authentication error + cryptography.aes_gcm_decrypt(encrypted_data, aes_key_2, iv) + + +def test_aes_gcm_decrypt_wrong_iv(): + """Test that decrypting with wrong IV fails.""" + aes_key = cryptography.generate_aes_key() + iv_1 = cryptography.generate_iv() + iv_2 = cryptography.generate_iv() + plaintext = b"Test data" + + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv_1) + + with pytest.raises(Exception): # Should raise authentication error + cryptography.aes_gcm_decrypt(encrypted_data, aes_key, iv_2) + + +def test_aes_gcm_decrypt_corrupted_data(): + """Test that decrypting corrupted data fails.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + plaintext = b"Test data" + + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv) + # Corrupt the data + corrupted_data = encrypted_data[:-1] + b"\x00" + + with pytest.raises(Exception): # Should raise authentication error + cryptography.aes_gcm_decrypt(corrupted_data, aes_key, iv) + + +def test_aes_gcm_encrypt_invalid_key_size(): + """Test that encrypting with invalid key size raises ValueError.""" + invalid_key = b"invalid" * 4 # 28 bytes, not 32 + iv = cryptography.generate_iv() + plaintext = b"Test data" + + with pytest.raises(ValueError, match="AES key must be 32 bytes"): + cryptography.aes_gcm_encrypt(plaintext, invalid_key, iv) + + +def test_aes_gcm_decrypt_invalid_key_size(): + """Test that decrypting with invalid key size raises ValueError.""" + invalid_key = b"invalid" * 4 # 28 bytes, not 32 + iv = cryptography.generate_iv() + encrypted_data = b"fake encrypted data" + b"\x00" * 16 # Add fake tag + + with pytest.raises(ValueError, match="AES key must be 32 bytes"): + cryptography.aes_gcm_decrypt(encrypted_data, invalid_key, iv) + + +def test_aes_gcm_encrypt_invalid_iv_size(): + """Test that encrypting with invalid IV size raises ValueError.""" + aes_key = cryptography.generate_aes_key() + invalid_iv = b"invalid" # 7 bytes, not 12 + plaintext = b"Test data" + + with pytest.raises(ValueError, match="IV must be 12 bytes"): + cryptography.aes_gcm_encrypt(plaintext, aes_key, invalid_iv) + + +def test_aes_gcm_decrypt_invalid_iv_size(): + """Test that decrypting with invalid IV size raises ValueError.""" + aes_key = cryptography.generate_aes_key() + invalid_iv = b"invalid" # 7 bytes, not 12 + encrypted_data = b"fake encrypted data" + b"\x00" * 16 # Add fake tag + + with pytest.raises(ValueError, match="IV must be 12 bytes"): + cryptography.aes_gcm_decrypt(encrypted_data, aes_key, invalid_iv) + + +def test_aes_gcm_decrypt_too_short_data(): + """Test that decrypting data that's too short raises ValueError.""" + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + short_data = b"short" # Less than 16 bytes (no tag) + + with pytest.raises(ValueError, match="Encrypted data too short"): + cryptography.aes_gcm_decrypt(short_data, aes_key, iv) + + +def test_pbkdf2_derive_key_from_pin(): + """Test PBKDF2 key derivation from PIN.""" + pin = "1234" + salt = cryptography.generate_iv(iv_size=16) + iterations = 100000 + + derived_key = cryptography.pbkdf2_derive_key_from_pin(pin, salt, iterations) + + assert isinstance(derived_key, bytes) + assert len(derived_key) == 32 # Default key size + + +def test_pbkdf2_derive_key_from_pin_custom_size(): + """Test PBKDF2 key derivation with custom key size.""" + pin = "123456" + salt = cryptography.generate_iv(iv_size=16) + iterations = 100000 + + derived_key = cryptography.pbkdf2_derive_key_from_pin(pin, salt, iterations, key_size=16) + + assert isinstance(derived_key, bytes) + assert len(derived_key) == 16 + + +def test_pbkdf2_derive_key_from_pin_different_salts(): + """Test that different salts produce different keys.""" + pin = "1234" + salt_1 = cryptography.generate_iv(iv_size=16) + salt_2 = cryptography.generate_iv(iv_size=16) + iterations = 100000 + + key_1 = cryptography.pbkdf2_derive_key_from_pin(pin, salt_1, iterations) + key_2 = cryptography.pbkdf2_derive_key_from_pin(pin, salt_2, iterations) + + assert key_1 != key_2 + + +def test_pbkdf2_derive_key_from_pin_different_pins(): + """Test that different PINs produce different keys.""" + pin_1 = "1234" + pin_2 = "5678" + salt = cryptography.generate_iv(iv_size=16) + iterations = 100000 + + key_1 = cryptography.pbkdf2_derive_key_from_pin(pin_1, salt, iterations) + key_2 = cryptography.pbkdf2_derive_key_from_pin(pin_2, salt, iterations) + + assert key_1 != key_2 + + +def test_pbkdf2_derive_key_from_pin_invalid_salt(): + """Test that empty salt raises ValueError.""" + pin = "1234" + empty_salt = b"" + iterations = 100000 + + with pytest.raises(ValueError, match="Salt must be at least 1 byte"): + cryptography.pbkdf2_derive_key_from_pin(pin, empty_salt, iterations) + + +def test_pbkdf2_encrypt_decrypt_aes_key_round_trip(): + """Test PBKDF2 encryption and decryption of AES key round trip.""" + aes_key = cryptography.generate_aes_key() + pin = "1234" + + encrypted_key, salt, iv = cryptography.pbkdf2_encrypt_aes_key(aes_key, pin) + decrypted_key = cryptography.pbkdf2_decrypt_aes_key(encrypted_key, pin, salt, iv) + + assert decrypted_key == aes_key + assert encrypted_key != aes_key + assert isinstance(salt, bytes) + assert len(salt) == 16 + assert isinstance(iv, bytes) + assert len(iv) == 12 + + +def test_pbkdf2_encrypt_aes_key_custom_salt(): + """Test PBKDF2 encryption with custom salt.""" + aes_key = cryptography.generate_aes_key() + pin = "123456" + custom_salt = cryptography.generate_iv(iv_size=16) + + encrypted_key, salt, iv = cryptography.pbkdf2_encrypt_aes_key(aes_key, pin, salt=custom_salt) + decrypted_key = cryptography.pbkdf2_decrypt_aes_key(encrypted_key, pin, salt, iv) + + assert decrypted_key == aes_key + assert salt == custom_salt + + +def test_pbkdf2_decrypt_aes_key_wrong_pin(): + """Test that decrypting with wrong PIN fails.""" + aes_key = cryptography.generate_aes_key() + pin = "1234" + wrong_pin = "5678" + + encrypted_key, salt, iv = cryptography.pbkdf2_encrypt_aes_key(aes_key, pin) + + with pytest.raises(ValueError, match="Failed to decrypt AES key"): + cryptography.pbkdf2_decrypt_aes_key(encrypted_key, wrong_pin, salt, iv) + + +def test_pbkdf2_decrypt_aes_key_wrong_salt(): + """Test that decrypting with wrong salt fails.""" + aes_key = cryptography.generate_aes_key() + pin = "1234" + + encrypted_key, salt, iv = cryptography.pbkdf2_encrypt_aes_key(aes_key, pin) + wrong_salt = cryptography.generate_iv(iv_size=16) + + with pytest.raises(ValueError, match="Failed to decrypt AES key"): + cryptography.pbkdf2_decrypt_aes_key(encrypted_key, pin, wrong_salt, iv) + + +def test_pbkdf2_encrypt_aes_key_invalid_key_size(): + """Test that encrypting with invalid AES key size raises ValueError.""" + invalid_key = b"invalid" * 4 # 28 bytes, not 32 + pin = "1234" + + with pytest.raises(ValueError, match="AES key must be 32 bytes"): + cryptography.pbkdf2_encrypt_aes_key(invalid_key, pin) + + +def test_pbkdf2_encrypt_decrypt_aes_key_different_iterations(): + """Test that same PIN with different iterations produces different keys.""" + aes_key = cryptography.generate_aes_key() + pin = "1234" + salt = cryptography.generate_iv(iv_size=16) + + encrypted_key_1, salt_1, iv_1 = cryptography.pbkdf2_encrypt_aes_key( + aes_key, pin, salt=salt, iterations=100000 + ) + encrypted_key_2, salt_2, iv_2 = cryptography.pbkdf2_encrypt_aes_key( + aes_key, pin, salt=salt, iterations=200000 + ) + + # Different iterations should produce different encrypted keys + assert encrypted_key_1 != encrypted_key_2 + assert salt_1 == salt_2 # Same salt was provided + + # But both should decrypt correctly with their respective iterations + decrypted_1 = cryptography.pbkdf2_decrypt_aes_key(encrypted_key_1, pin, salt_1, iv_1, iterations=100000) + decrypted_2 = cryptography.pbkdf2_decrypt_aes_key(encrypted_key_2, pin, salt_2, iv_2, iterations=200000) + + assert decrypted_1 == aes_key + assert decrypted_2 == aes_key + + +def test_hybrid_encryption_round_trip(): + """Test complete hybrid encryption (RSA for AES key, AES-GCM for data) round trip.""" + # Generate keys + rsa_private_key, rsa_public_key = cryptography.generate_rsa_key_pair() + aes_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + plaintext = b"This is a test message for hybrid encryption." + + # Encrypt AES key with RSA + encrypted_aes_key = cryptography.rsa_encrypt_aes_key(aes_key, rsa_public_key) + + # Encrypt data with AES-GCM + encrypted_data = cryptography.aes_gcm_encrypt(plaintext, aes_key, iv) + + # Decrypt AES key with RSA + decrypted_aes_key = cryptography.rsa_decrypt_aes_key(encrypted_aes_key, rsa_private_key) + + # Decrypt data with AES-GCM + decrypted_data = cryptography.aes_gcm_decrypt(encrypted_data, decrypted_aes_key, iv) + + assert decrypted_data == plaintext + assert decrypted_aes_key == aes_key diff --git a/packages/commons/tests/cryptography/test_signing.py b/packages/commons/tests/cryptography/test_signing.py new file mode 100644 index 0000000000..c758279e66 --- /dev/null +++ b/packages/commons/tests/cryptography/test_signing.py @@ -0,0 +1,228 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import base64 + +import pytest + +from cryptography.hazmat.primitives.asymmetric import ec + +import octobot_commons.cryptography as cryptography + + +def test_generate_ecdsa_key_pair_default_curve(): + """Test ECDSA key pair generation with default curve (SECP256R1).""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair() + + assert isinstance(private_key_pem, bytes) + assert isinstance(public_key_pem, bytes) + assert b"BEGIN PRIVATE KEY" in private_key_pem + assert b"BEGIN PUBLIC KEY" in public_key_pem + assert b"END PRIVATE KEY" in private_key_pem + assert b"END PUBLIC KEY" in public_key_pem + + +def test_generate_ecdsa_key_pair_custom_curve(): + """Test ECDSA key pair generation with custom curve.""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair(curve=ec.SECP384R1()) + + assert isinstance(private_key_pem, bytes) + assert isinstance(public_key_pem, bytes) + assert b"BEGIN PRIVATE KEY" in private_key_pem + assert b"BEGIN PUBLIC KEY" in public_key_pem + + +def test_generate_ecdsa_key_pair_different_keys(): + """Test that generating multiple key pairs produces different keys.""" + private_key_1, public_key_1 = cryptography.generate_ecdsa_key_pair() + private_key_2, public_key_2 = cryptography.generate_ecdsa_key_pair() + + assert private_key_1 != private_key_2 + assert public_key_1 != public_key_2 + + +def test_sign_data(): + """Test signing data with ECDSA private key.""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair() + data = b"Hello, World! This is test data." + + signature = cryptography.sign_data(data, private_key_pem) + + assert isinstance(signature, bytes) + assert len(signature) > 0 + + +def test_sign_data_different_data_produces_different_signatures(): + """Test that signing different data produces different signatures.""" + private_key_pem, _ = cryptography.generate_ecdsa_key_pair() + data1 = b"First message" + data2 = b"Second message" + + signature1 = cryptography.sign_data(data1, private_key_pem) + signature2 = cryptography.sign_data(data2, private_key_pem) + + assert signature1 != signature2 + + +def test_sign_data_same_data_produces_different_signatures(): + """Test that signing the same data multiple times produces different signatures (ECDSA is non-deterministic).""" + private_key_pem, _ = cryptography.generate_ecdsa_key_pair() + data = b"Same message" + + signature1 = cryptography.sign_data(data, private_key_pem) + signature2 = cryptography.sign_data(data, private_key_pem) + + # ECDSA signatures are non-deterministic, so they should be different + assert signature1 != signature2 + + +def test_verify_signature_valid(): + """Test verifying a valid signature.""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair() + data = b"Test data to sign and verify" + + signature = cryptography.sign_data(data, private_key_pem) + is_valid = cryptography.verify_signature(data, public_key_pem, signature) + + assert is_valid is True + + +def test_verify_signature_invalid_data(): + """Test verifying a signature with wrong data.""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair() + original_data = b"Original data" + wrong_data = b"Wrong data" + + signature = cryptography.sign_data(original_data, private_key_pem) + is_valid = cryptography.verify_signature(wrong_data, public_key_pem, signature) + + assert is_valid is False + + +def test_verify_signature_invalid_signature(): + """Test verifying with a corrupted signature.""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair() + data = b"Test data" + + signature = cryptography.sign_data(data, private_key_pem) + # Corrupt the signature + corrupted_signature = signature[:-1] + b"\x00" + + is_valid = cryptography.verify_signature(data, public_key_pem, corrupted_signature) + + assert is_valid is False + + +def test_verify_signature_wrong_public_key(): + """Test verifying a signature with a different public key.""" + private_key_pem_1, public_key_pem_1 = cryptography.generate_ecdsa_key_pair() + _, public_key_pem_2 = cryptography.generate_ecdsa_key_pair() + data = b"Test data" + + signature = cryptography.sign_data(data, private_key_pem_1) + is_valid = cryptography.verify_signature(data, public_key_pem_2, signature) + + assert is_valid is False + + +def test_sign_and_verify_round_trip(): + """Test complete sign and verify round trip with various data sizes.""" + private_key_pem, public_key_pem = cryptography.generate_ecdsa_key_pair() + + test_cases = [ + b"", + b"a", + b"Short message", + b"Medium length message with some content", + b"Very long message " * 100, + ] + + for data in test_cases: + signature = cryptography.sign_data(data, private_key_pem) + is_valid = cryptography.verify_signature(data, public_key_pem, signature) + assert is_valid is True, f"Verification failed for data: {data[:50]}" + + +def test_sign_data_invalid_private_key(): + """Test signing with invalid private key raises ValueError.""" + invalid_key = b"-----BEGIN PRIVATE KEY-----\nInvalid\n-----END PRIVATE KEY-----" + data = b"Test data" + + with pytest.raises(ValueError): + cryptography.sign_data(data, invalid_key) + + +def test_verify_signature_invalid_public_key(): + """Test verifying with invalid public key raises ValueError.""" + _, public_key_pem = cryptography.generate_ecdsa_key_pair() + invalid_key = b"-----BEGIN PUBLIC KEY-----\nInvalid\n-----END PUBLIC KEY-----" + data = b"Test data" + signature = b"fake signature" + + with pytest.raises(ValueError): + cryptography.verify_signature(data, invalid_key, signature) + + +def test_parse_private_key_pem_raw_pem(): + private_key_pem, _ = cryptography.generate_ecdsa_key_pair() + raw_pem_str = private_key_pem.decode("utf-8") + result = cryptography.parse_private_key_pem(raw_pem_str) + assert result == private_key_pem.strip() + + +def test_parse_private_key_pem_base64_encoded(): + private_key_pem, _ = cryptography.generate_ecdsa_key_pair() + b64_str = base64.b64encode(private_key_pem).decode() + result = cryptography.parse_private_key_pem(b64_str) + assert result == private_key_pem + + +def test_parse_private_key_pem_base64_with_whitespace(): + private_key_pem, _ = cryptography.generate_ecdsa_key_pair() + clean_b64 = base64.b64encode(private_key_pem).decode() + dirty_b64 = "\n".join(clean_b64[i:i+76] for i in range(0, len(clean_b64), 76)) + "\n" + result = cryptography.parse_private_key_pem(dirty_b64) + assert result == private_key_pem + + +def test_parse_private_key_pem_raw_pem_with_whitespace(): + private_key_pem, _ = cryptography.generate_ecdsa_key_pair() + padded_pem = " \n" + private_key_pem.decode("utf-8") + "\n " + result = cryptography.parse_private_key_pem(padded_pem) + assert result == private_key_pem.strip() + + +def test_parse_private_key_pem_invalid_base64(): + with pytest.raises(Exception): + cryptography.parse_private_key_pem("not-valid-base64!!!") + + +def test_sign_data_with_rsa_key_raises_error(): + """Test that signing with an RSA key (not ECDSA) raises ValueError.""" + rsa_private_key, _ = cryptography.generate_rsa_key_pair() + data = b"Test data" + + with pytest.raises(ValueError, match="Private key must be an ECDSA key"): + cryptography.sign_data(data, rsa_private_key) + + +def test_verify_signature_with_rsa_key_raises_error(): + """Test that verifying with an RSA key (not ECDSA) raises ValueError.""" + _, rsa_public_key = cryptography.generate_rsa_key_pair() + data = b"Test data" + signature = b"fake signature" + + with pytest.raises(ValueError, match="Public key must be an ECDSA key"): + cryptography.verify_signature(data, rsa_public_key, signature) diff --git a/packages/commons/tests/databases/__init__.py b/packages/commons/tests/databases/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/commons/tests/databases/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/commons/tests/databases/document_database_adaptors/__init__.py b/packages/commons/tests/databases/document_database_adaptors/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/commons/tests/databases/document_database_adaptors/test_tinydb_adaptor.py b/packages/commons/tests/databases/document_database_adaptors/test_tinydb_adaptor.py new file mode 100644 index 0000000000..55eed16e8e --- /dev/null +++ b/packages/commons/tests/databases/document_database_adaptors/test_tinydb_adaptor.py @@ -0,0 +1,736 @@ +# type: ignore +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import mock +import os +import pytest +import tempfile + +import tinydb + +import octobot_commons.constants as constants +import octobot_commons.errors as errors +import octobot_commons.databases.document_database_adaptors.tinydb_adaptor as tinydb_adaptor + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +# ============== Fixtures and Helpers ============== + +@contextlib.contextmanager +def get_temp_directory(): + """Context manager for creating a temporary directory.""" + with tempfile.TemporaryDirectory() as temp_dir: + yield temp_dir + + +@contextlib.asynccontextmanager +async def get_temp_database(cache_size=None): + """Context manager for creating a temporary TinyDB database.""" + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path, cache_size=cache_size) + adaptor.initialize() + try: + yield adaptor + finally: + await adaptor.close() + + +# ============== Static Methods Tests ============== + +def test_is_file_system_based(): + assert tinydb_adaptor.TinyDBAdaptor.is_file_system_based() is True + + +def test_get_db_file_ext(): + assert tinydb_adaptor.TinyDBAdaptor.get_db_file_ext() == constants.TINYDB_EXT + assert tinydb_adaptor.TinyDBAdaptor.get_db_file_ext() == ".json" + + +# ============== Initialization Tests ============== + +def test_init_sets_attributes(): + adaptor = tinydb_adaptor.TinyDBAdaptor("/some/path/db.json", cache_size=100) + assert adaptor.db_path == "/some/path/db.json" + assert adaptor.cache_size == 100 + assert adaptor.database is None + + +def test_init_with_default_cache_size(): + adaptor = tinydb_adaptor.TinyDBAdaptor("/some/path/db.json") + assert adaptor.cache_size is None + assert adaptor.database is None + + +def test_initialize_creates_database(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + adaptor.initialize() + try: + assert adaptor.database is not None + assert isinstance(adaptor.database, tinydb.TinyDB) + finally: + adaptor.database.close() + + +def test_initialize_with_custom_cache_size(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + custom_cache_size = 1000 + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path, cache_size=custom_cache_size) + adaptor.initialize() + try: + assert adaptor.database is not None + # Verify cache size was applied to the middleware + assert adaptor.database.storage.WRITE_CACHE_SIZE == custom_cache_size + finally: + adaptor.database.close() + + +def test_initialize_uses_default_cache_size(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + adaptor.initialize() + try: + assert adaptor.database.storage.WRITE_CACHE_SIZE == tinydb_adaptor.TinyDBAdaptor.DEFAULT_WRITE_CACHE_SIZE + finally: + adaptor.database.close() + + +def test_initialize_raises_database_not_found_error(): + # Path with non-existent parent directory + db_path = "/non/existent/path/db.json" + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + with pytest.raises(errors.DatabaseNotFoundError): + adaptor.initialize() + + +# ============== Identifier Operations Tests ============== + +async def test_create_identifier(): + with get_temp_directory() as temp_dir: + identifier_path = os.path.join(temp_dir, "new_identifier") + assert not os.path.exists(identifier_path) + await tinydb_adaptor.TinyDBAdaptor.create_identifier(identifier_path) + assert os.path.exists(identifier_path) + assert os.path.isdir(identifier_path) + + +async def test_identifier_exists_file(): + with get_temp_directory() as temp_dir: + file_path = os.path.join(temp_dir, "test_file.json") + # File doesn't exist yet + assert await tinydb_adaptor.TinyDBAdaptor.identifier_exists(file_path, is_full_identifier=True) is False + # Create file + with open(file_path, "w") as f: + f.write("{}") + assert await tinydb_adaptor.TinyDBAdaptor.identifier_exists(file_path, is_full_identifier=True) is True + + +async def test_identifier_exists_directory(): + with get_temp_directory() as temp_dir: + dir_path = os.path.join(temp_dir, "test_dir") + # Directory doesn't exist yet + assert await tinydb_adaptor.TinyDBAdaptor.identifier_exists(dir_path, is_full_identifier=False) is False + # Create directory + os.makedirs(dir_path) + assert await tinydb_adaptor.TinyDBAdaptor.identifier_exists(dir_path, is_full_identifier=False) is True + + +async def test_get_sub_identifiers(): + with get_temp_directory() as temp_dir: + # Create subdirectories + os.makedirs(os.path.join(temp_dir, "sub1")) + os.makedirs(os.path.join(temp_dir, "sub2")) + os.makedirs(os.path.join(temp_dir, "ignored_sub")) + # Create a file (should not be yielded) + with open(os.path.join(temp_dir, "file.txt"), "w") as f: + f.write("test") + + ignored = ["ignored_sub"] + sub_identifiers = [] + async for name in tinydb_adaptor.TinyDBAdaptor.get_sub_identifiers(temp_dir, ignored): + sub_identifiers.append(name) + + assert "sub1" in sub_identifiers + assert "sub2" in sub_identifiers + assert "ignored_sub" not in sub_identifiers + assert "file.txt" not in sub_identifiers + + +async def test_get_single_sub_identifier_returns_name(): + with get_temp_directory() as temp_dir: + # Create single subdirectory + os.makedirs(os.path.join(temp_dir, "only_sub")) + + result = await tinydb_adaptor.TinyDBAdaptor.get_single_sub_identifier(temp_dir, []) + assert result == "only_sub" + + +async def test_get_single_sub_identifier_returns_none_for_multiple(): + with get_temp_directory() as temp_dir: + # Create multiple subdirectories + os.makedirs(os.path.join(temp_dir, "sub1")) + os.makedirs(os.path.join(temp_dir, "sub2")) + + result = await tinydb_adaptor.TinyDBAdaptor.get_single_sub_identifier(temp_dir, []) + assert result is None + + +async def test_get_single_sub_identifier_returns_none_for_empty(): + with get_temp_directory() as temp_dir: + result = await tinydb_adaptor.TinyDBAdaptor.get_single_sub_identifier(temp_dir, []) + assert result is None + + +async def test_get_single_sub_identifier_ignores_specified(): + with get_temp_directory() as temp_dir: + os.makedirs(os.path.join(temp_dir, "sub1")) + os.makedirs(os.path.join(temp_dir, "ignored")) + + result = await tinydb_adaptor.TinyDBAdaptor.get_single_sub_identifier(temp_dir, ["ignored"]) + assert result == "sub1" + + +# ============== Document Operations Tests ============== + +async def test_get_uuid(): + async with get_temp_database() as adaptor: + doc_id = await adaptor.insert("test_table", {"name": "test"}) + documents = await adaptor.select("test_table", None) + assert len(documents) == 1 + assert adaptor.get_uuid(documents[0]) == doc_id + + +async def test_select_all(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "doc1"}) + await adaptor.insert("test_table", {"name": "doc2"}) + await adaptor.insert("test_table", {"name": "doc3"}) + + results = await adaptor.select("test_table", None) + assert len(results) == 3 + names = [doc["name"] for doc in results] + assert "doc1" in names + assert "doc2" in names + assert "doc3" in names + + +async def test_select_with_query(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "alice", "age": 30}) + await adaptor.insert("test_table", {"name": "bob", "age": 25}) + await adaptor.insert("test_table", {"name": "charlie", "age": 30}) + + query = await adaptor.query_factory() + results = await adaptor.select("test_table", query.age == 30) + assert len(results) == 2 + names = [doc["name"] for doc in results] + assert "alice" in names + assert "charlie" in names + + +async def test_select_by_uuid(): + async with get_temp_database() as adaptor: + doc1_id = await adaptor.insert("test_table", {"name": "doc1"}) + await adaptor.insert("test_table", {"name": "doc2"}) + + result = await adaptor.select("test_table", None, uuid=doc1_id) + assert result["name"] == "doc1" + + +async def test_tables(): + async with get_temp_database() as adaptor: + await adaptor.insert("table1", {"data": 1}) + await adaptor.insert("table2", {"data": 2}) + await adaptor.insert("table3", {"data": 3}) + + tables = await adaptor.tables() + assert "table1" in tables + assert "table2" in tables + assert "table3" in tables + + +async def test_insert(): + async with get_temp_database() as adaptor: + doc_id = await adaptor.insert("test_table", {"name": "test", "value": 42}) + assert isinstance(doc_id, int) + assert doc_id > 0 + + results = await adaptor.select("test_table", None) + assert len(results) == 1 + assert results[0]["name"] == "test" + assert results[0]["value"] == 42 + + +async def test_insert_many(): + async with get_temp_database() as adaptor: + rows = [ + {"name": "doc1", "value": 1}, + {"name": "doc2", "value": 2}, + {"name": "doc3", "value": 3}, + ] + doc_ids = await adaptor.insert_many("test_table", rows) + assert len(doc_ids) == 3 + assert all(isinstance(doc_id, int) for doc_id in doc_ids) + + results = await adaptor.select("test_table", None) + assert len(results) == 3 + + +async def test_upsert_insert(): + async with get_temp_database() as adaptor: + query = await adaptor.query_factory() + await adaptor.upsert("test_table", {"name": "new_doc", "value": 100}, query.name == "new_doc") + + results = await adaptor.select("test_table", None) + assert len(results) == 1 + assert results[0]["name"] == "new_doc" + assert results[0]["value"] == 100 + + +async def test_upsert_update_with_query(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "existing2", "value": 150}) + await adaptor.insert("test_table", {"name": "existing", "value": 50}) + + query = await adaptor.query_factory() + await adaptor.upsert("test_table", {"name": "existing", "value": 200}, query.name == "existing") + + query = await adaptor.query_factory() + results = await adaptor.select("test_table", query.name == "existing") + assert len(results) == 1 + assert results[0]["name"] == "existing" + assert results[0]["value"] == 200 + + +async def test_upsert_update_with_uuid(): + async with get_temp_database() as adaptor: + doc_id = await adaptor.insert("test_table", {"name": "existing", "value": 50}) + + await adaptor.upsert("test_table", {"name": "updated", "value": 300}, None, uuid=doc_id) + + result = await adaptor.select("test_table", None, uuid=doc_id) + assert result["name"] == "updated" + assert result["value"] == 300 + + +async def test_update_with_query(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "alice", "status": "active"}) + await adaptor.insert("test_table", {"name": "bob", "status": "active"}) + + query = await adaptor.query_factory() + await adaptor.update("test_table", {"status": "inactive"}, query.name == "alice") + + results = await adaptor.select("test_table", query.status == "inactive") + assert len(results) == 1 + assert results[0]["name"] == "alice" + + +async def test_update_with_uuid(): + async with get_temp_database() as adaptor: + doc_id = await adaptor.insert("test_table", {"name": "test", "value": 10}) + doc_id2 = await adaptor.insert("test_table", {"name": "test2", "value": 10}) + + await adaptor.update("test_table", {"value": 999}, None, uuid=doc_id) + + result = await adaptor.select("test_table", None, uuid=doc_id) + assert result["value"] == 999 + + +async def test_update_many(): + async with get_temp_database() as adaptor: + doc1_id = await adaptor.insert("test_table", {"name": "doc1", "value": 1}) + doc2_id = await adaptor.insert("test_table", {"name": "doc2", "value": 2}) + + query = await adaptor.query_factory() + update_values = [ + ({"value": 100}, query.name == "doc1"), + ({"value": 200}, query.name == "doc2"), + ] + await adaptor.update_many("test_table", update_values) + + result1 = await adaptor.select("test_table", None, uuid=doc1_id) + result2 = await adaptor.select("test_table", None, uuid=doc2_id) + assert result1["value"] == 100 + assert result2["value"] == 200 + + +async def test_delete_with_query(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "keep", "type": "a"}) + await adaptor.insert("test_table", {"name": "delete", "type": "b"}) + + query = await adaptor.query_factory() + await adaptor.delete("test_table", query.type == "b") + + results = await adaptor.select("test_table", None) + assert len(results) == 1 + assert results[0]["name"] == "keep" + + +async def test_delete_with_uuid(): + async with get_temp_database() as adaptor: + doc1_id = await adaptor.insert("test_table", {"name": "doc1"}) + await adaptor.insert("test_table", {"name": "doc2"}) + + await adaptor.delete("test_table", None, uuid=doc1_id) + + results = await adaptor.select("test_table", None) + assert len(results) == 1 + assert results[0]["name"] == "doc2" + + +async def test_delete_drop_table(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "doc1"}) + await adaptor.insert("test_table", {"name": "doc2"}) + await adaptor.insert("other_table", {"name": "other"}) + + tables_before = await adaptor.tables() + assert "test_table" in tables_before + + await adaptor.delete("test_table", None) + + tables_after = await adaptor.tables() + assert "test_table" not in tables_after + assert "other_table" in tables_after + + +async def test_count(): + async with get_temp_database() as adaptor: + await adaptor.insert("test_table", {"name": "alice", "type": "user"}) + await adaptor.insert("test_table", {"name": "bob", "type": "user"}) + await adaptor.insert("test_table", {"name": "admin", "type": "admin"}) + + query = await adaptor.query_factory() + user_count = await adaptor.count("test_table", query.type == "user") + admin_count = await adaptor.count("test_table", query.type == "admin") + + assert user_count == 2 + assert admin_count == 1 + + +async def test_query_factory(): + async with get_temp_database() as adaptor: + query = await adaptor.query_factory() + assert isinstance(query, tinydb.Query) + + +# ============== Lifecycle Operations Tests ============== + +async def test_hard_reset(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + adaptor.initialize() + + # Insert some data + await adaptor.insert("test_table", {"name": "test"}) + await adaptor.flush() + + # Verify file exists + assert os.path.exists(db_path) + + # Hard reset with mocked close and initialize to verify they are called + with mock.patch.object(adaptor, "close", wraps=adaptor.close) as mock_close, \ + mock.patch.object(adaptor, "initialize", wraps=adaptor.initialize) as mock_initialize: + await adaptor.hard_reset() + mock_close.assert_called_once() + mock_initialize.assert_called_once() + + # Verify database was reset (file recreated, empty tables) + tables = await adaptor.tables() + assert "test_table" not in tables or len(await adaptor.select("test_table", None)) == 0 + + await adaptor.close() + + +async def test_flush(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + adaptor.initialize() + + try: + await adaptor.insert("test_table", {"name": "test"}) + # Flush should not raise + await adaptor.flush() + finally: + await adaptor.close() + + +async def test_close(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + adaptor.initialize() + + await adaptor.insert("test_table", {"name": "test"}) + # Close should not raise + await adaptor.close() + + +async def test_close_handles_attribute_error(): + adaptor = tinydb_adaptor.TinyDBAdaptor("/some/path/db.json") + # database is None, close should handle AttributeError gracefully + await adaptor.close() # Should not raise + + +async def test_close_handles_type_error(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "test_db.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path) + adaptor.initialize() + + # Mock the database.close to raise TypeError + with mock.patch.object(adaptor.database, "close", side_effect=TypeError("test error")): + # Should not raise, but log the error + await adaptor.close() + + +# ============== LazyJSONStorage Tests ============== + +def test_lazy_storage_does_not_create_file_on_init(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "lazy_test.json") + LazyJSONStorage = tinydb_adaptor.TinyDBAdaptor._get_storage() + storage = LazyJSONStorage(db_path) + + # File should not exist yet + assert not os.path.exists(db_path) + storage.close() + + +def test_lazy_storage_creates_file_on_handle_access(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "lazy_test.json") + LazyJSONStorage = tinydb_adaptor.TinyDBAdaptor._get_storage() + storage = LazyJSONStorage(db_path) + + assert not os.path.exists(db_path) + + # Access _handle to trigger file creation + _ = storage._handle + + assert os.path.exists(db_path) + storage.close() + + +def test_lazy_storage_close_without_open(): + with get_temp_directory() as temp_dir: + db_path = os.path.join(temp_dir, "lazy_test.json") + LazyJSONStorage = tinydb_adaptor.TinyDBAdaptor._get_storage() + storage = LazyJSONStorage(db_path) + + # Close without ever opening - should not raise + storage.close() + assert not os.path.exists(db_path) + + +def test_lazy_storage_raises_file_not_found_for_missing_parent(): + LazyJSONStorage = tinydb_adaptor.TinyDBAdaptor._get_storage() + with pytest.raises(FileNotFoundError): + LazyJSONStorage("/non/existent/path/db.json") + + +# ============== Functional Integration Test ============== + +async def test_full_workflow_functional(): + """ + Comprehensive functional test exercising most TinyDBAdaptor methods + without any mocks - using real TinyDB operations. + """ + with get_temp_directory() as temp_dir: + # 1. Create identifier (directory structure) + identifier_path = os.path.join(temp_dir, "data", "exchange1") + await tinydb_adaptor.TinyDBAdaptor.create_identifier(identifier_path) + assert os.path.isdir(identifier_path) + + # 2. Verify identifier_exists for directory + assert await tinydb_adaptor.TinyDBAdaptor.identifier_exists( + identifier_path, is_full_identifier=False + ) is True + + # 3. Initialize database + db_path = os.path.join(identifier_path, "database.json") + adaptor = tinydb_adaptor.TinyDBAdaptor(db_path, cache_size=100) + adaptor.initialize() + + try: + # 4. Verify static methods + assert tinydb_adaptor.TinyDBAdaptor.is_file_system_based() is True + assert tinydb_adaptor.TinyDBAdaptor.get_db_file_ext() == ".json" + + # 5. Insert documents into multiple tables + + # 5.a: Verify database file does not exist (lazy initialization) + db_path = os.path.join(identifier_path, "database.json") + assert not os.path.exists(db_path) + user1_id = await adaptor.insert("users", {"name": "Alice", "age": 30, "role": "admin"}) + # 5.b: Verify database file NOW exists (lazy initialization tirggered) + assert os.path.exists(db_path) + + user2_id = await adaptor.insert("users", {"name": "Bob", "age": 25, "role": "user"}) + user3_id = await adaptor.insert("users", {"name": "Charlie", "age": 35, "role": "user"}) + + order_ids = await adaptor.insert_many("orders", [ + {"user_id": user1_id, "product": "Widget", "quantity": 5}, + {"user_id": user2_id, "product": "Gadget", "quantity": 3}, + {"user_id": user1_id, "product": "Gizmo", "quantity": 2}, + ]) + assert len(order_ids) == 3 + + # 6. Select all and verify + all_users = await adaptor.select("users", None) + assert len(all_users) == 3 + + all_orders = await adaptor.select("orders", None) + assert len(all_orders) == 3 + + # 7. Query with tinydb.Query + query = await adaptor.query_factory() + admins = await adaptor.select("users", query.role == "admin") + assert len(admins) == 1 + assert admins[0]["name"] == "Alice" + + users_over_25 = await adaptor.select("users", query.age > 25) + assert len(users_over_25) == 2 + + # 8. Select by UUID + user1_doc = await adaptor.select("users", None, uuid=user1_id) + assert user1_doc["name"] == "Alice" + + # 9. Get UUID from document + assert adaptor.get_uuid(all_users[0]) > 0 + + # 10. Upsert - insert new + await adaptor.upsert("users", {"name": "Diana", "age": 28, "role": "user"}, query.name == "Diana") + all_users = await adaptor.select("users", None) + assert len(all_users) == 4 + + # 11. Upsert - update existing by query + await adaptor.upsert("users", {"name": "Alice", "age": 31, "role": "superadmin"}, query.name == "Alice") + alice = await adaptor.select("users", query.name == "Alice") + assert alice[0]["age"] == 31 + assert alice[0]["role"] == "superadmin" + + # 12. Upsert - update existing by UUID + await adaptor.upsert("users", {"name": "Bob", "age": 26, "role": "moderator"}, None, uuid=user2_id) + bob = await adaptor.select("users", None, uuid=user2_id) + assert bob["age"] == 26 + assert bob["role"] == "moderator" + + # 13. Update by query + await adaptor.update("orders", {"status": "pending"}, query.product == "Widget") + widget_orders = await adaptor.select("orders", query.product == "Widget") + assert widget_orders[0]["status"] == "pending" + + # 14. Update by UUID + await adaptor.update("orders", {"status": "shipped"}, None, uuid=order_ids[0]) + order = await adaptor.select("orders", None, uuid=order_ids[0]) + assert order["status"] == "shipped" + + # 15. Update many + await adaptor.update_many("orders", [ + ({"priority": "high"}, query.user_id == user1_id), + ({"priority": "normal"}, query.user_id == user2_id), + ]) + high_priority = await adaptor.select("orders", query.priority == "high") + assert len(high_priority) == 2 # Alice has 2 orders + + # 16. Count documents + user_count = await adaptor.count("users", query.role != "superadmin") + assert user_count == 3 + + total_orders = await adaptor.count("orders", query.quantity >= 1) + assert total_orders == 3 + + # 17. Get tables list + tables = await adaptor.tables() + assert "users" in tables + assert "orders" in tables + + # 18. Delete by query + await adaptor.delete("orders", query.product == "Gizmo") + remaining_orders = await adaptor.select("orders", None) + assert len(remaining_orders) == 2 + + # 19. Delete by UUID + await adaptor.delete("users", None, uuid=user3_id) + remaining_users = await adaptor.select("users", None) + assert len(remaining_users) == 3 + charlie = await adaptor.select("users", query.name == "Charlie") + assert len(charlie) == 0 + + # 20. Delete (drop table) + await adaptor.delete("orders", None) + tables = await adaptor.tables() + assert "orders" not in tables + assert "users" in tables + + # 21. Flush to ensure data is written + await adaptor.flush() + + # 22. Verify identifier_exists for file + assert await tinydb_adaptor.TinyDBAdaptor.identifier_exists( + db_path, is_full_identifier=True + ) is True + + # 23. Hard reset + await adaptor.hard_reset() + + # 24. Verify database is empty after reset + tables_after_reset = await adaptor.tables() + assert "users" not in tables_after_reset + + # 25. Verify we can still use the database after reset + await adaptor.insert("new_table", {"data": "after_reset"}) + new_data = await adaptor.select("new_table", None) + assert len(new_data) == 1 + assert new_data[0]["data"] == "after_reset" + + # 26. Test get_sub_identifiers + os.makedirs(os.path.join(temp_dir, "data", "exchange2")) + os.makedirs(os.path.join(temp_dir, "data", "ignored_exchange")) + sub_ids = [] + async for sub_id in tinydb_adaptor.TinyDBAdaptor.get_sub_identifiers( + os.path.join(temp_dir, "data"), ["ignored_exchange"] + ): + sub_ids.append(sub_id) + assert "exchange1" in sub_ids + assert "exchange2" in sub_ids + assert "ignored_exchange" not in sub_ids + + # 27. Test get_single_sub_identifier + single_sub = await tinydb_adaptor.TinyDBAdaptor.get_single_sub_identifier( + identifier_path, [] + ) + # Should return None since there are no subdirectories in exchange1 + assert single_sub is None + + finally: + # 28. Close database + await adaptor.close() + + # 29. Verify file still exists after close + assert os.path.exists(db_path) diff --git a/packages/commons/tests/databases/global_storage/__init__.py b/packages/commons/tests/databases/global_storage/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/commons/tests/databases/global_storage/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/commons/tests/databases/global_storage/test_global_shared_memory_storage.py b/packages/commons/tests/databases/global_storage/test_global_shared_memory_storage.py new file mode 100644 index 0000000000..9fb1712e82 --- /dev/null +++ b/packages/commons/tests/databases/global_storage/test_global_shared_memory_storage.py @@ -0,0 +1,74 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases as databases + + +def test_remove_oldest_elements(): + databases.GlobalSharedMemoryStorage.instance()["10"] = "a" + databases.GlobalSharedMemoryStorage.instance()["2"] = 1 + databases.GlobalSharedMemoryStorage.instance()["-1"] = "af" + databases.GlobalSharedMemoryStorage.instance()["0"] = "as" + databases.GlobalSharedMemoryStorage.instance()[10] = "asfgvfs" + databases.GlobalSharedMemoryStorage.instance()["12"] = "bbcc" + assert len(databases.GlobalSharedMemoryStorage.instance()) == 6 + assert databases.GlobalSharedMemoryStorage.instance() == { + "10": "a", + "2": 1, + "-1": "af", + "0": "as", + 10: "asfgvfs", + "12": "bbcc", + } + databases.GlobalSharedMemoryStorage.instance().remove_oldest_elements(0) + assert databases.GlobalSharedMemoryStorage.instance() == { + "10": "a", + "2": 1, + "-1": "af", + "0": "as", + 10: "asfgvfs", + "12": "bbcc", + } + databases.GlobalSharedMemoryStorage.instance().remove_oldest_elements(1) + assert databases.GlobalSharedMemoryStorage.instance() == { + "2": 1, + "-1": "af", + "0": "as", + 10: "asfgvfs", + "12": "bbcc", + } + databases.GlobalSharedMemoryStorage.instance().remove_oldest_elements(4) + assert databases.GlobalSharedMemoryStorage.instance() == { + "12": "bbcc", + } + databases.GlobalSharedMemoryStorage.instance()["2"] = 1 + assert databases.GlobalSharedMemoryStorage.instance() == { + "12": "bbcc", + "2": 1, + } + databases.GlobalSharedMemoryStorage.instance().remove_oldest_elements(1) + assert databases.GlobalSharedMemoryStorage.instance() == { + "2": 1, + } + databases.GlobalSharedMemoryStorage.instance().remove_oldest_elements(10) + assert databases.GlobalSharedMemoryStorage.instance() == {} + + +def test_get_bytes_size(): + assert 0 < databases.GlobalSharedMemoryStorage.instance().get_bytes_size() < 1000 + for i in range(10000): + databases.GlobalSharedMemoryStorage.instance()[i] = "aaaaaaaaaaaaaaaaaaaaaa" + assert 200000 < databases.GlobalSharedMemoryStorage.instance().get_bytes_size() < 800000 + databases.GlobalSharedMemoryStorage.instance().remove_oldest_elements(10000) diff --git a/packages/commons/tests/databases/relational_databases/__init__.py b/packages/commons/tests/databases/relational_databases/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/commons/tests/databases/relational_databases/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/commons/tests/databases/relational_databases/sqlite/__init__.py b/packages/commons/tests/databases/relational_databases/sqlite/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/commons/tests/databases/relational_databases/sqlite/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/commons/tests/databases/relational_databases/sqlite/test_sqlite_database.py b/packages/commons/tests/databases/relational_databases/sqlite/test_sqlite_database.py new file mode 100644 index 0000000000..9eeab0bd12 --- /dev/null +++ b/packages/commons/tests/databases/relational_databases/sqlite/test_sqlite_database.py @@ -0,0 +1,288 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest +import os +import asyncio +import sqlite3 +import contextlib + + +import octobot_commons.asyncio_tools as asyncio_tools +import octobot_commons.errors as errors +import octobot_commons.databases as databases +import octobot_commons.enums as enums + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + +DATA_FILE1 = "ExchangeHistoryDataCollector_1589740606.4862757.data" +DATA_FILE2 = "second_ExchangeHistoryDataCollector_1589740606.4862757.data" +OHLCV = mock.Mock(value="ohlcv") +KLINE = mock.Mock(value="kline") + + +# use context manager instead of fixture to prevent pytest threads issues +@contextlib.asynccontextmanager +async def get_database(data_file=DATA_FILE1): + async with databases.new_sqlite_database(os.path.join("tests", "static", data_file)) as db: + yield db + # prevent "generator didn't stop after athrow(), see https://github.com/python-trio/trio/issues/2081" + await asyncio_tools.wait_asyncio_next_cycle() + + +# use context manager instead of fixture to prevent pytest threads issues +@contextlib.asynccontextmanager +async def get_temp_empty_database(): + database_name = "temp_empty_database" + try: + async with databases.new_sqlite_database(database_name) as db: + yield db + finally: + # prevent "generator didn't stop after athrow(), see https://github.com/python-trio/trio/issues/2081" + await asyncio_tools.wait_asyncio_next_cycle() + os.remove(database_name) + + +async def test_invalid_file(): + file_name = "plop" + db = databases.SQLiteDatabase(file_name) + try: + await db.initialize() + assert not await db.check_table_exists(KLINE) + with pytest.raises(sqlite3.OperationalError): + await db.check_table_not_empty(KLINE) + finally: + await db.stop() + os.remove(file_name) + + +async def test_select(): + async with get_database() as database: + # default values + with pytest.raises(errors.DatabaseNotFoundError): + await database.select(KLINE) + + ohlcv = await database.select(OHLCV) + assert len(ohlcv) == 6531 + + ohlcv = await database.select(OHLCV, time_frame="1h") + assert len(ohlcv) == 500 + + ohlcv = await database.select(OHLCV, symbol="xyz") + assert len(ohlcv) == 0 + + ohlcv = await database.select(OHLCV, symbol="ETH/BTC") + assert len(ohlcv) == 6531 + + changed_order_ohlcv = await database.select(OHLCV, order_by="time_frame", symbol="ETH/BTC") + assert changed_order_ohlcv[0] != ohlcv[0] + + ohlcv = await database.select(OHLCV, xyz="xyz") + assert len(ohlcv) == 0 + + +async def test_select_max(): + async with get_database() as database: + assert await database.select_max(OHLCV, ["timestamp"]) == [(1590883200,)] + assert await database.select_max(OHLCV, ["timestamp"], time_frame="1h") == [(1589742000,)] + assert await database.select_max(OHLCV, ["timestamp"], ["symbol"], time_frame="1h") == \ + [(1589742000, "ETH/BTC")] + + +async def test_select_min(): + async with get_database() as database: + assert await database.select_min(OHLCV, ["timestamp"]) == [(1500249600,)] + assert await database.select_min(OHLCV, ["timestamp"], time_frame="1h") == [(1587945600,)] + assert await database.select_min(OHLCV, ["timestamp"], ["symbol"], time_frame="1h") == \ + [(1587945600, "ETH/BTC")] + + +async def test_select_count(): + async with get_database() as database: + assert await database.select_count(OHLCV, ["*"]) == [(6531,)] + assert await database.select_count(OHLCV, ["*"], time_frame="1h") == [(500,)] + assert await database.select_count(OHLCV, ["*"], time_frame="1M") == [(35,)] + + +async def test_select_from_timestamp(): + async with get_database() as database: + operations = [enums.DataBaseOperations.INF_EQUALS.value] + candles = await database.select_from_timestamp(OHLCV, ["1587960000"], operations) + assert len(candles) > 0 + assert all(candle[0] <= 1587960000 for candle in candles) + + operations = [enums.DataBaseOperations.INF_EQUALS.value, enums.DataBaseOperations.SUP_EQUALS.value] + candles = await database.select_from_timestamp(OHLCV, + ["1587960000", "1587960000"], + operations) + assert len(candles) > 0 + assert all(candle[0] == 1587960000 for candle in candles) + + operations = [enums.DataBaseOperations.INF_EQUALS.value, enums.DataBaseOperations.SUP_EQUALS.value] + candles = await database.select_from_timestamp(OHLCV, + ["1587960000", "1587945600"], + operations) + assert len(candles) == 15 + assert all(1587945600 <= candle[0] <= 1587960000 for candle in candles) + + operations = [enums.DataBaseOperations.INF_EQUALS.value, enums.DataBaseOperations.SUP_EQUALS.value] + candles = await database.select_from_timestamp(OHLCV, + ["1587960000", "1587945600"], + operations, + symbol="xyz") + assert len(candles) == 0 + + +async def test_gather_concurrent_select(): + async with get_database() as database: + timestamps_1h = [ohlcv[0] for ohlcv in await database.select(OHLCV, time_frame="1h")] + timestamps_4h = [ohlcv[0] for ohlcv in await database.select(OHLCV, time_frame="4h")] + coros = [_check_select_result(database, ts, "1h") for ts in timestamps_1h] + coros += [_check_select_result(database, ts, "4h") for ts in timestamps_4h] + await asyncio.gather(*coros) + + +async def test_create_tasks_concurrent_selects(): + async with get_database() as database: + timestamps_1h = [ohlcv[0] for ohlcv in await database.select(OHLCV, time_frame="1h")] + timestamps_1m = [ohlcv[0] for ohlcv in await database.select(OHLCV, time_frame="1m")] + timestamps_4h = [ohlcv[0] for ohlcv in await database.select(OHLCV, time_frame="4h", + size=50)] + + calls_count = len(timestamps_1h) + len(timestamps_4h) + len(timestamps_1m) + failed_calls = [] + success_calls = [] + + async def select_task(db, timestamp, time_frame): + try: + await _check_select_result(db, timestamp, time_frame) + success_calls.append((timestamp, time_frame)) + except Exception as e: + failed_calls.append((timestamp, time_frame, e)) + + tasks = [] + for ts in timestamps_1h: + tasks.append(asyncio.get_event_loop().create_task(select_task(database, ts, "1h"))) + for ts in timestamps_4h: + tasks.append(asyncio.get_event_loop().create_task(select_task(database, ts, "4h"))) + for ts in timestamps_1m: + tasks.append(asyncio.get_event_loop().create_task(select_task(database, ts, "1m"))) + # for wait for next cycle to make previous requests end and re-use previous cursors + await asyncio_tools.wait_asyncio_next_cycle() + + await asyncio.gather(*tasks) + assert len(success_calls) == calls_count + assert failed_calls == [] + + +async def test_stop_while_concurrent_select(): + async with get_database() as database: + timestamps = [ohlcv[0] for ohlcv in await database.select(OHLCV, time_frame="1h")] + await _check_select_result(database, timestamps[0]) + asyncio.create_task(asyncio.wait( + asyncio.gather(*[_check_select_result(database, ts, expected_exception=sqlite3.ProgrammingError) + for ts in timestamps]))) + # not enough time to finish all requests, most if not all will remaining pending + await asyncio_tools.wait_asyncio_next_cycle() + + +async def test_double_database(): + async with get_database() as database1, get_database(DATA_FILE2) as database2: + timestamps1 = [ohlcv[0] for ohlcv in await database1.select(OHLCV, time_frame="1h")] + timestamps2 = [ohlcv[0] for ohlcv in await database2.select(OHLCV, time_frame="1h")] + await asyncio.gather(*[_check_select_result(database1, ts) for ts in timestamps1]) + await asyncio.gather(*[_check_select_result(database2, ts) for ts in timestamps2]) + + +async def test_double_database_stop_while_concurrent_select(): + async with get_database() as database1, get_database(DATA_FILE2) as database2: + timestamps1 = [ohlcv[0] for ohlcv in await database1.select(OHLCV, time_frame="1h")] + timestamps2 = [ohlcv[0] for ohlcv in await database2.select(OHLCV, time_frame="1h")] + await _check_select_result(database1, timestamps1[0]) + await _check_select_result(database2, timestamps2[0]) + asyncio.create_task(asyncio.wait( + asyncio.gather(*[_check_select_result(database1, ts, expected_exception=sqlite3.ProgrammingError) + for ts in timestamps1]))) + asyncio.create_task(asyncio.wait( + asyncio.gather(*[_check_select_result(database2, ts, expected_exception=sqlite3.ProgrammingError) + for ts in timestamps2]))) + # not enough time to finish all requests, most if not all will remaining pending + await asyncio_tools.wait_asyncio_next_cycle() + + +async def test_insert(): + async with get_temp_empty_database() as temp_empty_database: + await temp_empty_database.insert(OHLCV, symbol="xyz", timestamp=1, price=1, date="01") + assert await temp_empty_database.select(OHLCV) == [(1, 'xyz', '1', '01')] + + +async def test_insert_all(): + async with get_temp_empty_database() as temp_empty_database: + await temp_empty_database.insert_all(OHLCV, + symbol=["xyz", "abc"], + timestamp=[1, 2], + price=[1, 10], + date=["01", "05"]) + assert await temp_empty_database.select(OHLCV) == [(2, 'abc', '10', '05'), (1, 'xyz', '1', '01')] + assert await temp_empty_database.select(OHLCV, date="05") == [(2, 'abc', '10', '05')] + + +async def test_delete(): + async with get_temp_empty_database() as temp_empty_database: + await temp_empty_database.insert_all(OHLCV, + symbol=["xyz", "abc"], + timestamp=[1, 2], + price=[1, 10], + date=["01", "05"]) + assert await temp_empty_database.select(OHLCV) == [(2, 'abc', '10', '05'), (1, 'xyz', '1', '01')] + # no matching row to delete + await temp_empty_database.delete(OHLCV, symbol="plop") + assert await temp_empty_database.select(OHLCV) == [(2, 'abc', '10', '05'), (1, 'xyz', '1', '01')] + await temp_empty_database.delete(OHLCV, symbol="xyz") + assert await temp_empty_database.select(OHLCV) == [(2, 'abc', '10', '05')] + await temp_empty_database.insert_all(OHLCV, + symbol=["hoho", "dd"], + timestamp=[11, 11], + price=[1, 10], + date=["01", "05"]) + assert await temp_empty_database.select(OHLCV) == [ + (11, 'dd', '10', '05'), (11, 'hoho', '1', '01'), (2, 'abc', '10', '05') + ] + await temp_empty_database.delete(OHLCV, timestamp="11") + assert await temp_empty_database.select(OHLCV) == [(2, 'abc', '10', '05')] + await temp_empty_database.delete(OHLCV, date="05") + assert await temp_empty_database.select(OHLCV) == [] + + +async def test_create_index(): + async with get_temp_empty_database() as temp_empty_database: + await temp_empty_database.insert(OHLCV, 1, symbol="xyz", price="1", date="01") + # ensure no exception + await temp_empty_database.create_index(OHLCV, ["symbol", "timestamp"]) + assert await temp_empty_database.select(OHLCV) == [(1, 'xyz', '1', '01')] + + +async def _check_select_result(database, timestamp, time_frame="1h", expected_exception=None): + try: + ohlcv = await database.select(OHLCV, time_frame=time_frame, timestamp=str(timestamp)) + assert len(ohlcv) == 1 + assert ohlcv[0][0] == timestamp + except Exception as e: + if e.__class__ is expected_exception: + pass + else: + raise diff --git a/packages/commons/tests/databases/run_databases/__init__.py b/packages/commons/tests/databases/run_databases/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/commons/tests/databases/run_databases/test_run_databases_provider.py b/packages/commons/tests/databases/run_databases/test_run_databases_provider.py new file mode 100644 index 0000000000..2bd853c5ce --- /dev/null +++ b/packages/commons/tests/databases/run_databases/test_run_databases_provider.py @@ -0,0 +1,55 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest +import pytest_asyncio +import octobot_commons.databases as databases + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def run_database_identifier(): + return mock.Mock( + initialize=mock.AsyncMock(), + close=mock.AsyncMock() + ) + + +@pytest.fixture +def run_database_provider(): + return databases.RunDatabasesProvider.instance() + + +async def test_add_bot_id(run_database_provider, run_database_identifier): + await run_database_provider.add_bot_id("123", run_database_identifier) + run_database_provider.get_run_databases_identifier("123").initialize.assert_called_once() + assert "123" in run_database_provider.run_databases + + +async def test_has_bot_id(run_database_provider, run_database_identifier): + await run_database_provider.add_bot_id("123", run_database_identifier) + assert run_database_provider.has_bot_id("123") is True + assert run_database_provider.has_bot_id("1232") is False + + +async def test_close(run_database_provider, run_database_identifier): + await run_database_provider.add_bot_id("123", run_database_identifier) + await run_database_provider.close("123") + with pytest.raises(KeyError): + await run_database_provider.close("aa") diff --git a/packages/commons/tests/dataclasses/test_flexible_dataclass.py b/packages/commons/tests/dataclasses/test_flexible_dataclass.py new file mode 100644 index 0000000000..093af43269 --- /dev/null +++ b/packages/commons/tests/dataclasses/test_flexible_dataclass.py @@ -0,0 +1,103 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library.* +import dataclasses +import pydantic +import enum +import datetime +import typing + +import octobot_commons.dataclasses + + +class JobType(enum.Enum): + FULL_TIME = "full-time" + PART_TIME = "part-time" + + +class Job(pydantic.BaseModel): + id: int = 0 + name: str = "" + description: typing.Optional[str] = None + type: JobType = JobType.FULL_TIME + created_at: datetime.datetime = datetime.datetime.now() + updated_at: typing.Optional[datetime.datetime] = None + + +@dataclasses.dataclass +class TestPersonClass(octobot_commons.dataclasses.FlexibleDataclass): + name: str = "" + age: int = 0 + job: Job = dataclasses.field(default_factory=Job) + likes: list = dataclasses.field(default_factory=list) + + +@dataclasses.dataclass +class TestPersonGroupClass(octobot_commons.dataclasses.FlexibleDataclass): + identifier: str = "" + present_people: list[TestPersonClass] = dataclasses.field(default_factory=list) + absent_people: list[TestPersonClass] = dataclasses.field(default_factory=list) + leader: TestPersonClass = dataclasses.field(default_factory=TestPersonClass) + + def __post_init__(self): + if self.present_people and isinstance(self.present_people[0], dict): + self.present_people = [TestPersonClass.from_dict(p) for p in self.present_people] if self.present_people else [] + if self.absent_people and isinstance(self.absent_people[0], dict): + self.absent_people = [TestPersonClass.from_dict(p) for p in self.absent_people] if self.absent_people else [] + + +def test_from_dict(): + person_1 = TestPersonClass( + name="rhombur", age=33, job=Job(id=1, name="prince", description="Ixian prince", type=JobType.PART_TIME, created_at=datetime.datetime(2026, 1, 1, 12, 0, 0)) + ) + dict_1 = dataclasses.asdict(person_1) + person_1_1 = TestPersonClass.from_dict(dict_1) + assert list(person_1_1.get_field_names()) == list(person_1.get_field_names()) == ['name', 'age', 'job', 'likes'] + assert person_1 == person_1_1 # ensure parsing is working + person_1_1.name = "leto" + + group_1 = TestPersonGroupClass(identifier="plop", absent_people=[person_1], leader=person_1_1) + dict_group = dataclasses.asdict(group_1) + assert TestPersonGroupClass.from_dict(dict_group) == group_1 + + # added values are not an issue + dict_group["new_attr"] = 1 + dict_group["absent_people"][0]["other_attr"] = None + dict_group["leader"].pop("age", None) + dict_group["leader"]["age2"] = 22 + + new_group = TestPersonGroupClass.from_dict(dict_group) + assert new_group.leader.age == 0 # default value + + +def test_default_values(): + group_0 = TestPersonGroupClass() + group_0_1 = TestPersonGroupClass() + + assert group_0.leader.name == group_0_1.leader.name + assert group_0.leader is not group_0_1.leader + group_0.leader.name = "erasme" + assert group_0.leader.name != group_0_1.leader.name + + +def test_get_field_names(): + person_1 = TestPersonClass() + person_2 = TestPersonClass() + group_1 = TestPersonGroupClass() + group_2 = TestPersonGroupClass() + + assert list(person_1.get_field_names()) == list(person_2.get_field_names()) == ['name', 'age', 'job', 'likes'] + assert list(group_1.get_field_names()) == list(group_2.get_field_names()) == \ + ['identifier', 'present_people', 'absent_people', 'leader'] diff --git a/packages/commons/tests/dataclasses/test_minimizable_dataclass.py b/packages/commons/tests/dataclasses/test_minimizable_dataclass.py new file mode 100644 index 0000000000..d4e5e61c33 --- /dev/null +++ b/packages/commons/tests/dataclasses/test_minimizable_dataclass.py @@ -0,0 +1,142 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library.* +import dataclasses +import pydantic +import enum +import datetime +import typing + +import octobot_commons.dataclasses + + +class JobType(enum.Enum): + FULL_TIME = "full-time" + PART_TIME = "part-time" + + +class Job(pydantic.BaseModel): + id: int = 0 + name: str = "" + description: typing.Optional[str] = None + type: JobType = JobType.FULL_TIME + created_at: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) + updated_at: typing.Optional[datetime.datetime] = None + + +@dataclasses.dataclass +class TestPersonClass(octobot_commons.dataclasses.MinimizableDataclass): + name: str = "" + age: int = 0 + job: Job = dataclasses.field(default_factory=Job) + likes: list = dataclasses.field(default_factory=list) + + +@dataclasses.dataclass +class TestPersonGroupClass(octobot_commons.dataclasses.MinimizableDataclass): + identifier: str = "" + present_people: list[TestPersonClass] = dataclasses.field(default_factory=list) + absent_people: list[TestPersonClass] = dataclasses.field(default_factory=list) + leader: TestPersonClass = dataclasses.field(default_factory=TestPersonClass) + + def __post_init__(self): + if self.present_people and isinstance(self.present_people[0], dict): + self.present_people = [TestPersonClass.from_dict(p) for p in self.present_people] if self.present_people else [] + if self.absent_people and isinstance(self.absent_people[0], dict): + self.absent_people = [TestPersonClass.from_dict(p) for p in self.absent_people] if self.absent_people else [] + + +def test_to_dict_include_default_values(): + """to_dict(include_default_values=True) returns full dict with all fields.""" + person = TestPersonClass(name="rhombur", age=33, job=Job(id=1, name="prince", description="Ixian prince", type=JobType.PART_TIME, created_at=datetime.datetime(2026, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc))) + result = person.to_dict(include_default_values=True) + + assert result == { + "name": "rhombur", + "age": 33, + 'job': { + 'id': 1, + 'name': 'prince', + 'description': 'Ixian prince', + 'type': 'part-time', + 'created_at': "2026-01-01T12:00:00Z", + }, + "likes": [], + } + + +def test_to_dict_exclude_default_values(): + """to_dict(include_default_values=False) returns only non-default values.""" + person = TestPersonClass(name="rhombur", age=33, job=Job(name="prince", description="Ixian prince", type=JobType.PART_TIME, created_at=datetime.datetime(2026, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc))) + result = person.to_dict(include_default_values=False) + + assert result == { + "name": "rhombur", + "age": 33, + 'job': { + 'name': 'prince', + 'description': 'Ixian prince', + 'type': 'part-time', + 'created_at': "2026-01-01T12:00:00Z", + }, + } + assert "likes" not in result + + +def test_to_dict_exclude_default_values_all_defaults(): + """to_dict(include_default_values=False) returns empty dict when all values are default.""" + person = TestPersonClass() + result = person.to_dict(include_default_values=False) + + assert result == {} + + +def test_to_dict_exclude_default_values_nested(): + """to_dict(include_default_values=False) minimizes nested MinimizableDataclass instances.""" + leader = TestPersonClass(name="leto", age=25, job=Job(name="prince", description="Caladan prince", type=JobType.FULL_TIME)) + group = TestPersonGroupClass(identifier="atreides", leader=leader) + + result = group.to_dict(include_default_values=False) + + assert result["identifier"] == "atreides" + assert result["leader"] == { + "name": "leto", "age": 25, + 'job': {'name': 'prince', 'description': 'Caladan prince'} + } + assert "present_people" not in result + assert "absent_people" not in result + + +def test_to_dict_exclude_default_values_with_list(): + """to_dict(include_default_values=False) handles lists of MinimizableDataclass.""" + person = TestPersonClass(name="paul", age=15) + group = TestPersonGroupClass(present_people=[person]) + + result = group.to_dict(include_default_values=False) + + assert result["present_people"] == [{"name": "paul", "age": 15}] + assert result["leader"] == {} + assert "absent_people" not in result + + +def test_to_dict_roundtrip(): + """to_dict then from_dict preserves data.""" + person = TestPersonClass(name="chani", age=20, likes=["desert", "stillsuit"]) + as_dict = person.to_dict(include_default_values=True) + restored = TestPersonClass.from_dict(as_dict) + + assert restored.name == person.name + assert restored.age == person.age + assert restored.likes == person.likes diff --git a/packages/commons/tests/dataclasses/test_updatable_dataclass.py b/packages/commons/tests/dataclasses/test_updatable_dataclass.py new file mode 100644 index 0000000000..e115717450 --- /dev/null +++ b/packages/commons/tests/dataclasses/test_updatable_dataclass.py @@ -0,0 +1,310 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library.* +import copy +import dataclasses + +import octobot_commons.dataclasses + + +@dataclasses.dataclass +class TestPersonClass(octobot_commons.dataclasses.UpdatableDataclass): + name: str = "" + age: int = 0 + likes: list = dataclasses.field(default_factory=list) + + +@dataclasses.dataclass +class TestPersonGroupClass(octobot_commons.dataclasses.UpdatableDataclass): + identifier: str = "" + present_people: list[TestPersonClass] = dataclasses.field(default_factory=list) + absent_people: list[TestPersonClass] = dataclasses.field(default_factory=list) + leader: TestPersonClass = dataclasses.field(default_factory=TestPersonClass) + + +def test_simple_update(): + person_1 = TestPersonClass("olive", 25) + person_2 = TestPersonClass("olive", 25, ["football", "poe"]) + person_2_update = TestPersonClass("tom", likes=["football"]) + + assert person_1.likes == [] + person_1.update(person_2) + # no _updated_fields: no update + assert person_1.name == "olive" + assert person_1.age == 25 + assert person_1.likes == [] + person_2._updated_fields = ("name", "age", "likes") + + # _updated_fields is set: update + person_1.update(person_2) + assert person_1.name == "olive" + assert person_1.age == 25 + assert person_1.likes == ["football", "poe"] + + person_2_update._updated_fields = ["name", "likes"] + person_2.update(person_2_update) + assert person_2.name == "tom" + assert person_2.age == 25 # not in _updated_fields + assert person_2.likes == ["football"] + + +def test_nested_update(): + person_1 = TestPersonClass("olive", 25) + person_2 = TestPersonClass("olive", 25, ["football", "poe"]) + person_3 = TestPersonClass("mr plop", 29, ["crypto", "poe"]) + person_2_update = TestPersonClass("tom", likes=["football"], _updated_fields=["name", "likes"]) + group_1 = TestPersonGroupClass() # empty group + group_2 = TestPersonGroupClass( + identifier="identifier_1", + present_people=[person_1, person_3], + absent_people=[person_2] + ) + group_2_update_no_absent = TestPersonGroupClass( + absent_people=[], + _updated_fields=["absent_people"] + ) + group_2_update_empty_identifier = TestPersonGroupClass( + _updated_fields=["identifier"] + ) + group_2_update_person_1 = TestPersonGroupClass( + present_people=[person_2_update, person_3], + _updated_fields=["present_people"] + ) + + assert group_1.identifier == "" + assert group_1.present_people == [] + assert group_1.absent_people == [] + + group_1.update(group_2) + # no updated_fields + assert group_1.identifier == "" + assert group_1.present_people == [] + assert group_1.absent_people == [] + + group_1.identifier = "plop" + assert group_1.identifier == "plop" + group_1.update(group_2_update_empty_identifier) + assert group_1.identifier == "" # updated + assert group_1.present_people == [] + assert group_1.absent_people == [] + + assert len(group_2.absent_people) == 1 + group_2.update(group_2_update_no_absent) + assert group_2.absent_people == [] # updated + + assert group_2.identifier == "identifier_1" + group_2.update(group_2_update_empty_identifier) + assert group_2.identifier == "" # updated + + assert group_2.present_people[0].name == "olive" + assert group_2.present_people[0].age == 25 + assert group_2.present_people[0].likes == [] + assert group_2.present_people[1].name == "mr plop" + assert group_2.present_people[1].age == 29 + assert group_2.present_people[1].likes == ["crypto", "poe"] + group_2.update(group_2_update_person_1) + assert group_2.present_people[0].name == "tom" # updated + assert group_2.present_people[0].age == 25 + assert group_2.present_people[0].likes == ["football"] # updated + assert group_2.present_people[1].name == "mr plop" + assert group_2.present_people[1].age == 29 + assert group_2.present_people[1].likes == ["crypto", "poe"] + + group_2_update_person_2 = TestPersonGroupClass( + present_people=[person_2_update], + _updated_fields=["present_people"] + ) + group_2.update(group_2_update_person_2) + # group_2.present_people[1] got removed + assert len(group_2.present_people) == 1 + assert group_2.present_people[0].name == "tom" + assert group_2.present_people[0].age == 25 + assert group_2.present_people[0].likes == ["football"] + + group_2.update(group_2_update_person_1) + # group_2.present_people[1] is added back + assert group_2.present_people[0].name == "tom" # updated + assert group_2.present_people[0].age == 25 + assert group_2.present_people[0].likes == ["football"] # updated + assert group_2.present_people[1].name == "mr plop" + assert group_2.present_people[1].age == 29 + assert group_2.present_people[1].likes == ["crypto", "poe"] + + +def test_get_update(): + person_1 = TestPersonClass("olive", 25) + person_2 = TestPersonClass("olive", 25, ["football", "poe"]) + person_3 = TestPersonClass("mr plop", 29, ["crypto", "poe"]) + + # only likes changed + update = person_1.get_update(person_2) + assert update.name == "" + assert update.age == 0 + assert update.likes == ["football", "poe"] + assert update._updated_fields == ["likes"] + + # everything changed + update = person_1.get_update(person_3) + assert update.name == "mr plop" + assert update.age == 29 + assert update.likes == ["crypto", "poe"] + assert update._updated_fields == ["name", "age", "likes"] + + # likes got removed + update = person_2.get_update(person_1) + assert update.name == "" + assert update.age == 0 + assert update.likes == [] + assert update._updated_fields == ["likes"] + + +def test_nested_get_update(): + person_1 = TestPersonClass("olive", 25) + person_2 = TestPersonClass("olive", 25, ["football", "poe"]) + person_3 = TestPersonClass("mr plop", 29, ["crypto", "poe"]) + person_3_update = TestPersonClass("mr plop the second", 10, ["crypto", "poe", "metal"]) + group_1 = TestPersonGroupClass() # empty group + group_2 = TestPersonGroupClass( + identifier="identifier_1", + present_people=[person_1, person_3], + absent_people=[person_2], + leader=person_1, + ) + group_3 = TestPersonGroupClass( + identifier="identifier_1", + present_people=[person_1], + absent_people=[person_2, person_3], + leader=person_3, + ) + group_3_updated_person_3 = TestPersonGroupClass( + identifier="identifier_1", + present_people=[person_1], + absent_people=[person_2, person_3_update], + leader=person_3_update, + ) + + update = group_1.get_update(group_1) + assert update._updated_fields == [] + + update = group_2.get_update(group_2) + assert update._updated_fields == [] + + update = group_1.get_update(group_2) + assert update.identifier == "identifier_1" + assert update.present_people == [person_1, person_3] + assert update.absent_people == [person_2] + assert update.leader.name == "olive" + assert update.leader.age == 25 + assert update.leader._updated_fields == ["name", "age"] + assert update._updated_fields == ["identifier", "present_people", "absent_people", "leader"] + + update = group_2.get_update(group_1) + assert update.identifier == "" + assert update.present_people == [] + assert update.absent_people == [] + assert update.leader.name == "" + assert update.leader.age == 0 + assert update.leader.likes == [] + assert update.leader._updated_fields == ["name", "age"] + assert update._updated_fields == ["identifier", "present_people", "absent_people", "leader"] + + update = group_2.get_update(group_3) + assert update.identifier == "" + assert update.present_people == [TestPersonClass()] # TestPersonClass() when no change + assert update.absent_people == [TestPersonClass(), person_3] # TestPersonClass() when no change + assert update.leader.name == person_3.name + assert update.leader.age == person_3.age + assert update.leader.likes == person_3.likes + assert update.leader._updated_fields == ["name", "age", "likes"] + assert update._updated_fields == ["present_people", "absent_people", "leader"] + + update = group_3.get_update(group_3_updated_person_3) + assert update.identifier == "" + assert update.present_people == [] + assert len(update.absent_people) == 2 + assert update.absent_people[0] == TestPersonClass() + assert update.absent_people[1].name == person_3_update.name + assert update.absent_people[1].age == person_3_update.age + assert update.absent_people[1].likes == person_3_update.likes + assert update.absent_people[1]._updated_fields == ["name", "age", "likes"] + assert update._updated_fields == ["absent_people", "leader"] + assert update.leader.name == person_3_update.name + assert update.leader.age == person_3_update.age + assert update.leader.likes == person_3_update.likes + assert update.leader._updated_fields == ["name", "age", "likes"] + + +def test_get_update_and_update(): + person_1 = TestPersonClass("olive", 25) + person_2 = TestPersonClass("olive", 25, likes=["football", "poe"]) + person_3 = TestPersonClass("mr plop", 29, ["crypto", "poe"]) + group_1 = TestPersonGroupClass() # empty group + group_2 = TestPersonGroupClass( + identifier="identifier_1", + present_people=[person_1, person_3], + absent_people=[person_2], + leader=copy.deepcopy(person_1), + ) + group_3 = TestPersonGroupClass( + identifier="identifier_1", + present_people=[person_1], + absent_people=[person_2, person_3], + leader=copy.deepcopy(person_3), + ) + + person_1_2 = copy.deepcopy(person_1) + person_1_2.update(person_1.get_update(person_2)) + assert person_1_2.name == person_1.name + assert person_1_2.age == person_1.age + assert person_1_2.likes == person_2.likes + assert person_1_2._updated_fields == [] + + person_3_1 = copy.deepcopy(person_3) + person_3_1.update(person_3.get_update(person_1)) + assert person_3_1.name == person_1.name + assert person_3_1.age == person_1.age + assert person_3_1.likes == person_1.likes + assert person_3_1._updated_fields == [] + + group_2_3 = copy.deepcopy(group_2) + update = group_2.get_update(group_3) + assert update.identifier == "" + assert update.present_people == [TestPersonClass()] + assert len(update.absent_people) == 2 + assert update.absent_people[0] == TestPersonClass() + assert update.absent_people[1].name == person_3.name + assert update.leader.name == person_3.name + group_2_3.update(update) + assert group_2_3.identifier == group_2.identifier + assert group_2_3.present_people == group_3.present_people + assert group_2_3.absent_people == group_3.absent_people + assert group_2_3.leader == group_3.leader + + +def test_to_dict_without_updated_fields(): + person_2 = TestPersonClass("olive", 25, likes=["football", "poe"]) + # _updated_fields included (default behavior) + assert dataclasses.asdict(person_2) == { + "_updated_fields": [], + "name": "olive", + "age": 25, + "likes": ["football", "poe"] + } + # _updated_fields not included + assert person_2.to_dict_without_updated_fields() == { + "name": "olive", + "age": 25, + "likes": ["football", "poe"] + } diff --git a/packages/commons/tests/dsl_interpreter/operators/test_re_callable_operator_mixin.py b/packages/commons/tests/dsl_interpreter/operators/test_re_callable_operator_mixin.py new file mode 100644 index 0000000000..6b727209e2 --- /dev/null +++ b/packages/commons/tests/dsl_interpreter/operators/test_re_callable_operator_mixin.py @@ -0,0 +1,248 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time +import mock + +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.dsl_interpreter.operator_parameter as operator_parameter +import octobot_commons.dsl_interpreter.operators.re_callable_operator_mixin as re_callable_operator_mixin + + +class TestReCallingOperatorResult: + def test_is_re_calling_operator_result_with_reset_to_id(self): + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result( + {re_callable_operator_mixin.ReCallingOperatorResult.__name__: {"reset_to_id": "some_id"}} + ) is True + + def test_is_re_calling_operator_result_with_last_execution_result(self): + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result( + { + re_callable_operator_mixin.ReCallingOperatorResult.__name__: { + "last_execution_result": {"waiting_time": 5, "last_execution_time": 1000.0}, + } + } + ) is True + + def test_is_re_calling_operator_result_false_for_non_dict(self): + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result(None) is False + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result([]) is False + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result("str") is False + + def test_is_re_calling_operator_result_false_for_dict_without_keys(self): + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result({}) is False + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result( + {"other_key": "value"} + ) is False + + def test_get_next_call_time_with_full_data(self): + with mock.patch.object(time, "time", return_value=1000.0): + result = re_callable_operator_mixin.ReCallingOperatorResult( + reset_to_id=None, + last_execution_result={ + re_callable_operator_mixin.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + re_callable_operator_mixin.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + }, + ) + assert result.get_next_call_time() == 1005.0 + + def test_get_next_call_time_with_missing_last_execution_time_uses_current_time(self): + with mock.patch.object(time, "time", return_value=2000.0): + result = re_callable_operator_mixin.ReCallingOperatorResult( + reset_to_id=None, + last_execution_result={ + re_callable_operator_mixin.ReCallingOperatorResultKeys.WAITING_TIME.value: 10.0, + }, + ) + assert result.get_next_call_time() == 2010.0 + + def test_get_next_call_time_returns_none_when_no_last_execution_result(self): + result = re_callable_operator_mixin.ReCallingOperatorResult( + reset_to_id=None, + last_execution_result=None, + ) + assert result.get_next_call_time() is None + + def test_get_next_call_time_returns_none_when_waiting_time_is_zero(self): + result = re_callable_operator_mixin.ReCallingOperatorResult( + reset_to_id=None, + last_execution_result={ + re_callable_operator_mixin.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + re_callable_operator_mixin.ReCallingOperatorResultKeys.WAITING_TIME.value: 0, + }, + ) + assert result.get_next_call_time() is None + + +class _TestReCallableOperator(dsl_interpreter.ReCallableOperatorMixin): + """Minimal operator using the mixin for testing.""" + + def __init__(self): + pass + + +class _ReCreateScriptTestOperator(dsl_interpreter.Operator, dsl_interpreter.ReCallableOperatorMixin): + """Operator with parameters so re_create_script can call resove_operator_params.""" + + @staticmethod + def get_name() -> str: + return "test_wait" + + @classmethod + def get_parameters(cls): + return [ + operator_parameter.OperatorParameter( + name="seconds", + description="wait duration", + required=True, + type=float, + ), + operator_parameter.OperatorParameter( + name="label", + description="optional label", + required=False, + type=str, + ), + ] + + def compute(self): + return None + + +class TestReCallableOperatorMixin: + def test_last_execution_result_key(self): + assert dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY == "last_execution_result" + + def test_get_re_callable_parameters(self): + params = dsl_interpreter.ReCallableOperatorMixin.get_re_callable_parameters() + assert len(params) == 1 + assert params[0].name == "last_execution_result" + assert params[0].required is False + assert params[0].default is None + + def test_get_last_execution_result_returns_none_when_param_missing(self): + operator = _TestReCallableOperator() + assert operator.get_last_execution_result({}) is None + assert operator.get_last_execution_result({"other": "value"}) is None + + def test_get_last_execution_result_returns_none_when_param_is_none(self): + operator = _TestReCallableOperator() + assert operator.get_last_execution_result({ + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: None, + }) is None + + def test_get_last_execution_result_returns_none_when_not_re_calling_format(self): + operator = _TestReCallableOperator() + assert operator.get_last_execution_result({ + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: {"wrong": "structure"}, + }) is None + + def test_get_last_execution_result_returns_inner_dict_for_valid_format(self): + operator = _TestReCallableOperator() + inner = { + re_callable_operator_mixin.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + re_callable_operator_mixin.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + } + result = operator.get_last_execution_result({ + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + re_callable_operator_mixin.ReCallingOperatorResult.__name__: { + "last_execution_result": inner, + }, + }, + }) + assert result == inner + + def test_get_last_execution_result_with_reset_to_id_format(self): + operator = _TestReCallableOperator() + inner = {"waiting_time": 3.0, "last_execution_time": 500.0} + result = operator.get_last_execution_result({ + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + re_callable_operator_mixin.ReCallingOperatorResult.__name__: { + "reset_to_id": "abc", + "last_execution_result": inner, + }, + }, + }) + assert result == inner + + def test_create_re_callable_result_dict(self): + operator = _TestReCallableOperator() + result = operator.create_re_callable_result_dict( + keyword="recall", + last_execution_time=1000.0, + waiting_time=5.0, + ) + inner = result[re_callable_operator_mixin.ReCallingOperatorResult.__name__] + assert inner["keyword"] == "recall" + assert "last_execution_result" in inner + assert inner["last_execution_result"][ + re_callable_operator_mixin.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value + ] == 1000.0 + assert inner["last_execution_result"][ + re_callable_operator_mixin.ReCallingOperatorResultKeys.WAITING_TIME.value + ] == 5.0 + + def test_create_re_callable_result_dict_with_reset_to_id(self): + operator = _TestReCallableOperator() + result = operator.create_re_callable_result_dict( + keyword="recall", + reset_to_id="target_123", + last_execution_time=1000.0, + waiting_time=5.0, + ) + inner = result[re_callable_operator_mixin.ReCallingOperatorResult.__name__] + assert inner["reset_to_id"] == "target_123" + assert "last_execution_result" in inner + + def test_create_re_callable_result_dict_with_extra_kwargs(self): + operator = _TestReCallableOperator() + result = operator.create_re_callable_result_dict( + keyword="recall", + last_execution_time=1000.0, + waiting_time=5.0, + extra_field=42, + ) + inner = result[re_callable_operator_mixin.ReCallingOperatorResult.__name__] + assert inner["last_execution_result"]["extra_field"] == 42 + + def test_re_create_script_drops_last_execution_result_and_formats_params(self): + operator = _ReCreateScriptTestOperator(0.0) + previous_call_payload = { + re_callable_operator_mixin.ReCallingOperatorResult.__name__: { + "keyword": "recall", + "last_execution_result": {"waiting_time": 1.0}, + }, + } + script = operator.re_create_script({ + "seconds": 9.0, + "label": "retry", + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: previous_call_payload, + }) + assert script == "test_wait(9.0, label='retry')" + assert dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY not in script + + def test_re_create_script_with_only_last_execution_result_yields_empty_call(self): + operator = _ReCreateScriptTestOperator(1.0) + script = operator.re_create_script({ + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + re_callable_operator_mixin.ReCallingOperatorResult.__name__: {}, + }, + }) + assert script == "test_wait()" + + def test_re_create_script_required_only(self): + operator = _ReCreateScriptTestOperator(0.0) + script = operator.re_create_script({"seconds": 42}) + assert script == "test_wait(42)" diff --git a/packages/commons/tests/dsl_interpreter/test_custom_operators.py b/packages/commons/tests/dsl_interpreter/test_custom_operators.py new file mode 100644 index 0000000000..1b4728dbd8 --- /dev/null +++ b/packages/commons/tests/dsl_interpreter/test_custom_operators.py @@ -0,0 +1,744 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import pytest +import ast +import re + +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_commons.errors as commons_errors + + +async def get_x_value_async() -> int: + return 666 + + +class SumPlusXOperatorWithoutInit(dsl_interpreter.NaryOperator): + def __init__(self, *parameters: dsl_interpreter.OperatorParameterType, **kwargs: typing.Any): + super().__init__(*parameters, **kwargs) + self.x_value = 42 + + @staticmethod + def get_name() -> str: + return "plus_42" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + return sum(computed_parameters) + self.x_value + + +class SumPlusXOperatorWithPreCompute(dsl_interpreter.NaryOperator): + def __init__(self, *parameters: dsl_interpreter.OperatorParameterType, **kwargs: typing.Any): + super().__init__(*parameters, **kwargs) + self.x_value = 42 + + @staticmethod + def get_name() -> str: + return "plus_x" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="data", description="the data to compute the sum of", required=True, type=int), + dsl_interpreter.OperatorParameter(name="data2", description="the data to compute the sum of", required=False, type=int), + ] + + async def pre_compute(self) -> None: + await super().pre_compute() + self.x_value = await get_x_value_async() + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + return sum(computed_parameters) + self.x_value + + +class TimeFrameToSecondsOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + + def __init__(self, *params, **kwargs: typing.Any): + super().__init__(*params, **kwargs) + + @staticmethod + def get_name() -> str: + return "time_frame_to_seconds" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + return commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(computed_parameters[0])] * commons_constants.MINUTE_TO_SECONDS + + +class AddOperator(dsl_interpreter.BinaryOperator): + @staticmethod + def get_name() -> str: + return ast.Add.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left + right + + +class SubOperator(dsl_interpreter.BinaryOperator): + @staticmethod + def get_name() -> str: + return ast.Sub.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left - right + + +class LtOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.Lt.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left < right + + +class LtEOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.LtE.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left <= right + + +class GtOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.Gt.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left > right + + +class GtEOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.GtE.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left >= right + + +class EqOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.Eq.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left == right + + +class NotEqOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.NotEq.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left != right + + +class IsOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.Is.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left is right + + +class IsNotOperator(dsl_interpreter.CompareOperator): + @staticmethod + def get_name() -> str: + return ast.IsNot.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left is not right + + +class AndOperator(dsl_interpreter.NaryOperator): + MIN_PARAMS = 1 + + @staticmethod + def get_name() -> str: + return ast.And.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return all(self.get_computed_parameters()) + + +class OrOperator(dsl_interpreter.NaryOperator): + MIN_PARAMS = 1 + + @staticmethod + def get_name() -> str: + return ast.Or.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return any(self.get_computed_parameters()) + +class Add2Operator(dsl_interpreter.CallOperator): + @staticmethod + def get_name() -> str: + return "add2" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="left", description="the left operand", required=True, type=int), + dsl_interpreter.OperatorParameter(name="right", description="the right operand", required=True, type=int), + ] + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left + right + +class PreComputeSumOperator(dsl_interpreter.PreComputingCallOperator): + @staticmethod + def get_name() -> str: + return "pre_compute_sum" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="a", description="first value", required=True, type=int), + dsl_interpreter.OperatorParameter(name="b", description="second value", required=True, type=int), + ] + + async def pre_compute(self) -> None: + await super().pre_compute() + value_by_parameter = self.get_computed_value_by_parameter() + self.value = value_by_parameter["a"] + value_by_parameter["b"] + + +class CallWithDefaultParametersOperator(dsl_interpreter.CallOperator): + @staticmethod + def get_name() -> str: + return "call_with_default_parameters" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="value1", description="the first value", required=True, type=int), + dsl_interpreter.OperatorParameter(name="value2", description="the second value", required=False, type=int, default=0), + dsl_interpreter.OperatorParameter(name="added_extra_value", description="value to add to the result", required=False, type=int, default=0), + dsl_interpreter.OperatorParameter(name="substracted_extra_value", description="value to substract from the result", required=False, type=int, default=0), + ] + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + value_by_parameter = self.get_computed_value_by_parameter() + return ( + value_by_parameter["value1"] + + value_by_parameter["value2"] + + value_by_parameter["added_extra_value"] + - value_by_parameter["substracted_extra_value"] + ) + + +class ParamMerger(dsl_interpreter.CallOperator): + @staticmethod + def get_name() -> str: + return "param_merger" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="p1", description="the first value", required=True, type=int), + dsl_interpreter.OperatorParameter(name="p2", description="the second value", required=True, type=int), + ] + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + value_by_parameter = self.get_computed_value_by_parameter() + return str(value_by_parameter) + + +class NestedDictSumOperator(dsl_interpreter.CallOperator): + @staticmethod + def get_name() -> str: + return "nested_dict_sum" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="values", description="the dictionary to sum the values of", required=True, type=dict), + ] + + def nested_sum(self, values: dict) -> float: + return sum( + self.nested_sum(value) if isinstance(value, dict) else float(value) + for value in values.values() + ) + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + value_by_parameter = self.get_computed_value_by_parameter() + return self.nested_sum(value_by_parameter["values"]) + + +@pytest.fixture +def interpreter(): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + [ + SumPlusXOperatorWithoutInit, SumPlusXOperatorWithPreCompute, TimeFrameToSecondsOperator, + AddOperator, SubOperator, Add2Operator, PreComputeSumOperator, CallWithDefaultParametersOperator, + NestedDictSumOperator, ParamMerger, + LtOperator, LtEOperator, GtOperator, GtEOperator, EqOperator, NotEqOperator, + IsOperator, IsNotOperator, AndOperator, OrOperator + ] + ) + + +@pytest.mark.asyncio +async def test_interpreter_basic_operations(interpreter): + assert await interpreter.interprete("plus_42()") == 42 + assert await interpreter.interprete("plus_42(6)") == 48 + assert await interpreter.interprete("plus_42(1, 2, 3)") == 48 + assert await interpreter.interprete("plus_42(1, 1 + 1, 1.5 +1.5)") == 48 + assert await interpreter.interprete("plus_x(1, 1)") == 668 + assert await interpreter.interprete("10 + (plus_x(1, 1) + plus_x(1, 1))") == 10 + (668 + 668) == 1346 + assert await interpreter.interprete("time_frame_to_seconds('1m')") == 60 + assert await interpreter.interprete("time_frame_to_seconds('1d')") == 86400 + assert await interpreter.interprete("time_frame_to_seconds('1'+'h')") == 3600 + + +@pytest.mark.asyncio +async def test_interpreter_basic_operations_with_named_parameters(interpreter): + assert await interpreter.interprete("param_merger(1, 2)") == "{'p1': 1, 'p2': 2}" + assert await interpreter.interprete("param_merger(1, p2=2)") == "{'p1': 1, 'p2': 2}" + assert await interpreter.interprete("param_merger(p1=1, p2=2)") == "{'p1': 1, 'p2': 2}" + assert await interpreter.interprete("param_merger(p2=1, p1=2)") == "{'p1': 2, 'p2': 1}" + + +@pytest.mark.asyncio +async def test_pre_computing_call_operator(interpreter): + assert await interpreter.interprete("pre_compute_sum(1, 2)") == 3 + assert await interpreter.interprete("pre_compute_sum(10, 20)") == 30 + assert await interpreter.interprete("pre_compute_sum(1 + 1, 2 + 2)") == 6 + with pytest.raises(commons_errors.DSLInterpreterError, match="has not been pre_computed"): + operator = PreComputeSumOperator(1, 2) + operator.compute() + + +@pytest.mark.asyncio +async def test_interpreter_call_with_default_parameters(interpreter): + assert await interpreter.interprete("call_with_default_parameters(1)") == 1 + assert await interpreter.interprete("call_with_default_parameters(1, 2)") == 3 + assert await interpreter.interprete("call_with_default_parameters(1, 2, 3)") == 6 + assert await interpreter.interprete("call_with_default_parameters(1, 2, 3, 4)") == 2 + assert await interpreter.interprete("call_with_default_parameters(1, 2, added_extra_value=3)") == 6 + assert await interpreter.interprete("call_with_default_parameters(1, 2, 3, substracted_extra_value=4)") == 2 + assert await interpreter.interprete("call_with_default_parameters(1, 2, substracted_extra_value=3)") == 0 + assert await interpreter.interprete("call_with_default_parameters(1, 2, added_extra_value=4, substracted_extra_value=5)") == 2 + with pytest.raises(commons_errors.InvalidParametersError, match="call_with_default_parameters requires at least 1 parameter"): + await interpreter.interprete("call_with_default_parameters()") + # Too many positional args: rejected in parameters_util.resolve_operator_args_and_kwargs + too_many_positional_msg = re.escape("call_with_default_parameters supports up to 4 parameters:") + with pytest.raises(commons_errors.InvalidParametersError, match=too_many_positional_msg): + await interpreter.interprete("call_with_default_parameters(1, 2, 3, 4, 5)") + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("Parameter(s) 'added_extra_value' have multiple values")): + await interpreter.interprete("call_with_default_parameters(1, 2, 3, added_extra_value=4)") + # Positional slot full + duplicate keyword: Operator._validate_parameters + too_many_total_msg = re.escape( + "call_with_default_parameters got 5 parameters (1, 2, 3, 4, 5) but supports up to 4 parameters:" + ) + with pytest.raises(commons_errors.InvalidParametersError, match=too_many_total_msg): + await interpreter.interprete("call_with_default_parameters(1, 2, 3, 4, added_extra_value=5)") + + +@pytest.mark.asyncio +async def test_interpreter_nested_dict_sum(interpreter): + assert await interpreter.interprete("nested_dict_sum({})") == 0 + assert await interpreter.interprete("nested_dict_sum({'a': 1})") == 1 + assert await interpreter.interprete("nested_dict_sum({'a': 1 + 1})") == 2 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': 2})") == 3 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': 3}})") == 6 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}})") == 6 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': {'e': 3, 'f': {'g': 4}}}})") == 10 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': {'e': 3, 'f': {'g': 4, 'h': {'i': 5}}}}})") == 15 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': {'e': 3, 'f': {'g': 4, 'h': {'i': 5, 'j': {'k': 6}}}}}})") == 21 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': {'e': 3, 'f': {'g': 4, 'h': {'i': 5, 'j': {'k': 6, 'l': {'m': 7}}}}}}})") == 28 + assert await interpreter.interprete("nested_dict_sum({'a': 1, 'b': {'c': 2, 'd': {'e': 3, 'f': {'g': 4, 'h': {'i': 5, 'j': {'k': 6, 'l': {'m': 7, 'n': {'o': 8}}}}}}, 'p': 9 + 0.1}})") == 45.1 + +@pytest.mark.asyncio +async def test_interpreter_invalid_parameters(interpreter): + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("plus_x requires at least 1 parameter(s): 1: data")): + interpreter.prepare("plus_x()") + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("plus_x requires at least 1 parameter(s): 1: data")): + await interpreter.interprete("plus_x()") + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("add2 requires at least 2 parameter(s): 1: left")): + interpreter.prepare("add2()") + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("add2 requires at least 2 parameter(s): 1: left")): + await interpreter.interprete("add2()") + add2_too_many = re.escape("add2 supports up to 2 parameters:") + with pytest.raises(commons_errors.InvalidParametersError, match=add2_too_many): + interpreter.prepare("add2(1, 2, 3)") + with pytest.raises(commons_errors.InvalidParametersError, match=add2_too_many): + await interpreter.interprete("add2(1, 2, 3)") + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("time_frame_to_seconds requires at least 1 parameter(s)")): + interpreter.prepare("time_frame_to_seconds()") + with pytest.raises(commons_errors.InvalidParametersError, match=re.escape("time_frame_to_seconds requires at least 1 parameter(s)")): + await interpreter.interprete("time_frame_to_seconds()") + with pytest.raises(commons_errors.InvalidParametersError, match="time_frame_to_seconds supports up to 1 parameters"): + interpreter.prepare("time_frame_to_seconds(1, 2, 3)") + with pytest.raises(commons_errors.InvalidParametersError, match="time_frame_to_seconds supports up to 1 parameters"): + await interpreter.interprete("time_frame_to_seconds(1, 2, 3)") + + +def test_get_input_value_by_parameter(): + # Positional arguments + operator = ParamMerger(1, 2) + assert operator.get_input_value_by_parameter() == {"p1": 1, "p2": 2} + + # Keyword arguments + operator = ParamMerger(p1=10, p2=20) + assert operator.get_input_value_by_parameter() == {"p1": 10, "p2": 20} + + # Mixed positional and keyword + operator = ParamMerger(1, p2=2) + assert operator.get_input_value_by_parameter() == {"p1": 1, "p2": 2} + + # Reversed keyword order + operator = ParamMerger(p2=100, p1=200) + assert operator.get_input_value_by_parameter() == {"p1": 200, "p2": 100} + + # Default values for optional parameters + operator = CallWithDefaultParametersOperator(42) + assert operator.get_input_value_by_parameter() == { + "value1": 42, + "value2": 0, + "added_extra_value": 0, + "substracted_extra_value": 0, + } + + # Nested operator as raw (uncomputed) parameter + nested_add = AddOperator(1, 2) + operator = Add2Operator(nested_add, 3) + value_by_param = operator.get_input_value_by_parameter() + assert value_by_param["left"] is nested_add + assert value_by_param["right"] == 3 + + # Dict parameter + operator = NestedDictSumOperator({"a": 1, "b": 2}) + assert operator.get_input_value_by_parameter() == {"values": {"a": 1, "b": 2}} + + # Unknown parameters raise InvalidParametersError + with pytest.raises( + commons_errors.InvalidParametersError, + match=re.escape("Parameter(s) 'unknown_param' are unknown. Supported parameters: p1, p2"), + ): + ParamMerger(1, unknown_param=3).get_input_value_by_parameter() + with pytest.raises( + commons_errors.InvalidParametersError, + match=re.escape( + "param_merger got 4 parameters (1, 2, 99, 1) but supports up to 2 parameters:" + ), + ): + ParamMerger(p1=1, p2=2, extra=99, another=1).get_input_value_by_parameter() + + +class OperatorWithName(dsl_interpreter.Operator): + NAME = "custom_name" + DESCRIPTION = "A custom operator with NAME set" + EXAMPLE = "custom_name(1, 2)" + + @staticmethod + def get_name() -> str: + return "fallback_name" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return sum(self.get_computed_parameters()) + + +class OperatorWithoutName(dsl_interpreter.Operator): + DESCRIPTION = "An operator without NAME, uses get_name()" + EXAMPLE = "fallback_name(5)" + + @staticmethod + def get_name() -> str: + return "fallback_name" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return sum(self.get_computed_parameters()) + + +class OperatorWithParameters(dsl_interpreter.Operator): + NAME = "param_op" + DESCRIPTION = "Operator with parameters" + EXAMPLE = "param_op(1, 2)" + + @staticmethod + def get_name() -> str: + return "param_op" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="x", description="first parameter", required=True, type=int), + dsl_interpreter.OperatorParameter(name="y", description="second parameter", required=False, type=int), + ] + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return sum(self.get_computed_parameters()) + + +class OperatorWithoutParameters(dsl_interpreter.Operator): + NAME = "no_param_op" + DESCRIPTION = "Operator without parameters" + EXAMPLE = "no_param_op()" + + @staticmethod + def get_name() -> str: + return "no_param_op" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return 42 + + +class OperatorWithCustomLibrary(dsl_interpreter.Operator): + NAME = "custom_lib_op" + DESCRIPTION = "Operator with custom library" + EXAMPLE = "custom_lib_op()" + + @staticmethod + def get_name() -> str: + return "custom_lib_op" + + @staticmethod + def get_library() -> str: + return "custom_library" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return 42 + + +class OperatorWithEmptyFields(dsl_interpreter.Operator): + # NAME, DESCRIPTION, EXAMPLE all empty/default + @staticmethod + def get_name() -> str: + return "empty_fields_op" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return 42 + + +def test_get_docs_with_name_set(): + """Test get_docs() when NAME class attribute is set""" + docs = OperatorWithName.get_docs() + assert isinstance(docs, dsl_interpreter.OperatorDocs) + assert docs.name == "custom_name" # Should use NAME, not get_name() + assert docs.description == "A custom operator with NAME set" + assert docs.type == commons_constants.BASE_OPERATORS_LIBRARY + assert docs.example == "custom_name(1, 2)" + assert docs.parameters == [] + + +def test_get_docs_without_name_uses_get_name(): + """Test get_docs() when NAME is not set, should use get_name()""" + docs = OperatorWithoutName.get_docs() + assert isinstance(docs, dsl_interpreter.OperatorDocs) + assert docs.name == "fallback_name" # Should use get_name() when NAME is empty + assert docs.description == "An operator without NAME, uses get_name()" + assert docs.type == commons_constants.BASE_OPERATORS_LIBRARY + assert docs.example == "fallback_name(5)" + assert docs.parameters == [] + + +def test_get_docs_with_parameters(): + """Test get_docs() when operator has parameters""" + docs = OperatorWithParameters.get_docs() + assert isinstance(docs, dsl_interpreter.OperatorDocs) + assert docs.name == "param_op" + assert docs.description == "Operator with parameters" + assert docs.type == commons_constants.BASE_OPERATORS_LIBRARY + assert docs.example == "param_op(1, 2)" + assert len(docs.parameters) == 2 + assert isinstance(docs.parameters[0], dsl_interpreter.OperatorParameter) + assert docs.parameters[0].name == "x" + assert docs.parameters[0].description == "first parameter" + assert docs.parameters[0].required + assert docs.parameters[0].type == int + assert isinstance(docs.parameters[1], dsl_interpreter.OperatorParameter) + assert docs.parameters[1].name == "y" + assert docs.parameters[1].description == "second parameter" + assert not docs.parameters[1].required + assert docs.parameters[1].type == int + + +def test_get_docs_without_parameters(): + """Test get_docs() when operator has no parameters""" + docs = OperatorWithoutParameters.get_docs() + assert docs.name == "no_param_op" + assert docs.description == "Operator without parameters" + assert docs.type == commons_constants.BASE_OPERATORS_LIBRARY + assert docs.example == "no_param_op()" + assert docs.parameters == [] + + +def test_get_docs_with_custom_library(): + """Test get_docs() when operator has custom library""" + docs = OperatorWithCustomLibrary.get_docs() + assert docs.name == "custom_lib_op" + assert docs.description == "Operator with custom library" + assert docs.type == "custom_library" # Should use custom library, not default + assert docs.example == "custom_lib_op()" + assert docs.parameters == [] + + +def test_get_docs_with_empty_fields(): + """Test get_docs() when NAME, DESCRIPTION, EXAMPLE are empty""" + docs = OperatorWithEmptyFields.get_docs() + assert docs.name == "empty_fields_op" # Should use get_name() + assert docs.description == "" # Empty DESCRIPTION + assert docs.type == commons_constants.BASE_OPERATORS_LIBRARY + assert docs.example == "" # Empty EXAMPLE + assert docs.parameters == [] + + +def test_get_docs_returns_operator_docs_instance(): + """Test that get_docs() returns an OperatorDocs instance""" + docs = OperatorWithName.get_docs() + assert isinstance(docs, dsl_interpreter.OperatorDocs) + + +def test_get_docs_to_json(): + """Test that the OperatorDocs returned by get_docs() can be serialized to JSON""" + docs = OperatorWithParameters.get_docs() + json_data = docs.to_json() + assert isinstance(json_data, dict) + assert json_data["name"] == "param_op" + assert json_data["description"] == "Operator with parameters" + assert json_data["type"] == commons_constants.BASE_OPERATORS_LIBRARY + assert json_data["example"] == "param_op(1, 2)" + assert len(json_data["parameters"]) == 2 + assert json_data["parameters"][0]["name"] == "x" + assert json_data["parameters"][0]["description"] == "first parameter" + assert json_data["parameters"][0]["required"] is True + assert json_data["parameters"][0]["type"] == "int" + assert json_data["parameters"][1]["name"] == "y" + assert json_data["parameters"][1]["description"] == "second parameter" + assert json_data["parameters"][1]["required"] is False + assert json_data["parameters"][1]["type"] == "int" + + +@pytest.mark.asyncio +async def test_chained_comparison_two_ops(interpreter): + # 0 < 5 <= 10 => (0 < 5) and (5 <= 10) => True + assert await interpreter.interprete("0 < 5 <= 10") is True + # 0 < 10 <= 10 => (0 < 10) and (10 <= 10) => True + assert await interpreter.interprete("0 < 10 <= 10") is True + # 0 < 15 <= 10 => (0 < 15) and (15 <= 10) => False (second fails) + assert await interpreter.interprete("0 < 15 <= 10") is False + # 5 < 3 <= 10 => (5 < 3) and (3 <= 10) => False (first fails) + assert await interpreter.interprete("5 < 3 <= 10") is False + # both fail: 10 < 5 <= 3 + assert await interpreter.interprete("10 < 5 <= 3") is False + + +@pytest.mark.asyncio +async def test_chained_comparison_three_ops(interpreter): + # 1 < 2 < 3 < 4 => all True + assert await interpreter.interprete("1 < 2 < 3 < 4") is True + # 1 < 2 < 3 < 3 => last fails (3 < 3 is False) + assert await interpreter.interprete("1 < 2 < 3 < 3") is False + # 1 <= 1 <= 1 <= 1 => all True + assert await interpreter.interprete("1 <= 1 <= 1 <= 1") is True + + +@pytest.mark.asyncio +async def test_chained_comparison_mixed_operators(interpreter): + # 0 < 5 >= 3 => (0 < 5) and (5 >= 3) => True + assert await interpreter.interprete("0 < 5 >= 3") is True + # 0 < 5 >= 6 => (0 < 5) and (5 >= 6) => False + assert await interpreter.interprete("0 < 5 >= 6") is False + # 1 <= 2 > 1 => (1 <= 2) and (2 > 1) => True + assert await interpreter.interprete("1 <= 2 > 1") is True + # 1 != 2 < 3 => (1 != 2) and (2 < 3) => True + assert await interpreter.interprete("1 != 2 < 3") is True + # 1 == 1 < 2 => (1 == 1) and (1 < 2) => True + assert await interpreter.interprete("1 == 1 < 2") is True + # 1 == 1 < 0 => (1 == 1) and (1 < 0) => False + assert await interpreter.interprete("1 == 1 < 0") is False + + +@pytest.mark.asyncio +async def test_chained_comparison_with_expressions(interpreter): + # chained comparison where operands are arithmetic expressions + # 0 < (2 + 3) <= 10 => 0 < 5 <= 10 => True + assert await interpreter.interprete("0 < 2 + 3 <= 10") is True + # 0 < (10 - 3) <= 5 => 0 < 7 <= 5 => False + assert await interpreter.interprete("0 < 10 - 3 <= 5") is False + + +@pytest.mark.asyncio +async def test_chained_comparison_with_function_calls(interpreter): + # plus_42() returns 42 => 0 < 42 <= 100 => True + assert await interpreter.interprete("0 < plus_42() <= 100") is True + # 0 < 42 <= 41 => False + assert await interpreter.interprete("0 < plus_42() <= 41") is False + # 40 < 42 < 50 => True + assert await interpreter.interprete("40 < plus_42() < 50") is True + # middle operand shared: 0 < plus_42() <= plus_42() => 0 < 42 <= 42 => True + assert await interpreter.interprete("0 < plus_42() <= plus_42()") is True + + +@pytest.mark.asyncio +async def test_chained_comparison_in_bool_context(interpreter): + # chained comparison as part of a larger boolean expression + # (0 < 5 <= 10) and (1 < 2) => True and True => True + assert await interpreter.interprete("0 < 5 <= 10 and 1 < 2") is True + # (0 < 15 <= 10) and (1 < 2) => False and True => False + assert await interpreter.interprete("0 < 15 <= 10 and 1 < 2") is False + # (0 < 15 <= 10) or (1 < 2) => False or True => True + assert await interpreter.interprete("0 < 15 <= 10 or 1 < 2") is True + + +@pytest.mark.asyncio +async def test_chained_comparison_boundary_values(interpreter): + # exact boundary: 0 < 0 <= 10 => (0 < 0) is False + assert await interpreter.interprete("0 < 0 <= 10") is False + # exact boundary: 0 < 10 <= 10 => True + assert await interpreter.interprete("0 < 10 <= 10") is True + # negative values via expression: (0 - 5) < 0 < 5 => True + assert await interpreter.interprete("0 - 5 < 0 < 5") is True + # float boundaries + assert await interpreter.interprete("0.0 < 0.5 <= 1.0") is True + assert await interpreter.interprete("0.0 < 1.0 <= 0.5") is False + + +@pytest.mark.asyncio +async def test_chained_comparison_without_and_operator_raises(interpreter): + # create an interpreter without the And operator to verify the error message + interpreter_no_and = dsl_interpreter.Interpreter([ + LtOperator, LtEOperator, + ]) + # single comparison still works + assert await interpreter_no_and.interprete("1 < 2") is True + # chained comparison requires And and should raise + with pytest.raises(commons_errors.UnsupportedOperatorError, match="Chained comparisons require the 'And' operator"): + interpreter_no_and.prepare("0 < 5 <= 10") \ No newline at end of file diff --git a/packages/commons/tests/dsl_interpreter/test_dictionnaries.py b/packages/commons/tests/dsl_interpreter/test_dictionnaries.py new file mode 100644 index 0000000000..37332d1a4f --- /dev/null +++ b/packages/commons/tests/dsl_interpreter/test_dictionnaries.py @@ -0,0 +1,141 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.constants +import octobot_commons.dsl_interpreter + + + + +class BinOperator1(octobot_commons.dsl_interpreter.BinaryOperator): + @staticmethod + def get_name() -> str: + return "b1" + + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + + +class BinOperator2(octobot_commons.dsl_interpreter.BinaryOperator): + @staticmethod + def get_name() -> str: + return "b2" + + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + + +class BinOperator3(octobot_commons.dsl_interpreter.BinaryOperator): + @staticmethod + def get_name() -> str: + return "b3" + + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + + +class UnaryOperator1(octobot_commons.dsl_interpreter.UnaryOperator): + @staticmethod + def get_name() -> str: + return "u1" + + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + + +class UnaryOperator2(octobot_commons.dsl_interpreter.UnaryOperator): + @staticmethod + def get_name() -> str: + return "u2" + + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + + +class ContextualOperator(octobot_commons.dsl_interpreter.CallOperator): + @staticmethod + def get_name() -> str: + return "c1" + + @staticmethod + def get_library() -> str: + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + + +@pytest.mark.parametrize( + "libraries", + [tuple(), (octobot_commons.constants.BASE_OPERATORS_LIBRARY, )] +) +def test_get_all_operators(libraries): + assert octobot_commons.dsl_interpreter.get_all_operators(*libraries) is not None + assert len(octobot_commons.dsl_interpreter.get_all_operators(*libraries)) > 0 + operators = octobot_commons.dsl_interpreter.get_all_operators(*libraries) + assert ContextualOperator not in operators + operator_types = [ + octobot_commons.dsl_interpreter.BinaryOperator, + octobot_commons.dsl_interpreter.UnaryOperator, + ] + operator_by_type = { + operator_type.__name__: [] for operator_type in operator_types + } + for operator in operators: + name = operator.get_name() + assert len(name) > 0 + for operator_type in operator_types: + if issubclass(operator, operator_type): + operator_by_type[operator_type.__name__].append(operator) + break + for operator_type, operators in operator_by_type.items(): + assert len(operators) > 1, f"Expected at least 2 {operator_type} operators. {operator_by_type=}" + + +def test_get_all_operators_unknown_library(): + assert octobot_commons.dsl_interpreter.get_all_operators("unknown_library") == [] + # now include base library as well + operators = octobot_commons.dsl_interpreter.get_all_operators("base", "unknown_library") + assert len(operators) > 4 + assert ContextualOperator not in operators + + +def test_clear_get_all_operators_cache(): + + def create_new_operator(): + class NewOperator(octobot_commons.dsl_interpreter.Operator): + @staticmethod + def get_name() -> str: + return "new_operator" + def compute(self) -> octobot_commons.dsl_interpreter.ComputedOperatorParameterType: + return 0 + return NewOperator + + first_get_all_operators = octobot_commons.dsl_interpreter.get_all_operators() + assert len(first_get_all_operators) > 0 + assert octobot_commons.dsl_interpreter.get_all_operators() == first_get_all_operators + new_operator_class = create_new_operator() + assert octobot_commons.dsl_interpreter.get_all_operators() == first_get_all_operators + # new operator should not be in the list: list was cached before the new operator was created + assert new_operator_class not in first_get_all_operators + assert ContextualOperator not in first_get_all_operators + # now clear the cache and check that the new operator is in the list + octobot_commons.dsl_interpreter.clear_get_all_operators_cache() + second_get_all_operators = octobot_commons.dsl_interpreter.get_all_operators() + assert len(second_get_all_operators) == len(first_get_all_operators) + 1 + assert new_operator_class in second_get_all_operators + assert ContextualOperator not in second_get_all_operators diff --git a/packages/commons/tests/dsl_interpreter/test_interpreter.py b/packages/commons/tests/dsl_interpreter/test_interpreter.py new file mode 100644 index 0000000000..d037a04335 --- /dev/null +++ b/packages/commons/tests/dsl_interpreter/test_interpreter.py @@ -0,0 +1,134 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import dataclasses +import typing +import pytest +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import ast + + +@dataclasses.dataclass +class ChannelDependency(dsl_interpreter.InterpreterDependency): + channel_name: str + + +class SumPlusXOperatorWithoutInit(dsl_interpreter.NaryOperator): + def __init__(self, *parameters: dsl_interpreter.OperatorParameterType, **kwargs: typing.Any): + super().__init__(*parameters, **kwargs) + self.x_value = 42 + + @staticmethod + def get_name() -> str: + return "plus_42" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + return sum(computed_parameters) + self.x_value + + +class TimeFrameToSecondsOperator(dsl_interpreter.CallOperator): + def __init__(self, operand: dsl_interpreter.OperatorParameterType, **kwargs: typing.Any): + super().__init__(operand, **kwargs) + + @staticmethod + def get_name() -> str: + return "time_frame_to_seconds" + + def get_dependencies(self) -> typing.List[dsl_interpreter.InterpreterDependency]: + dependencies = super().get_dependencies() + dependencies.append(ChannelDependency("time_channel")) + return dependencies + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + return commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(computed_parameters[0])] * commons_constants.MINUTE_TO_SECONDS + + +class AddOperator(dsl_interpreter.BinaryOperator): + @staticmethod + def get_name() -> str: + return ast.Add.__name__ + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left + right + + +@pytest.fixture +def interpreter(): + interpreter = dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + ) + interpreter.extend([ + SumPlusXOperatorWithoutInit, TimeFrameToSecondsOperator, AddOperator + ]) + return interpreter + +@pytest.mark.asyncio +async def test_interprete(interpreter): + assert isinstance(await interpreter.interprete("plus_42()"), int) + assert await interpreter.interprete("time_frame_to_seconds('1m') + plus_42()") == 60 + 42 + + +@pytest.mark.asyncio +async def test_prepare_and_compute_expression(interpreter): + assert interpreter._operator_tree_or_constant is None + interpreter.prepare("plus_42()") + assert isinstance(interpreter._operator_tree_or_constant, dsl_interpreter.Operator) + assert await interpreter.compute_expression() == 42 + assert await interpreter.compute_expression() == 42 # return the same value as the first time + + assert isinstance(interpreter._operator_tree_or_constant, SumPlusXOperatorWithoutInit) + + async def compute_new_value(): + interpreter._operator_tree_or_constant.x_value = 100 + with mock.patch.object( + interpreter._operator_tree_or_constant, 'pre_compute', mock.AsyncMock(side_effect=compute_new_value) + ): + # now returns 100 because the same operator now has a new value (set during pre_compute()) + assert await interpreter.compute_expression() == 100 + assert await interpreter.compute_expression() == 100 + + # 100 value has been saved + assert await interpreter.compute_expression() == 100 + + +@pytest.mark.asyncio +async def test_get_dependencies(interpreter): + interpreter.prepare("plus_42()") + assert interpreter.get_dependencies() == [] + + interpreter.prepare("time_frame_to_seconds('1m') + plus_42()") + assert interpreter.get_dependencies() == [ + ChannelDependency("time_channel") + ] + + interpreter.prepare("time_frame_to_seconds('1m') + time_frame_to_seconds('1m')") + # don't return the same dependency twice + assert interpreter.get_dependencies() == [ + ChannelDependency("time_channel") + ] + + # more than one dependency + with mock.patch.object(SumPlusXOperatorWithoutInit, 'get_dependencies', mock.Mock(return_value=[ChannelDependency("plop_channel")])): + interpreter.prepare("time_frame_to_seconds('1m') + (2 + plus_42())") + assert interpreter.get_dependencies() == [ + ChannelDependency("time_channel"), + ChannelDependency("plop_channel") + ] diff --git a/packages/commons/tests/dsl_interpreter/test_parameters_util.py b/packages/commons/tests/dsl_interpreter/test_parameters_util.py new file mode 100644 index 0000000000..7e249d03c6 --- /dev/null +++ b/packages/commons/tests/dsl_interpreter/test_parameters_util.py @@ -0,0 +1,431 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest + +import octobot_commons.constants as constants +import octobot_commons.dsl_interpreter.parameters_util as parameters_util +import octobot_commons.dsl_interpreter.operator_parameter as operator_parameter +import octobot_commons.errors as commons_errors + + +class TestFormatParameterValue: + def test_none(self): + assert parameters_util.format_parameter_value(None) == "None" + + def test_true(self): + assert parameters_util.format_parameter_value(True) == "True" + + def test_false(self): + assert parameters_util.format_parameter_value(False) == "False" + + def test_int(self): + assert parameters_util.format_parameter_value(42) == "42" + assert parameters_util.format_parameter_value(-10) == "-10" + + def test_float(self): + assert parameters_util.format_parameter_value(3.14) == "3.14" + assert parameters_util.format_parameter_value(1.0) == "1.0" + + def test_plain_string(self): + assert parameters_util.format_parameter_value("hello") == "'hello'" + assert parameters_util.format_parameter_value("") == "''" + + def test_string_json_list(self): + assert parameters_util.format_parameter_value("[1, 2, 3]") == "[1, 2, 3]" + + def test_string_json_dict(self): + assert parameters_util.format_parameter_value('{"a": 1}') == "{'a': 1}" + + def test_string_invalid_json(self): + assert parameters_util.format_parameter_value("not valid json") == "'not valid json'" + + def test_list(self): + assert parameters_util.format_parameter_value([1, 2, 3]) == "[1, 2, 3]" + assert parameters_util.format_parameter_value([]) == "[]" + + def test_dict(self): + assert parameters_util.format_parameter_value({"a": 1}) == "{'a': 1}" + assert parameters_util.format_parameter_value({}) == "{}" + + def test_other_type_uses_repr(self): + class Custom: + def __repr__(self): + return "Custom()" + assert parameters_util.format_parameter_value(Custom()) == "Custom()" + + +class TestResoveOperatorParams: + def test_empty_params_and_empty_values(self): + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [] + result = parameters_util.resove_operator_params(operator_class, {}) + assert result == [] + + def test_required_params_only(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=str + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b] + param_value_by_name = {"a": 1, "b": "hello"} + result = parameters_util.resove_operator_params(operator_class, param_value_by_name) + assert result == ["1", "'hello'"] + + def test_optional_params_only(self): + param_x = operator_parameter.OperatorParameter( + name="x", description="optional", required=False, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_x] + param_value_by_name = {"x": 99} + result = parameters_util.resove_operator_params(operator_class, param_value_by_name) + assert result == ["x=99"] + + def test_mixed_required_and_optional(self): + param_req = operator_parameter.OperatorParameter( + name="req", description="required", required=True, type=int + ) + param_opt = operator_parameter.OperatorParameter( + name="opt", description="optional", required=False, type=str + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_req, param_opt] + param_value_by_name = {"req": 42, "opt": "value"} + result = parameters_util.resove_operator_params(operator_class, param_value_by_name) + assert result == ["42", "opt='value'"] + + def test_skips_missing_params(self): + param_req = operator_parameter.OperatorParameter( + name="req", description="required", required=True, type=int + ) + param_opt = operator_parameter.OperatorParameter( + name="opt", description="optional", required=False, type=str + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_req, param_opt] + param_value_by_name = {"req": 1} + result = parameters_util.resove_operator_params(operator_class, param_value_by_name) + assert result == ["1"] + + def test_extra_values_ignored(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a] + param_value_by_name = {"a": 1, "unknown": "ignored"} + result = parameters_util.resove_operator_params(operator_class, param_value_by_name) + assert result == ["1"] + + +class TestResolveOperatorArgsAndKwargs: + def test_empty_params_returns_unchanged(self): + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1, 2], {"extra": "val"} + ) + assert args == [1, 2] + assert kwargs == {"extra": "val"} + + def test_positional_args_only(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1, 2], {} + ) + assert args == [1, 2] + assert kwargs == {} + + def test_positional_arg_as_keyword_arg(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1], {"b": 3} + ) + assert args == [1, 3] + assert kwargs == {} + + def test_positional_arg_as_keyword_arg_in_a_wrong_order(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=int + ) + param_c = operator_parameter.OperatorParameter( + name="c", description="third", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b, param_c] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1], {"c": 3, "b": 2} + ) + assert args == [1, 2, 3] + assert kwargs == {} + + def test_positional_all_args_as_keywords_in_a_wrong_order(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=int + ) + param_c = operator_parameter.OperatorParameter( + name="c", description="third", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b, param_c] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [], {"b": 2, "a": 1, "c": 3} + ) + assert args == [1, 2, 3] + assert kwargs == {} + + def test_kwargs_only(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [], {"a": 1, "b": 2} + ) + assert args == [1, 2] + assert kwargs == {} + + def test_mixed_args_and_kwargs(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=True, type=int + ) + param_c = operator_parameter.OperatorParameter( + name="c", description="optional", required=False, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b, param_c] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1], {"b": 2, "c": 3} + ) + assert args == [1, 2, 3] + assert kwargs == {} + + def test_extra_kwargs_preserved(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1], {"other": "value"} + ) + assert args == [1] + assert kwargs == {"other": "value"} + + def test_raises_when_too_many_positional_args(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a] + operator_class.get_name.return_value = "test_op" + operator_class.get_parameters_description.return_value = "1: a [int] - first" + with pytest.raises(commons_errors.InvalidParametersError, match="test_op supports up to 1 parameters"): + parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1, 2, 3], {} + ) + + def test_partial_params_allowed(self): + param_a = operator_parameter.OperatorParameter( + name="a", description="first", required=True, type=int + ) + param_b = operator_parameter.OperatorParameter( + name="b", description="second", required=False, type=int + ) + operator_class = mock.Mock() + operator_class.get_parameters.return_value = [param_a, param_b] + args, kwargs = parameters_util.resolve_operator_args_and_kwargs( + operator_class, [1], {} + ) + assert args == [1] + assert kwargs == {} + + +class TestApplyResolvedParameterValue: + def test_replaces_single_parameter_with_int(self): + script = f"op(x=1, y={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value(script, "y", 42) + assert result == "op(x=1, y=42)" + + def test_replaces_single_parameter_with_string(self): + script = f"op(name={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value(script, "name", "hello") + assert result == "op(name='hello')" + + def test_replaces_single_parameter_with_bool(self): + script = f"op(flag={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value(script, "flag", True) + assert result == "op(flag=True)" + + def test_replaces_single_parameter_with_list(self): + script = f"op(items={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value(script, "items", [1, 2]) + assert result == "op(items=[1, 2])" + + def test_replaces_single_parameter_with_dict(self): + script = f"op(config={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value( + script, "config", {"a": 1} + ) + assert result == "op(config={'a': 1})" + + def test_replaces_single_parameter_with_none(self): + script = f"op(val={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value(script, "val", None) + assert result == "op(val=None)" + + def test_raises_when_parameter_not_found(self): + script = "op(x=1, y=2)" + with pytest.raises(commons_errors.ResolvedParameterNotFoundError, match="Parameter z not found in script"): + parameters_util.apply_resolved_parameter_value(script, "z", 42) + + def test_raises_when_placeholder_not_in_script_for_parameter(self): + script = f"op(x={constants.UNRESOLVED_PARAMETER_PLACEHOLDER}, y=2)" + with pytest.raises(commons_errors.ResolvedParameterNotFoundError, match="Parameter z not found in script"): + parameters_util.apply_resolved_parameter_value(script, "z", 42) + + def test_replaces_only_exact_parameter_pattern(self): + script = f"op(a=1, b={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + result = parameters_util.apply_resolved_parameter_value(script, "b", 100) + assert result == "op(a=1, b=100)" + # Ensure 'a' was not touched + assert "a=1" in result + + +class TestAddResolvedParameterValue: + def test_adds_to_call_with_no_parenthesis(self): + result = parameters_util.add_resolved_parameter_value("op", "op", "x", "a") + assert result == "op(x='a')" + + def test_adds_to_empty_params_op(self): + result = parameters_util.add_resolved_parameter_value("op()", "op", "x", 42) + assert result == "op(x=42)" + + def test_adds_to_empty_params_with_spaces(self): + result = parameters_util.add_resolved_parameter_value("op( )", "op", "x", 42) + assert result == "op( x=42)" + + def test_adds_after_positional_arg(self): + result = parameters_util.add_resolved_parameter_value("op(1)", "op", "x", 42) + assert result == "op(1, x=42)" + + def test_adds_after_keyword_arg(self): + result = parameters_util.add_resolved_parameter_value("op(a=1)", "op", "x", 42) + assert result == "op(a=1, x=42)" + + def test_adds_after_multiple_args(self): + result = parameters_util.add_resolved_parameter_value("op(1, b=2)", "op", "x", 42) + assert result == "op(1, b=2, x=42)" + + def test_adds_string_value(self): + result = parameters_util.add_resolved_parameter_value("op()", "op", "name", "hello") + assert result == "op(name='hello')" + + def test_adds_to_all_instances_of_operator(self): + script = "wait(1, a=True) if wait(blockchain_wallet_balance() < 1) else True" + result = parameters_util.add_resolved_parameter_value( + script, "wait", "last_execution_result", [123, "plop"] + ) + assert result == ( + "wait(1, a=True, last_execution_result=[123, 'plop']) if " + "wait(blockchain_wallet_balance() < 1, last_execution_result=[123, 'plop']) else True" + ) + + def test_does_not_touch_other_operators(self): + result = parameters_util.add_resolved_parameter_value( + "foo(1) if wait(2) else bar()", "wait", "x", 1 + ) + assert result == "foo(1) if wait(2, x=1) else bar()" + + def test_raises_when_operator_not_found_and_script_has_calls(self): + with pytest.raises( + commons_errors.InvalidParametersError, + match=r"Operator 'wait' call sites not found", + ): + parameters_util.add_resolved_parameter_value("foo(1)", "wait", "x", 1) + + def test_raises_when_parameter_already_in_kwargs(self): + with pytest.raises(commons_errors.InvalidParametersError, match="Parameter x is already in operator keyword args"): + parameters_util.add_resolved_parameter_value("op(x=1)", "op", "x", 42) + + def test_raises_when_parameter_already_first_kwarg(self): + with pytest.raises(commons_errors.InvalidParametersError, match="Parameter a is already"): + parameters_util.add_resolved_parameter_value("op(a=1, b=2)", "op", "a", 99) + + def test_raises_when_parameter_already_last_kwarg(self): + with pytest.raises(commons_errors.InvalidParametersError, match="Parameter b is already"): + parameters_util.add_resolved_parameter_value("op(a=1, b=2)", "op", "b", 99) + + def test_raises_when_script_has_unclosed_parenthesis(self): + with pytest.raises(commons_errors.InvalidParametersError, match="has unclosed parenthesis"): + parameters_util.add_resolved_parameter_value("op(1", "op", "x", 42) + + +class TestHasUnresolvedParameters: + def test_returns_true_when_placeholder_present(self): + script = f"op(x={constants.UNRESOLVED_PARAMETER_PLACEHOLDER})" + assert parameters_util.has_unresolved_parameters(script) is True + + def test_returns_true_when_multiple_placeholders(self): + placeholder = constants.UNRESOLVED_PARAMETER_PLACEHOLDER + script = f"op(a={placeholder}, b={placeholder})" + assert parameters_util.has_unresolved_parameters(script) is True + + def test_returns_false_when_no_placeholder(self): + script = "op(x=1, y=2)" + assert parameters_util.has_unresolved_parameters(script) is False + + def test_returns_false_for_empty_script(self): + assert parameters_util.has_unresolved_parameters("") is False + + def test_returns_true_when_placeholder_part_of_larger_string(self): + script = f"op(x='prefix_{constants.UNRESOLVED_PARAMETER_PLACEHOLDER}_suffix')" + assert parameters_util.has_unresolved_parameters(script) is True + + def test_returns_true_when_placeholder_alone(self): + script = constants.UNRESOLVED_PARAMETER_PLACEHOLDER + assert parameters_util.has_unresolved_parameters(script) is True diff --git a/packages/commons/tests/logging/__init__.py b/packages/commons/tests/logging/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/commons/tests/logging/test_context_based_file_handler.py b/packages/commons/tests/logging/test_context_based_file_handler.py new file mode 100644 index 0000000000..b98c3bf3a2 --- /dev/null +++ b/packages/commons/tests/logging/test_context_based_file_handler.py @@ -0,0 +1,178 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging +import os +import tempfile + +import mock +import pytest + +import octobot_commons.logging.context_based_file_handler as context_based_file_handler + + +@pytest.fixture +def temp_info_logs_and_cleanup_folder(): + with tempfile.TemporaryDirectory() as tmpdir: + root = logging.getLogger() + original_level = root.level + root.setLevel(logging.INFO) + yield tmpdir + root.setLevel(original_level) + for handler in root.handlers[:]: + if isinstance(handler, context_based_file_handler.ContextBasedFileHandler): + handler.close() + root.removeHandler(handler) + + +def test_context_based_file_handler_writes_to_file_when_provider_returns_name(temp_info_logs_and_cleanup_folder): + file_name_provider = mock.Mock(return_value="my_context") + handler = context_based_file_handler.ContextBasedFileHandler( + temp_info_logs_and_cleanup_folder, file_name_provider + ) + handler.setLevel(logging.INFO) + logging.getLogger().addHandler(handler) + + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + logger.info("test message") + + handler.flush() + log_path = os.path.join(temp_info_logs_and_cleanup_folder, "my_context.log") + with open(log_path, encoding="utf-8") as f: + content = f.read() + assert "test message" in content + + +def test_context_based_file_handler_does_not_write_when_provider_returns_none(temp_info_logs_and_cleanup_folder): + file_name_provider = mock.Mock(return_value=None) + handler = context_based_file_handler.ContextBasedFileHandler( + temp_info_logs_and_cleanup_folder, file_name_provider + ) + handler.setLevel(logging.INFO) + logging.getLogger().addHandler(handler) + + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + logger.info("test message") + + handler.flush() + assert not os.listdir(temp_info_logs_and_cleanup_folder) + + +def test_context_based_file_handler_creates_multiple_files_for_different_contexts( + temp_info_logs_and_cleanup_folder, +): + contexts = [] + + def rotating_provider(): + return contexts[0] if contexts else None + + handler = context_based_file_handler.ContextBasedFileHandler( + temp_info_logs_and_cleanup_folder, rotating_provider + ) + handler.setLevel(logging.INFO) + logging.getLogger().addHandler(handler) + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + + contexts.append("ctx_a") + logger.info("message a") + handler.flush() + + contexts[0] = "ctx_b" + logger.info("message b") + handler.flush() + + files = sorted(os.listdir(temp_info_logs_and_cleanup_folder)) + assert files == ["ctx_a.log", "ctx_b.log"] + + with open(f"{temp_info_logs_and_cleanup_folder}/ctx_a.log", encoding="utf-8") as f: + assert "message a" in f.read() + with open(f"{temp_info_logs_and_cleanup_folder}/ctx_b.log", encoding="utf-8") as f: + assert "message b" in f.read() + + +def test_context_based_file_handler_removes_oldest_when_max_handlers_reached(temp_info_logs_and_cleanup_folder): + with mock.patch.object( + context_based_file_handler, + "MAX_CONTEXT_BASED_FILE_HANDLERS_PER_CATEGORY", + 3, + ): + contexts = [] + + def rotating_provider(): + return contexts[0] if contexts else None + + handler = context_based_file_handler.ContextBasedFileHandler( + temp_info_logs_and_cleanup_folder, rotating_provider + ) + handler.setLevel(logging.INFO) + logging.getLogger().addHandler(handler) + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + + contexts.append("ctx_1") + logger.info("msg 1") + handler.flush() + + contexts[0] = "ctx_2" + logger.info("msg 2") + handler.flush() + + contexts[0] = "ctx_3" + logger.info("msg 3") + handler.flush() + + contexts[0] = "ctx_4" + logger.info("msg 4") + handler.flush() + + assert len(handler._custom_handlers) == 3 + assert "ctx_1" not in handler._custom_handlers + assert "ctx_2" in handler._custom_handlers + assert "ctx_3" in handler._custom_handlers + assert "ctx_4" in handler._custom_handlers + + +def test_add_context_based_file_handler_adds_handler_to_root_logger(temp_info_logs_and_cleanup_folder): + file_name_provider = mock.Mock(return_value=None) + root = logging.getLogger() + initial_count = len(root.handlers) + + context_based_file_handler.add_context_based_file_handler( + temp_info_logs_and_cleanup_folder, file_name_provider + ) + + assert len(root.handlers) == initial_count + 1 + added = root.handlers[-1] + assert isinstance(added, context_based_file_handler.ContextBasedFileHandler) + + +def test_context_based_file_handler_creates_logs_folder_if_missing(temp_info_logs_and_cleanup_folder): + nested = f"{temp_info_logs_and_cleanup_folder}/nested/logs" + file_name_provider = mock.Mock(return_value="ctx") + handler = context_based_file_handler.ContextBasedFileHandler( + nested, file_name_provider + ) + handler.setLevel(logging.INFO) + logging.getLogger().addHandler(handler) + + assert os.path.isdir(nested) + logger = logging.getLogger("test") + logger.setLevel(logging.INFO) + logger.info("msg") + handler.flush() + assert os.path.isfile(f"{nested}/ctx.log") diff --git a/packages/commons/tests/logging/test_logging_util.py b/packages/commons/tests/logging/test_logging_util.py new file mode 100644 index 0000000000..8f89d6c7c0 --- /dev/null +++ b/packages/commons/tests/logging/test_logging_util.py @@ -0,0 +1,132 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest + +import octobot_commons.constants as commons_constants +import octobot_commons.logging as logging +import octobot_commons.logging.logging_util as logging_util + + +@pytest.fixture +def logger(): + return logging.get_logger("test") + + +@pytest.fixture +def call_wrapper(): + callback_mock = mock.Mock() + + class Wrapper: + def __init__(self): + self.callback_mock = callback_mock + + def other_callback(self, *args, **kwargs): + callback_mock(*args, **kwargs) + return Wrapper() + + +def test_register_error_callback(): + def other_call_back(): + pass + logging.BotLogger.register_error_callback(logging_util._default_callback) + assert logging_util._ERROR_CALLBACK is logging_util._default_callback + logging.BotLogger.register_error_callback(other_call_back) + assert logging_util._ERROR_CALLBACK is other_call_back + + +def test_error(logger, call_wrapper): + logging.BotLogger.register_error_callback(call_wrapper.other_callback) + logger.error("err") + call_wrapper.callback_mock.assert_called_once_with(None, "err") + call_wrapper.callback_mock.reset_mock() + + logger.error("err", skip_post_callback=True) + call_wrapper.callback_mock.assert_not_called() + + +def test_exception(logger, call_wrapper): + logging.BotLogger.register_error_callback(call_wrapper.other_callback) + err = None + def raiser(): + def other(): + 1/0 + other() + try: + raiser() + except Exception as exc: + err = exc + logger.exception(err, True, "error") + call_wrapper.callback_mock.assert_called_once_with(err, "error") + call_wrapper.callback_mock.reset_mock() + + logger.exception(err, True, "error", skip_post_callback=True) + call_wrapper.callback_mock.assert_not_called() + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", True) +def test_get_private_minimized_message_when_allowed_returns_unchanged(): + private_message = "secret-api-key-very-long" + assert logging_util.get_private_minimized_message_if_necessary(private_message) == private_message + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", True) +def test_get_private_minimized_message_when_allowed_preserves_non_string(): + non_string_payload = {"wallet": "data"} + assert logging_util.get_private_minimized_message_if_necessary(non_string_payload) is non_string_payload + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", False) +def test_get_private_minimized_message_when_disallowed_truncates_with_placeholder(): + private_message = "abcdefghijklmnop" + head = private_message[: commons_constants.PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT] + tail = private_message[-commons_constants.PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT :] + expected = head + commons_constants.PRIVATE_MESSAGE_PLACEHOLDER + tail + assert logging_util.get_private_minimized_message_if_necessary(private_message) == expected + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", False) +def test_get_private_minimized_message_when_disallowed_returns_none(): + assert logging_util.get_private_minimized_message_if_necessary(None) is None + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", False) +def test_get_private_minimized_message_when_disallowed_coerces_non_string(): + numeric_token = 123456789 + numeric_str = str(numeric_token) + head = numeric_str[: commons_constants.PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT] + tail = numeric_str[-commons_constants.PRIVATE_MESSAGE_ALLOWED_CHARS_COUNT :] + expected = head + commons_constants.PRIVATE_MESSAGE_PLACEHOLDER + tail + assert logging_util.get_private_minimized_message_if_necessary(numeric_token) == expected + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", True) +def test_get_private_placeholder_when_allowed_returns_message(): + assert logging_util.get_private_placeholder_if_necessary("sensitive-value") == "sensitive-value" + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", True) +def test_get_private_placeholder_when_allowed_preserves_non_string(): + non_string_payload = object() + assert logging_util.get_private_placeholder_if_necessary(non_string_payload) is non_string_payload + + +@mock.patch("octobot_commons.logging.logging_util.constants.ALLOW_PRIVATE_DATA_LOGS", False) +def test_get_private_placeholder_when_disallowed_returns_placeholder_constant(): + assert ( + logging_util.get_private_placeholder_if_necessary("any-content") + == commons_constants.PRIVATE_MESSAGE_PLACEHOLDER + ) diff --git a/packages/commons/tests/profiles/__init__.py b/packages/commons/tests/profiles/__init__.py new file mode 100644 index 0000000000..95b67fe133 --- /dev/null +++ b/packages/commons/tests/profiles/__init__.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os + +import pytest +import pathlib +import octobot_commons.profiles as profiles +import octobot_commons.tests.test_config as test_config + + +def get_profile_path(): + return test_config.TEST_CONFIG_FOLDER + + +def get_profiles_path(): + return pathlib.Path(get_profile_path()).parent + + +@pytest.fixture +def profile(): + return profiles.Profile(get_profile_path()) + + +@pytest.fixture +def invalid_profile(): + return profiles.Profile(os.path.join(get_profile_path(), "invalid_profile")) diff --git a/packages/commons/tests/profiles/test_profile.py b/packages/commons/tests/profiles/test_profile.py new file mode 100644 index 0000000000..c8a2cab280 --- /dev/null +++ b/packages/commons/tests/profiles/test_profile.py @@ -0,0 +1,398 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import copy +import json +import shutil +import pytest +import mock +import octobot_commons.json_util +import octobot_commons.profiles as profiles +import octobot_commons.constants as constants +import octobot_commons.enums as enums +import octobot_commons.tests.test_config as test_config + +from tests.profiles import profile, get_profile_path, get_profiles_path + + +def test_read_config(profile): + save_ref = profile + assert profile.read_config() is save_ref + assert profile.profile_id == "default" + assert profile.name == "default" + assert profile.description == "OctoBot default profile." + assert profile.avatar == "default_profile.png" + assert profile.avatar_path == os.path.join(test_config.TEST_CONFIG_FOLDER, "default_profile.png") + assert profile.origin_url == "https://default.url" + # default value: distribution is not in profile config + assert profile.config[constants.CONFIG_DISTRIBUTION] == constants.DEFAULT_DISTRIBUTION + assert len(profile.config) == 6 + assert isinstance(profile.config, dict) + + profile.path = "" + with pytest.raises(FileNotFoundError): + profile.read_config() + + +def test_save_config(profile): + with mock.patch.object(profile, "validate_and_save_config", mock.Mock()) as validate_and_save_config_mock, \ + mock.patch.object(profile, "_filter_fill_elements", mock.Mock()) as _filter_fill_elements_mock: + profile.config = {} + # nothing to operate on + global_config = {} + profile.save_config(global_config) + assert profile.config == {} + validate_and_save_config_mock.assert_called_once() + _filter_fill_elements_mock.assert_not_called() + + validate_and_save_config_mock.reset_mock() + _filter_fill_elements_mock.reset_mock() + profile.config = {} + # things in config + global_config = { + profile.FULLY_MANAGED_ELEMENTS[0]: "plop", + profile.FULLY_MANAGED_ELEMENTS[1]: "plip", + "stuff": "plip" + } + profile.save_config(global_config) + assert profile.config == { + profile.FULLY_MANAGED_ELEMENTS[0]: "plop", + profile.FULLY_MANAGED_ELEMENTS[1]: "plip" + } + validate_and_save_config_mock.assert_called_once() + _filter_fill_elements_mock.assert_not_called() + + validate_and_save_config_mock.reset_mock() + _filter_fill_elements_mock.reset_mock() + profile.config = {} + # things in config + global_config = { + profile.FULLY_MANAGED_ELEMENTS[0]: "plop", + profile.FULLY_MANAGED_ELEMENTS[1]: "plip", + "stuff": "plip", + next(iter(profile.PARTIALLY_MANAGED_ELEMENTS)): {"ploup": True}, + } + profile.save_config(global_config) + assert profile.config == { + profile.FULLY_MANAGED_ELEMENTS[0]: "plop", + profile.FULLY_MANAGED_ELEMENTS[1]: "plip", + } + validate_and_save_config_mock.assert_called_once() + _filter_fill_elements_mock.assert_called_once_with(global_config, + profile.config, + next(iter(profile.PARTIALLY_MANAGED_ELEMENTS)), + profile.PARTIALLY_MANAGED_ELEMENTS_ALLOWED_KEYS[ + next(iter(profile.PARTIALLY_MANAGED_ELEMENTS)) + ]) + + +def test_validate(profile): + with mock.patch.object(octobot_commons.json_util, "validate", mock.Mock()) as validate_mock: + profile.validate() + validate_mock.assert_called_once_with(profile.as_dict(), profile.schema_path) + + +def test_validate_and_save_config(profile): + save_file = "profile_config.json" + with mock.patch.object(profile, "validate", mock.Mock()) as validate_mock, \ + mock.patch.object(profile, "config_file", mock.Mock(return_value=save_file)), \ + mock.patch.object(profile, "save", mock.Mock()) as save_mock: + profile.validate_and_save_config() + validate_mock.assert_called_once() + save_mock.assert_called_once() + + +def test_save(profile): + save_file = "profile_config.json" + if os.path.isfile(save_file): + os.remove(save_file) + try: + profile.read_config() + with mock.patch.object(profile, "config_file", mock.Mock(return_value=save_file)): + profile.save() + with open(save_file) as config_file: + saved_profile = json.load(config_file) + assert saved_profile == profile.as_dict() + finally: + if os.path.isfile(save_file): + os.remove(save_file) + + +def test_duplicate(profile): + with mock.patch.object(shutil, "copytree", mock.Mock()) as copytree_mock, \ + mock.patch.object(profiles.Profile, "save", mock.Mock()) as save_mock: + profile.read_only = True + profile.imported = True + profile.origin_url = "hello" + clone = profile.duplicate() + assert clone.name == profile.name + assert clone.description == profile.description + assert clone.profile_id != profile.description + assert clone.path != profile.path + assert clone.profile_id in clone.path + assert clone.profile_id is not None + # duplicates are not read_only + assert clone.read_only is False + # duplicates are never imported nor have an origin url + assert clone.imported is False + assert clone.origin_url is None + copytree_mock.assert_called_with(profile.path, clone.path) + save_mock.assert_called_once() + + clone = profile.duplicate(name="123", description="456") + assert clone.name == "123" + assert clone.name != profile.name + assert clone.description == "456" + assert clone.description != profile.description + + +def test_as_dict(profile): + assert profile.as_dict() == { + constants.CONFIG_PROFILE: { + constants.CONFIG_ID: None, + constants.CONFIG_NAME: None, + constants.CONFIG_DESCRIPTION: None, + constants.CONFIG_AVATAR: None, + constants.CONFIG_ORIGIN_URL: None, + constants.CONFIG_READ_ONLY: False, + constants.CONFIG_IMPORTED: False, + constants.CONFIG_AUTO_UPDATE: False, + constants.CONFIG_SLUG: None, + constants.CONFIG_COMPLEXITY: enums.ProfileComplexity.MEDIUM.value, + constants.CONFIG_RISK: enums.ProfileRisk.MODERATE.value, + constants.CONFIG_TYPE: enums.ProfileType.LIVE.value, + constants.CONFIG_EXTRA_BACKTESTING_TIME_FRAMES: [], + constants.CONFIG_HIDDEN: False, + }, + constants.PROFILE_CONFIG: {}, + } + profile.read_config() + # do not test read config + profile.config = {"a": 1} + profile.imported = True + profile.complexity = enums.ProfileComplexity.DIFFICULT + profile.risk = enums.ProfileRisk.LOW + profile.auto_update = True + profile.slug = "slugg" + profile.profile_type = enums.ProfileType.BACKTESTING + profile.extra_backtesting_time_frames = [enums.TimeFrames.ONE_DAY.value] + assert profile.as_dict() == { + constants.CONFIG_PROFILE: { + constants.CONFIG_ID: "default", + constants.CONFIG_NAME: "default", + constants.CONFIG_DESCRIPTION: "OctoBot default profile.", + constants.CONFIG_AVATAR: "default_profile.png", + constants.CONFIG_ORIGIN_URL: "https://default.url", + constants.CONFIG_READ_ONLY: False, + constants.CONFIG_IMPORTED: True, + constants.CONFIG_AUTO_UPDATE: True, + constants.CONFIG_SLUG: "slugg", + constants.CONFIG_COMPLEXITY: enums.ProfileComplexity.DIFFICULT.value, + constants.CONFIG_RISK: enums.ProfileRisk.LOW.value, + constants.CONFIG_TYPE: enums.ProfileType.BACKTESTING.value, + constants.CONFIG_EXTRA_BACKTESTING_TIME_FRAMES: [enums.TimeFrames.ONE_DAY.value], + constants.CONFIG_HIDDEN: False, + }, + constants.PROFILE_CONFIG: { + "a": 1 + }, + } + + +def test_config_file(profile): + assert profile.config_file() == os.path.join(get_profile_path(), constants.PROFILE_CONFIG_FILE) + + +def test_merge_partially_managed_element_into_config(profile): + with mock.patch.object(profiles.Profile, "_merge_partially_managed_element", mock.Mock()) as _merge_mock: + config = {} + profile.merge_partially_managed_element_into_config(config, constants.CONFIG_EXCHANGES) + _merge_mock.assert_called_once_with(config, + profile.config, + constants.CONFIG_EXCHANGES, + profile.PARTIALLY_MANAGED_ELEMENTS[constants.CONFIG_EXCHANGES]) + + +def test_merge_partially_managed_element(profile): + profile.read_config() + element = next(iter(profile.PARTIALLY_MANAGED_ELEMENTS)) + template = profile.PARTIALLY_MANAGED_ELEMENTS[element] + config = { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + } + } + } + # add constants.CONFIG_ENABLED_OPTION + profile._merge_partially_managed_element(config, profile.config, element, template) + assert config == { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_ENABLED_OPTION: True + } + } + } + config = { + constants.CONFIG_EXCHANGES: {} + } + profile.config[constants.CONFIG_EXCHANGES]["binance"][constants.CONFIG_ENABLED_OPTION] = False + # add whole exchange + profile._merge_partially_managed_element(config, profile.config, element, template) + assert config == { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_EXCHANGE_TYPE: constants.DEFAULT_EXCHANGE_TYPE, + constants.CONFIG_ENABLED_OPTION: False + } + } + } + config = {} + # add whole exchange and exchanges key with 2 exchanges in profile + profile.config[constants.CONFIG_EXCHANGES]["kucoin"] = { + constants.CONFIG_ENABLED_OPTION: True, + constants.CONFIG_EXCHANGE_TYPE: constants.CONFIG_EXCHANGE_FUTURE + } + profile._merge_partially_managed_element(config, profile.config, element, template) + assert config == { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_EXCHANGE_TYPE: constants.DEFAULT_EXCHANGE_TYPE, + constants.CONFIG_ENABLED_OPTION: False + }, + "kucoin": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_EXCHANGE_TYPE: constants.CONFIG_EXCHANGE_FUTURE, + constants.CONFIG_ENABLED_OPTION: True + } + } + } + config = { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_ENABLED_OPTION: True + } + } + } + # add constants.CONFIG_ENABLED_OPTION with 2 exchanges in profile, update constants.CONFIG_ENABLED_OPTION + profile._merge_partially_managed_element(config, profile.config, element, template) + assert config == { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_ENABLED_OPTION: False + }, + "kucoin": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_EXCHANGE_TYPE: constants.CONFIG_EXCHANGE_FUTURE, + constants.CONFIG_ENABLED_OPTION: True + } + } + } + + +def test_remove_deleted_elements(profile): + profile.read_config() + element = next(iter(profile.PARTIALLY_MANAGED_ELEMENTS)) + config = { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_ENABLED_OPTION: True, + } + } + } + before_sync_elements_count = len(profile.config[element]) + profile.remove_deleted_elements(config) + # did not remove any element + assert before_sync_elements_count == len(profile.config[element]) + profile.config[element]["plop"] = config[constants.CONFIG_EXCHANGES]["binance"] + assert len(profile.config[element]) == before_sync_elements_count + 1 + profile.remove_deleted_elements(config) + assert before_sync_elements_count == len(profile.config[element]) + # removed "plop" element + assert list(profile.config[element]) == ["binance"] + + +def test_get_element_from_template(profile): + element = next(iter(profile.PARTIALLY_MANAGED_ELEMENTS)) + template = profile.PARTIALLY_MANAGED_ELEMENTS[element] + template_copy = copy.deepcopy(template) + + template_copy["plop"] = 1 + assert profile._get_element_from_template(template, {"plop": 1}) == template_copy + assert "plop" not in template + + +def test_filter_fill_elements(profile): + profile.read_config() + config = { + constants.CONFIG_EXCHANGES: { + "binance": { + constants.CONFIG_EXCHANGE_KEY: constants.DEFAULT_API_KEY, + constants.CONFIG_EXCHANGE_SECRET: constants.DEFAULT_API_SECRET, + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_ENABLED_OPTION: True + } + } + } + allowed_keys = [constants.CONFIG_ENABLED_OPTION, constants.CONFIG_EXCHANGE_PASSWORD] + profile._filter_fill_elements(config, profile.config, constants.CONFIG_EXCHANGES, allowed_keys) + assert profile.config[constants.CONFIG_EXCHANGES] == { + "binance": { + constants.CONFIG_EXCHANGE_PASSWORD: constants.DEFAULT_API_PASSWORD, + constants.CONFIG_ENABLED_OPTION: True + } + } + + +def test_get_all_profiles(): + with mock.patch.object(profiles.Profile, "_load_profile", mock.Mock()) as _load_profile_mock: + nb_files = len(os.listdir(get_profiles_path())) + assert nb_files > 1 + profiles.Profile.get_all_profiles(get_profiles_path()) + assert _load_profile_mock.call_count == nb_files + + +def test_load_profile(): + schema_path = "schema_path" + with mock.patch.object(profiles.Profile, "read_config", mock.Mock()) as read_config_mock: + profile = profiles.Profile._load_profile(test_config.TEST_CONFIG_FOLDER, schema_path) + assert profile.path == test_config.TEST_CONFIG_FOLDER + assert profile.schema_path == schema_path + read_config_mock.assert_called_once() + + +def test_get_existing_profiles_ids(profile): + assert profiles.Profile.get_all_profiles_ids(get_profiles_path()) == ["default"] + assert profiles.Profile.get_all_profiles_ids(get_profiles_path(), ignore=profile.path) == [] diff --git a/packages/commons/tests/profiles/test_profile_data.py b/packages/commons/tests/profiles/test_profile_data.py new file mode 100644 index 0000000000..c80e734edf --- /dev/null +++ b/packages/commons/tests/profiles/test_profile_data.py @@ -0,0 +1,532 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library.* +import copy + +import pytest + +import octobot_commons.profiles as profiles +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_commons.constants as constants +import octobot_commons.enums as enums + +from tests.profiles import get_profile_path, profile + + +@pytest.fixture +def profile_data_dict(): + return { + 'distribution': 'not_default', + 'profile_details': { + 'name': 'profile_name 42', + 'id': 'default', + 'version': "42.42.1b", + 'bot_id': "1234-1224-0000", + 'user_id': "plop-id", + 'nested_strategy_config_id': "123-1221" + }, + 'crypto_currencies': [ + { + 'trading_pairs': ['BTC/USDT'], + 'name': 'Bitcoin', + 'enabled': True + }, + { + 'trading_pairs': ['ETH/USDT'], + 'name': 'ETH', + 'enabled': False + } + ], 'exchanges': [ + { + 'exchange_credential_id': '123-plop', + 'exchange_id': '123-exchange', + 'exchange_account_id': '123-exchange_account_id', + 'exchange_type': 'spot', + 'internal_name': 'cryptocom', + 'sandboxed': True, + } + ], 'future_exchange_data': { + 'default_leverage': 10, + 'symbol_data': [ + { + 'symbol': 'BTC/USDT', + 'leverage': None, + }, + { + 'symbol': 'ETH/USDT', + 'leverage': 5, + } + ] + }, 'trader': { + 'enabled': True + }, 'trader_simulator': { + 'enabled': False, + 'starting_portfolio': { + 'BTC': 10, + 'USDT': 1000 + }, + 'maker_fees': 0.1, + 'taker_fees': 0.1 + }, 'trading': { + 'reference_market': 'BTC', + 'paused': False, + 'minimal_funds': [ + { + "asset": "BTC", + "available": 12, + "total": 12, + }, + { + "asset": "PLOP", + "available": 0.1111, + "total": 0.2222, + } + ], + 'sub_portfolio': { + "BTC": 0.1, + "USDT": 1000 + }, + 'risk': 0.5, + 'sellable_assets': ["BTC", "ETH"] + }, 'tentacles': [ + { + 'name': 'plopEvaluator', + 'config': {}, + }, + { + 'name': 'plopEvaluator', + 'config': { + 'a': True, + 'other': { + 'l': [1, 2], + 'n': None, + } + }, + }, + ], 'options': { + 'values': { + 'plop_key': 'hola senior' + } + }, 'backtesting_context': { + 'start_time_delta': 11313.22, + 'update_interval': 444, + 'starting_portfolio': { + 'plop_key': 'hola senior' + }, + 'exchanges': [ + 'binance', 'kucoin' + ] + } + } + + +@pytest.fixture +def min_profile_data_dict(): + return { + 'profile_details': { + 'name': 'min_profile', + }, + 'crypto_currencies': [ + { + 'trading_pairs': ['BTC/USDT'], + }, + { + 'trading_pairs': ['ETH/USDT'], + 'enabled': False + } + ], 'trading': { + 'reference_market': 'BTC', + }, 'tentacles': [ + { + 'name': 'plopEvaluator', + 'config': {}, + }, + { + 'name': 'plopEvaluator', + 'config': { + 'a': True, + 'other': { + 'l': [1, 2], + 'n': None, + } + }, + }, + ], + 'backtesting_context': { + 'start_time_delta': 11313.22 + }, 'options': { + 'values': { + 'plop_key': 'hola !!!', + 'jour': 'nuit', + } + } + } + + +def test_from_profile(profile): + profile_data = profiles.ProfileData.from_profile(profile.read_config()) + # check one element per attribute to be sure it's all parsed + assert profile_data.distribution == "default" + assert profile_data.profile_details.name == "default" + assert profile_data.crypto_currencies[0].trading_pairs == ['BTC/USDT'] + assert profile_data.exchanges == [] + assert profile_data.trader.enabled is False + assert profile_data.trader_simulator.enabled is True + assert profile_data.trader_simulator.starting_portfolio == {'BTC': 10, 'USDT': 1000} + assert profile_data.trading.risk == 0.5 + assert profile_data.tentacles == [] + + +def test_to_profile(profile): + profile_data = profiles.ProfileData.from_profile(profile.read_config()) + created_profile = profile_data.to_profile("plop_path") + # force missing values + for crypto_data in profile.config[constants.CONFIG_CRYPTO_CURRENCIES].values(): + crypto_data[constants.CONFIG_ENABLED_OPTION] = crypto_data.get(constants.CONFIG_ENABLED_OPTION, True) + # remove not stored values + profile.config[constants.CONFIG_EXCHANGES] = {} + profile.avatar = profile.description = "" + profile.complexity = enums.ProfileComplexity.MEDIUM + profile.risk = enums.ProfileRisk.MODERATE + profile.profile_type = enums.ProfileType.LIVE + profile.origin_url = None + # if both parsing and transforming return the same profile as original one, the whole chain works + profile_dict = profile.as_dict() + assert profile_dict == created_profile.as_dict() + + +def test_from_dict(profile_data_dict): + # use second MinimalFund syntax + profile_data_dict = copy.deepcopy(profile_data_dict) + profile_data_dict["trading"]['minimal_funds'].append( + { + "asset": "ETH", + "value": 111.2, + } + ) + profile_data = profiles.ProfileData.from_dict(profile_data_dict) + # check one element per attribute to be sure it's all parsed + assert profile_data.profile_details.name == "profile_name 42" + assert profile_data.crypto_currencies[0].trading_pairs == ['BTC/USDT'] + assert profile_data.distribution == "not_default" + assert profile_data.future_exchange_data.default_leverage == 10 + assert profile_data.future_exchange_data.symbol_data[0].symbol == "BTC/USDT" + assert profile_data.future_exchange_data.symbol_data[0].leverage == None + assert profile_data.future_exchange_data.symbol_data[1].symbol == "ETH/USDT" + assert profile_data.future_exchange_data.symbol_data[1].leverage == 5 + assert profile_data.exchanges[0].exchange_credential_id == "123-plop" + assert profile_data.exchanges[0].exchange_account_id == "123-exchange_account_id" + assert profile_data.exchanges[0].internal_name == "cryptocom" + assert profile_data.exchanges[0].sandboxed is True + assert profile_data.exchanges[0].exchange_type == "spot" + assert profile_data.trader.enabled is True + assert profile_data.trader_simulator.enabled is False + assert profile_data.trader_simulator.starting_portfolio == {'BTC': 10, 'USDT': 1000} + assert profile_data.trading.risk == 0.5 + assert profile_data.trading.minimal_funds == [ + profiles.MinimalFund("BTC", 12, 12), + profiles.MinimalFund("PLOP", 0.1111, 0.2222), + profiles.MinimalFund("ETH", 111.2, 111.2), + ] + assert profile_data.tentacles[0].name == "plopEvaluator" + assert profile_data.tentacles[1].config["other"]["l"] == [1, 2] + + +def test_from_min_dict(min_profile_data_dict): + profile_data = profiles.ProfileData.from_dict(min_profile_data_dict) + # check one element per attribute to be sure it's all parsed + assert profile_data.profile_details.name == "min_profile" + assert profile_data.crypto_currencies[0].trading_pairs == ['BTC/USDT'] + assert profile_data.crypto_currencies[0].name is None + assert profile_data.crypto_currencies[0].enabled is True + assert profile_data.crypto_currencies[1].trading_pairs == ['ETH/USDT'] + assert profile_data.crypto_currencies[1].name is None + assert profile_data.crypto_currencies[1].enabled is False + assert profile_data.future_exchange_data.default_leverage is None + assert profile_data.future_exchange_data.symbol_data == [] + assert profile_data.exchanges == [] + assert profile_data.trader.enabled is True + assert profile_data.trader_simulator.enabled is False + assert profile_data.trader_simulator.starting_portfolio == {} + assert profile_data.trading.risk == 1 + assert profile_data.tentacles[0].name == "plopEvaluator" + assert profile_data.tentacles[1].config["other"]["l"] == [1, 2] + assert profile_data.options.values["jour"] == "nuit" + assert profile_data.options.values["plop_key"] == "hola !!!" + assert profile_data.distribution == constants.DEFAULT_DISTRIBUTION + full_profile_data_dict = profile_data.to_dict(include_default_values=True) + assert len(full_profile_data_dict) > len(min_profile_data_dict) + profile_data_dict = profile_data.to_dict(include_default_values=False) + # default values in values but: keys are present except for exchanges & distribution, which content is empty (default) + full_profile_data_dict_keys_without_exchange = list(full_profile_data_dict.keys()) + full_profile_data_dict_keys_without_exchange.remove("exchanges") + full_profile_data_dict_keys_without_exchange.remove("distribution") + assert sorted(list(profile_data_dict.keys()) + ["_updated_fields", ]) == sorted(full_profile_data_dict_keys_without_exchange) + + +def test_from_dict_objects(profile_data_dict): + profile_data_objects = profiles.ProfileData.from_dict(profile_data_dict) + profile_data = profiles.ProfileData.from_dict({ + "profile_details": profile_data_objects.profile_details, + "crypto_currencies": profile_data_objects.crypto_currencies, + "exchanges": profile_data_objects.exchanges, + "trader": profile_data_objects.trader, + "trader_simulator": profile_data_objects.trader_simulator, + "trading": profile_data_objects.trading, + "tentacles": profile_data_objects.tentacles, + "options": profile_data_objects.options, + }) + # check one element per attribute to be sure it's all parsed + assert profile_data.profile_details.version == "42.42.1b" + assert profile_data.profile_details.nested_strategy_config_id == "123-1221" + assert profile_data.crypto_currencies[0].trading_pairs == ['BTC/USDT'] + assert profile_data.exchanges[0].exchange_credential_id == "123-plop" + assert profile_data.exchanges[0].exchange_account_id == "123-exchange_account_id" + assert profile_data.trader.enabled is True + assert profile_data.trader_simulator.enabled is False + assert profile_data.trader_simulator.starting_portfolio == {'BTC': 10, 'USDT': 1000} + assert profile_data.trading.risk == 0.5 + assert profile_data.trading.paused is False + assert profile_data.tentacles[0].name == "plopEvaluator" + assert profile_data.tentacles[1].config["other"]["l"] == [1, 2] + assert profile_data.options.values['plop_key'] == 'hola senior' + assert profile_data.backtesting_context == profile_data_import.BacktestingContext() + + +def test_to_dict(profile_data_dict): + profile_data = profiles.ProfileData.from_dict(profile_data_dict) + # if both parsing and transforming return the same profile as original one, the whole chain works + assert profile_data_dict == _remove_updated_fields(profile_data.to_dict()) + + dict_without_default_values = profile_data.to_dict(include_default_values=False) + assert profile_data_dict != _remove_updated_fields(dict_without_default_values) + + # ensure no empty elements + assert len(dict_without_default_values) == len(profile_data_dict) + for values in dict_without_default_values.values(): + assert len(values) + + +def test_get_update(profile_data_dict): + """Test get_update() method with various field changes and edge cases""" + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + + # Test simple field changes + profile_data_2.profile_details.name = "new_profile_name" + profile_data_2.trading.risk = 0.8 + update = profile_data_1.get_update(profile_data_2) + assert update.profile_details.name == "new_profile_name" + assert update.profile_details._updated_fields == ["name"] + assert update.trading.risk == 0.8 + assert update.trading._updated_fields == ["risk"] + assert "profile_details" in update._updated_fields + assert "trading" in update._updated_fields + + # Test nested object changes + profile_data_2.profile_details.version = "1.0.0" + profile_data_2.trading.reference_market = "USDT" + update = profile_data_1.get_update(profile_data_2) + assert set(update.profile_details._updated_fields) == {"name", "version"} + assert set(update.trading._updated_fields) == {"risk", "reference_market"} + + # Test list field changes + profile_data_2.crypto_currencies[0].enabled = False + profile_data_2.crypto_currencies[0].trading_pairs = ["BTC/USDT", "ETH/USDT"] + new_exchange = profile_data_import.ExchangeData( + exchange_type="futures", + internal_name="binance", + sandboxed=False + ) + profile_data_2.exchanges.append(new_exchange) + profile_data_2.tentacles[1].config["new_key"] = "new_value" + update = profile_data_1.get_update(profile_data_2) + assert update.crypto_currencies[0].enabled is False + assert update.crypto_currencies[0]._updated_fields == ["trading_pairs", "enabled"] + assert len(update.exchanges) == len(profile_data_2.exchanges) + assert update.exchanges[-1].internal_name == "binance" + assert "crypto_currencies" in update._updated_fields + assert "exchanges" in update._updated_fields + assert "tentacles" in update._updated_fields + + # Test no changes + profile_data_3 = copy.deepcopy(profile_data_2) + update = profile_data_2.get_update(profile_data_3) + assert update._updated_fields == [] + + # Test empty to populated + profile_data_empty = profiles.ProfileData() + update = profile_data_empty.get_update(profile_data_1) + assert "profile_details" in update._updated_fields + assert "crypto_currencies" in update._updated_fields + assert "trading" in update._updated_fields + + +def test_update(profile_data_dict): + """Test update() method with various field changes, lists, and edge cases""" + # Test simple field updates + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + profile_data_2.profile_details.name = "updated_name" + profile_data_2.trading.risk = 0.9 + update = profile_data_1.get_update(profile_data_2) + original_name = profile_data_1.profile_details.name + original_risk = profile_data_1.trading.risk + profile_data_1.update(update) + assert profile_data_1.profile_details.name == "updated_name" + assert profile_data_1.trading.risk == 0.9 + assert profile_data_1.profile_details.name != original_name + assert profile_data_1.trading.risk != original_risk + + # Test nested object updates + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + profile_data_2.profile_details.version = "2.0.0" + profile_data_2.trader.enabled = False + profile_data_2.trader_simulator.enabled = True + profile_data_2.trader_simulator.maker_fees = 0.2 + update = profile_data_1.get_update(profile_data_2) + profile_data_1.update(update) + assert profile_data_1.profile_details.version == "2.0.0" + assert profile_data_1.trader.enabled is False + assert profile_data_1.trader_simulator.enabled is True + assert profile_data_1.trader_simulator.maker_fees == 0.2 + + # Test list element modifications + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + profile_data_2.crypto_currencies[0].enabled = False + profile_data_2.crypto_currencies[0].trading_pairs.append("LTC/USDT") + profile_data_2.exchanges[0].sandboxed = False + update = profile_data_1.get_update(profile_data_2) + original_enabled = profile_data_1.crypto_currencies[0].enabled + profile_data_1.update(update) + assert profile_data_1.crypto_currencies[0].enabled is False + assert profile_data_1.crypto_currencies[0].enabled != original_enabled + assert "LTC/USDT" in profile_data_1.crypto_currencies[0].trading_pairs + assert profile_data_1.exchanges[0].sandboxed is False + + # Test adding list elements + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + new_crypto = profile_data_import.CryptoCurrencyData( + trading_pairs=["LTC/USDT"], + name="Litecoin", + enabled=True + ) + profile_data_2.crypto_currencies.append(new_crypto) + new_tentacle = profile_data_import.TentaclesData( + name="NewEvaluator", + config={"key": "value"} + ) + profile_data_2.tentacles.append(new_tentacle) + update = profile_data_1.get_update(profile_data_2) + original_crypto_count = len(profile_data_1.crypto_currencies) + original_tentacle_count = len(profile_data_1.tentacles) + profile_data_1.update(update) + assert len(profile_data_1.crypto_currencies) == original_crypto_count + 1 + assert profile_data_1.crypto_currencies[-1].name == "Litecoin" + assert len(profile_data_1.tentacles) == original_tentacle_count + 1 + assert profile_data_1.tentacles[-1].name == "NewEvaluator" + + # Test removing list elements + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + profile_data_2.crypto_currencies.pop() + profile_data_2.tentacles.pop() + update = profile_data_1.get_update(profile_data_2) + original_crypto_count = len(profile_data_1.crypto_currencies) + original_tentacle_count = len(profile_data_1.tentacles) + profile_data_1.update(update) + assert len(profile_data_1.crypto_currencies) == original_crypto_count - 1 + assert len(profile_data_1.tentacles) == original_tentacle_count - 1 + + # Test future exchange data updates + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + profile_data_2.future_exchange_data.default_leverage = 20 + profile_data_2.future_exchange_data.symbol_data[0].leverage = 15 + update = profile_data_1.get_update(profile_data_2) + original_leverage = profile_data_1.future_exchange_data.default_leverage + profile_data_1.update(update) + assert profile_data_1.future_exchange_data.default_leverage == 20 + assert profile_data_1.future_exchange_data.default_leverage != original_leverage + assert profile_data_1.future_exchange_data.symbol_data[0].leverage == 15 + + # Test trading minimal_funds updates + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + new_fund = profile_data_import.MinimalFund("ETH", 50.0, 50.0) + profile_data_2.trading.minimal_funds.append(new_fund) + profile_data_2.trading.minimal_funds[0].available = 20.0 + update = profile_data_1.get_update(profile_data_2) + original_fund_count = len(profile_data_1.trading.minimal_funds) + profile_data_1.update(update) + assert len(profile_data_1.trading.minimal_funds) == original_fund_count + 1 + assert profile_data_1.trading.minimal_funds[0].available == 20.0 + assert profile_data_1.trading.minimal_funds[-1].asset == "ETH" + + # Test update without _updated_fields (should not update) + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + profile_data_2.profile_details.name = "should_not_update" + profile_data_2.trading.risk = 0.99 + update = profile_data_1.get_update(profile_data_2) + update._updated_fields = [] + update.profile_details._updated_fields = [] + update.trading._updated_fields = [] + original_name = profile_data_1.profile_details.name + original_risk = profile_data_1.trading.risk + profile_data_1.update(update) + assert profile_data_1.profile_details.name == original_name + assert profile_data_1.trading.risk == original_risk + + +def test_get_update_and_update_roundtrip(profile_data_dict): + """Test that get_update() and update() work together correctly""" + profile_data_1 = profiles.ProfileData.from_dict(profile_data_dict) + profile_data_2 = copy.deepcopy(profile_data_1) + + # Make various changes + profile_data_2.profile_details.name = "roundtrip_name" + profile_data_2.profile_details.version = "3.0.0" + profile_data_2.trading.risk = 0.7 + profile_data_2.trading.reference_market = "ETH" + profile_data_2.crypto_currencies[0].enabled = False + profile_data_2.trader.enabled = True + + # Get update and apply it + update = profile_data_1.get_update(profile_data_2) + profile_data_1.update(update) + + # Verify profile_data_1 matches profile_data_2 for changed fields + assert profile_data_1.profile_details.name == profile_data_2.profile_details.name + assert profile_data_1.profile_details.version == profile_data_2.profile_details.version + assert profile_data_1.trading.risk == profile_data_2.trading.risk + assert profile_data_1.trading.reference_market == profile_data_2.trading.reference_market + assert profile_data_1.crypto_currencies[0].enabled == profile_data_2.crypto_currencies[0].enabled + assert profile_data_1.trader.enabled == profile_data_2.trader.enabled + + +def _remove_updated_fields(profile_data_dict: dict) -> dict: + profile_data_dict.pop("_updated_fields", None) + for value in profile_data_dict.values(): + if isinstance(value, dict): + _remove_updated_fields(value) + elif isinstance(value, list): + for item in value: + if isinstance(item, dict): + _remove_updated_fields(item) + return profile_data_dict diff --git a/packages/commons/tests/profiles/test_profile_sharing.py b/packages/commons/tests/profiles/test_profile_sharing.py new file mode 100644 index 0000000000..f287594815 --- /dev/null +++ b/packages/commons/tests/profiles/test_profile_sharing.py @@ -0,0 +1,195 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import os +import shutil +import zipfile +import contextlib +import mock +import pathlib + +import pytest + +import octobot_commons.constants as constants +import octobot_commons.errors as commons_errors +import octobot_commons.profiles as profiles +import octobot_commons.profiles.profile_sharing as profile_sharing +from octobot_commons.profiles.profile_sharing import _get_unique_profile_folder, _ensure_unique_profile_id, \ + _get_profile_name +import octobot_commons.tests.test_config as test_config + +from tests.profiles import profile, get_profile_path, invalid_profile + + +def test_export_profile(profile): + export_path = "exported" + exported_file = f"{export_path}.zip" + tentacles_config = os.path.join(get_profile_path(), "tentacles_config.json") + spec_tentacles_config = os.path.join(get_profile_path(), "specific_config") + other_profile = os.path.join(test_config.TEST_FOLDER, "other_profile") + with _cleaned_tentacles(export_path, + exported_file, + tentacles_config, + dir1=spec_tentacles_config, + dir2=other_profile): + # create fake tentacles config + shutil.copy(profile.config_file(), tentacles_config) + os.mkdir(spec_tentacles_config) + shutil.copy(profile.config_file(), os.path.join(spec_tentacles_config, "t1.json")) + shutil.copy(profile.config_file(), os.path.join(spec_tentacles_config, "t2.json")) + with mock.patch.object(os, "remove", mock.Mock()) as remove_mock: + profiles.export_profile(profile, export_path) + remove_mock.assert_not_called() + profiles.export_profile(profile, export_path) + assert os.path.isfile(exported_file) + with zipfile.ZipFile(exported_file) as zipped: + zipped.extractall(other_profile) + # ensure all files got zipped + for root, dirs, files in os.walk(profile.path): + dir_path = os.path.join(other_profile, "specific_config") if "specific_config" in root else other_profile + assert all( + os.path.isfile(os.path.join(dir_path, f)) + for f in files + ) + + +def test_export_profile_with_existing_file(profile): + export_path = "exported" + exported_file = f"{export_path}.zip" + tentacles_config = os.path.join(get_profile_path(), "tentacles_config.json") + spec_tentacles_config = os.path.join(get_profile_path(), "specific_config") + other_profile = os.path.join(test_config.TEST_FOLDER, "other_profile") + with _cleaned_tentacles(export_path, + exported_file, + tentacles_config, + dir1=spec_tentacles_config, + dir2=other_profile): + # create fake tentacles config + shutil.copy(profile.config_file(), tentacles_config) + os.mkdir(spec_tentacles_config) + shutil.copy(profile.config_file(), os.path.join(spec_tentacles_config, "t1.json")) + shutil.copy(profile.config_file(), os.path.join(spec_tentacles_config, "t2.json")) + shutil.copy(profile.config_file(), f"{export_path}.{constants.PROFILE_EXPORT_FORMAT}") + with mock.patch.object(os, "remove", mock.Mock()) as remove_mock: + profiles.export_profile(profile, export_path) + remove_mock.assert_called_once_with(f"{export_path}.{constants.PROFILE_EXPORT_FORMAT}") + assert os.path.isfile(exported_file) + with zipfile.ZipFile(exported_file) as zipped: + zipped.extractall(other_profile) + # ensure all files got zipped + for root, dirs, files in os.walk(profile.path): + dir_path = os.path.join(other_profile, "specific_config") if "specific_config" in root else other_profile + assert all( + os.path.isfile(os.path.join(dir_path, f)) + for f in files + ) + + +def test_import_install_profile(profile, invalid_profile): + export_path = os.path.join(test_config.TEST_FOLDER, "super_profile") + exported_file = f"{export_path}.zip" + spec_tentacles_config = os.path.join(get_profile_path(), "specific_config") + tentacles_config = os.path.join(get_profile_path(), "tentacles_config.json") + other_profile = os.path.join(constants.USER_PROFILES_FOLDER, "default") + profile_schema = os.path.join(test_config.TEST_CONFIG_FOLDER, "profile_schema.json") + with _cleaned_tentacles(export_path, + exported_file, + tentacles_config, + dir1=other_profile, + dir2=constants.USER_FOLDER, + dir3=spec_tentacles_config): + # create fake tentacles config + shutil.copy(profile.config_file(), tentacles_config) + os.mkdir(spec_tentacles_config) + shutil.copy(profile.config_file(), os.path.join(spec_tentacles_config, "t1.json")) + shutil.copy(profile.config_file(), os.path.join(spec_tentacles_config, "t2.json")) + profiles.export_profile(profile, export_path) + imported_profile_path = os.path.join(constants.USER_PROFILES_FOLDER, "default") + with mock.patch.object(profile_sharing, "_ensure_unique_profile_id", mock.Mock()) \ + as _ensure_unique_profile_id_mock: + imported_profile = profiles.import_profile(exported_file, profile_schema, origin_url="plop.wow") + assert isinstance(imported_profile, profiles.Profile) + profile.read_config() + assert profile.name == imported_profile.name + assert profile.path != imported_profile.path + assert profile.imported is False + assert imported_profile.imported is True + assert imported_profile.origin_url == "plop.wow" + _ensure_unique_profile_id_mock.assert_called_once() + assert os.path.isdir(imported_profile_path) + # ensure all files got imported + for root, dirs, files in os.walk(profile.path): + dir_path = os.path.join(other_profile, "specific_config") if "specific_config" in root else other_profile + assert all( + os.path.isfile(os.path.join(dir_path, f)) + for f in files + ) + assert isinstance(profiles.import_profile(exported_file, profile_schema), profiles.Profile) + assert os.path.isdir(f"{imported_profile_path}_2") + assert os.path.isdir(imported_profile_path) + assert not os.path.isdir(f"{imported_profile_path}_3") + + # now with invalid profile + profiles.export_profile(invalid_profile, export_path) + with pytest.raises(commons_errors.ProfileImportError): + profiles.import_profile(exported_file, profile_schema) + + +def test_get_unique_profile_folder(profile): + assert _get_unique_profile_folder(profile.config_file()) == f"{profile.config_file()}_2" + other_file = f"{profile.config_file()}_2" + other_file_2 = f"{profile.config_file()}_3" + other_file_3 = f"{profile.config_file()}_5" + with _cleaned_tentacles(other_file, other_file_2, other_file_3): + shutil.copy(profile.config_file(), other_file) + assert _get_unique_profile_folder(profile.config_file()) == f"{profile.config_file()}_3" + shutil.copy(profile.config_file(), other_file_2) + assert _get_unique_profile_folder(profile.config_file()) == f"{profile.config_file()}_4" + shutil.copy(profile.config_file(), other_file_3) + assert _get_unique_profile_folder(profile.config_file()) == f"{profile.config_file()}_4" + + +def test_ensure_unique_profile_id(profile): + other_profile = "second_profile" + profiles_path = pathlib.Path(profile.path).parent + other_profile_path = profiles_path.joinpath(other_profile) + with _cleaned_tentacles(dir1=other_profile_path): + shutil.copytree(profile.path, other_profile_path) + other_profile = profiles.Profile(other_profile_path).read_config() + _ensure_unique_profile_id(other_profile) + other_profile.save() + ids = profiles.Profile.get_all_profiles_ids(profiles_path) + assert len(ids) == 2 + # changed new profile id + assert ids[0] != ids[1] + + +@contextlib.contextmanager +def _cleaned_tentacles(*items, **dirs): + try: + for item in items: + if os.path.isfile(item): + os.remove(item) + for directory in dirs.values(): + if os.path.isdir(directory): + shutil.rmtree(directory) + yield + finally: + for item in items: + if os.path.isfile(item): + os.remove(item) + for directory in dirs.values(): + if os.path.isdir(directory): + shutil.rmtree(directory) diff --git a/packages/commons/tests/signals/__init__.py b/packages/commons/tests/signals/__init__.py new file mode 100644 index 0000000000..61fb9459fc --- /dev/null +++ b/packages/commons/tests/signals/__init__.py @@ -0,0 +1,77 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.signals as signals +import octobot_commons.enums as enums + + +@pytest.fixture +def signal(): + return signals.Signal("hello topic", {"hi": "plop"}) + + +@pytest.fixture +def signal_bundle(signal): + return signals.SignalBundle("hello identifier", [signal], "version") + + +@pytest.fixture +def signal_bundle_builder(): + return signals.SignalBundleBuilder("hello builder identifier") + + +@pytest.fixture +def signal_builder_wrapper(): + return signals.SignalBuilderWrapper( + "hello wrapper identifier", + signal_builder_class=signals.SignalBundleBuilder, + timeout=-2 + ) + + +@pytest.fixture +def signal_dict(): + return { + enums.SignalsAttrs.TOPIC.value: "dict topic", + enums.SignalsAttrs.CONTENT.value: {"dict": "content", "hi": 1}, + enums.SignalsAttrs.DEPENDENCIES.value: None, + } + + +@pytest.fixture +def signal_with_dependencies_dict(): + return { + enums.SignalsAttrs.TOPIC.value: "dict topic", + enums.SignalsAttrs.CONTENT.value: {"dict": "content", "hi": 1}, + enums.SignalsAttrs.DEPENDENCIES.value: { + enums.SignalDependenciesAttrs.DEPENDENCY.value: [ + {"plop": "123"}, + {"PLIP": "456"} + ] + }, + } + + +@pytest.fixture +def signal_bundle_dict(signal_dict): + return { + enums.CommunityFeedAttrs.VALUE.value: { + enums.SignalBundlesAttrs.IDENTIFIER.value: "dict identifier", + enums.SignalBundlesAttrs.SIGNALS.value: [signal_dict], + enums.SignalBundlesAttrs.VERSION.value: "dict_version" + }, + } diff --git a/packages/commons/tests/signals/test_signal.py b/packages/commons/tests/signals/test_signal.py new file mode 100644 index 0000000000..d9262c7a8c --- /dev/null +++ b/packages/commons/tests/signals/test_signal.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums +import octobot_commons.signals as signals + + +from tests.signals import signal + + +def test_to_dict(signal): + assert signal.to_dict() == { + octobot_commons.enums.SignalsAttrs.TOPIC.value: "hello topic", + octobot_commons.enums.SignalsAttrs.CONTENT.value: {"hi": "plop"}, + octobot_commons.enums.SignalsAttrs.DEPENDENCIES.value: None, + } + signal.dependencies = signals.SignalDependencies([ + {"plop": "123"}, + {"PLIP": "123"} + ]) + assert signal.to_dict() == { + octobot_commons.enums.SignalsAttrs.TOPIC.value: "hello topic", + octobot_commons.enums.SignalsAttrs.CONTENT.value: {"hi": "plop"}, + octobot_commons.enums.SignalsAttrs.DEPENDENCIES.value: { + octobot_commons.enums.SignalDependenciesAttrs.DEPENDENCY.value: [ + {"plop": "123"}, + {"PLIP": "123"} + ] + }, + } + + +def test__str__(signal): + assert all(sub_str in str(signal) for sub_str in ("hello topic", "hi", "plop")) diff --git a/packages/commons/tests/signals/test_signal_builder_wrapper.py b/packages/commons/tests/signals/test_signal_builder_wrapper.py new file mode 100644 index 0000000000..b190e93f64 --- /dev/null +++ b/packages/commons/tests/signals/test_signal_builder_wrapper.py @@ -0,0 +1,53 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums +import octobot_commons.signals as signals + +from tests.signals import signal_builder_wrapper + + +def test_register_user(signal_builder_wrapper): + assert signal_builder_wrapper._users_count == 0 + signal_builder_wrapper.register_user() + assert signal_builder_wrapper._users_count == 1 + signal_builder_wrapper.register_user() + assert signal_builder_wrapper._users_count == 2 + + +def test_unregister_user(signal_builder_wrapper): + assert signal_builder_wrapper._users_count == 0 + signal_builder_wrapper.register_user() + assert signal_builder_wrapper._users_count == 1 + signal_builder_wrapper.register_user() + assert signal_builder_wrapper._users_count == 2 + signal_builder_wrapper.unregister_user() + assert signal_builder_wrapper._users_count == 1 + signal_builder_wrapper.register_user() + assert signal_builder_wrapper._users_count == 2 + signal_builder_wrapper.unregister_user() + assert signal_builder_wrapper._users_count == 1 + signal_builder_wrapper.unregister_user() + assert signal_builder_wrapper._users_count == 0 + + +def test_has_single_user(signal_builder_wrapper): + assert signal_builder_wrapper.has_single_user() is False + signal_builder_wrapper._users_count = 1 + assert signal_builder_wrapper.has_single_user() is True + signal_builder_wrapper._users_count = 10 + assert signal_builder_wrapper.has_single_user() is False + signal_builder_wrapper._users_count = 1 + assert signal_builder_wrapper.has_single_user() is True diff --git a/packages/commons/tests/signals/test_signal_bundle.py b/packages/commons/tests/signals/test_signal_bundle.py new file mode 100644 index 0000000000..f1905a0977 --- /dev/null +++ b/packages/commons/tests/signals/test_signal_bundle.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums + +from tests.signals import signal, signal_bundle + + +def test_to_dict(signal_bundle, signal): + assert signal_bundle.to_dict() == { + octobot_commons.enums.SignalBundlesAttrs.IDENTIFIER.value: "hello identifier", + octobot_commons.enums.SignalBundlesAttrs.SIGNALS.value: [signal.to_dict()], + octobot_commons.enums.SignalBundlesAttrs.VERSION.value: "version", + } + + +def test__str__(signal_bundle): + assert all(sub_str in str(signal_bundle) + for sub_str in ("hello identifier", "version", "hello topic", "hi", "plop")) + + +def test_get_version(signal_bundle): + assert signal_bundle._get_version() == "1.0.0" diff --git a/packages/commons/tests/signals/test_signal_bundle_builder.py b/packages/commons/tests/signals/test_signal_bundle_builder.py new file mode 100644 index 0000000000..95bdb98b89 --- /dev/null +++ b/packages/commons/tests/signals/test_signal_bundle_builder.py @@ -0,0 +1,60 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums +import octobot_commons.signals as signals + +from tests.signals import signal_bundle_builder + + +def test_register_signal(signal_bundle_builder): + assert signal_bundle_builder.signals == [] + signal_bundle_builder.register_signal("plop_topic", {"content_key1": 0}, other_kwarg=11) + assert len(signal_bundle_builder.signals) == 1 + assert signal_bundle_builder.signals[0].topic == "plop_topic" + assert signal_bundle_builder.signals[0].content == {"content_key1": 0} + + +def test_create_signal(signal_bundle_builder): + created_signal = signal_bundle_builder.create_signal("plop_topic", {"content_key1": 0}, other_kwarg=11) + assert created_signal.topic == "plop_topic" + assert created_signal.content == {"content_key1": 0} + + +def test_is_empty(signal_bundle_builder): + assert signal_bundle_builder.is_empty() + signal_bundle_builder.register_signal("plop_topic", {"content_key1": 0}, other_kwarg=11) + assert not signal_bundle_builder.is_empty() + + +def test_build(signal_bundle_builder): + signal_bundle_builder.version = "1" + empty_build_bundle = signal_bundle_builder.build() + assert empty_build_bundle.identifier == "hello builder identifier" + assert empty_build_bundle.signals == [] + assert empty_build_bundle.version == "1" + signal_bundle_builder.register_signal("plop_topic", {"content_key1": 0}, other_kwarg=11) + full_build_bundle = signal_bundle_builder.build() + assert full_build_bundle.identifier == "hello builder identifier" + assert full_build_bundle.signals is signal_bundle_builder.signals + assert full_build_bundle.version == "1" + + +def test_reset(signal_bundle_builder): + assert signal_bundle_builder.signals == [] + signal_bundle_builder.register_signal("plop_topic", {"content_key1": 0}, other_kwarg=11) + assert len(signal_bundle_builder.signals) == 1 + signal_bundle_builder.reset() + assert signal_bundle_builder.signals == [] diff --git a/packages/commons/tests/signals/test_signal_dependencies.py b/packages/commons/tests/signals/test_signal_dependencies.py new file mode 100644 index 0000000000..c8e70aa678 --- /dev/null +++ b/packages/commons/tests/signals/test_signal_dependencies.py @@ -0,0 +1,114 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import copy + +import octobot_commons.enums +import octobot_commons.signals as signals + + +@pytest.fixture +def no_dependencies(): + return signals.SignalDependencies() + + +@pytest.fixture +def single_dependencies(): + return signals.SignalDependencies([ + {"PLIP": "123"} + ]) + + +@pytest.fixture +def dual_dependencies(): + return signals.SignalDependencies([ + {"plop": "123"}, + {"PLIP": "123"} + ]) + + +def test_extend(no_dependencies, single_dependencies, dual_dependencies): + no_dependencies.extend(no_dependencies) + assert no_dependencies == no_dependencies + no_dependencies.extend(single_dependencies) + assert no_dependencies == single_dependencies + single_dependencies.extend(dual_dependencies) + assert single_dependencies == signals.SignalDependencies([ + {"PLIP": "123"}, + {"plop": "123"}, + {"PLIP": "123"} + ]) + + +def test_is_filled_by(no_dependencies, single_dependencies, dual_dependencies): + assert no_dependencies.is_filled_by(no_dependencies) is True + assert no_dependencies.is_filled_by(single_dependencies) is True + assert no_dependencies.is_filled_by(dual_dependencies) is True + + + assert single_dependencies.is_filled_by(no_dependencies) is False + assert single_dependencies.is_filled_by(single_dependencies) is True + assert single_dependencies.is_filled_by(dual_dependencies) is True + + assert dual_dependencies.is_filled_by(no_dependencies) is False + assert dual_dependencies.is_filled_by(single_dependencies) is False + assert dual_dependencies.is_filled_by(dual_dependencies) is True + + # with extended dependencies + saved_no_dependencies = copy.deepcopy(no_dependencies) + no_dependencies.extend(single_dependencies) + assert no_dependencies.is_filled_by(saved_no_dependencies) is False + assert no_dependencies.is_filled_by(single_dependencies) is True + assert no_dependencies.is_filled_by(dual_dependencies) is True + + +def test_to_dict(no_dependencies, single_dependencies, dual_dependencies): + assert no_dependencies.to_dict() == { + octobot_commons.enums.SignalDependenciesAttrs.DEPENDENCY.value: [] + } + assert single_dependencies.to_dict() == { + octobot_commons.enums.SignalDependenciesAttrs.DEPENDENCY.value: [ + {"PLIP": "123"} + ] + } + assert dual_dependencies.to_dict() == { + octobot_commons.enums.SignalDependenciesAttrs.DEPENDENCY.value: [ + {"plop": "123"}, + {"PLIP": "123"} + ] + } + + +def test_eq(no_dependencies, single_dependencies, dual_dependencies): + assert no_dependencies != None + assert no_dependencies == no_dependencies + assert no_dependencies == signals.SignalDependencies() + assert single_dependencies == single_dependencies + assert single_dependencies == signals.SignalDependencies([ + {"PLIP": "123"} + ]) + assert dual_dependencies == dual_dependencies + assert dual_dependencies == signals.SignalDependencies([ + {"plop": "123"}, + {"PLIP": "123"} + ]) + assert no_dependencies != single_dependencies + + +def test_bool(no_dependencies, single_dependencies, dual_dependencies): + assert bool(no_dependencies) is False + assert bool(single_dependencies) is True + assert bool(dual_dependencies) is True diff --git a/packages/commons/tests/signals/test_signal_factory.py b/packages/commons/tests/signals/test_signal_factory.py new file mode 100644 index 0000000000..4f42c5de00 --- /dev/null +++ b/packages/commons/tests/signals/test_signal_factory.py @@ -0,0 +1,43 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.signals as signals + +from tests.signals import signal_dict, signal_bundle_dict, signal_with_dependencies_dict + + +def test_create_signal_bundle(signal_bundle_dict): + created_bundle = signals.create_signal_bundle(signal_bundle_dict) + assert len(created_bundle.signals) == 1 + assert created_bundle.identifier == "dict identifier" + assert created_bundle.version == "dict_version" + assert created_bundle.signals[0].topic == "dict topic" + assert created_bundle.signals[0].content == {"dict": "content", "hi": 1} + + +def test_create_signal(signal_dict): + created_signal = signals.create_signal(signal_dict) + assert created_signal.topic == "dict topic" + assert created_signal.content == {"dict": "content", "hi": 1} + + +def test_create_signal_with_dependencies(signal_with_dependencies_dict): + created_signal = signals.create_signal(signal_with_dependencies_dict) + assert created_signal.topic == "dict topic" + assert created_signal.content == {"dict": "content", "hi": 1} + assert created_signal.dependencies == signals.SignalDependencies([ + {"plop": "123"}, + {"PLIP": "456"} + ]) diff --git a/packages/commons/tests/signals/test_signal_publisher.py b/packages/commons/tests/signals/test_signal_publisher.py new file mode 100644 index 0000000000..5b929a22ae --- /dev/null +++ b/packages/commons/tests/signals/test_signal_publisher.py @@ -0,0 +1,209 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time +import mock +import pytest +import asyncio + +import octobot_commons.errors +import octobot_commons.signals as signals +import octobot_commons.signals.signals_emitter as signals_emitter +import octobot_commons.asyncio_tools as asyncio_tools + +from tests.signals import signal_builder_wrapper + + +@pytest.fixture +def publisher(): + try: + yield signals.SignalPublisher.instance() + finally: + signals.SignalPublisher.instance()._timeout_watcher_tasks = {} + signals.SignalPublisher.instance()._signal_builder_wrappers = {} + + +def test_get_signal_bundle_builder(publisher, signal_builder_wrapper): + with pytest.raises(octobot_commons.errors.MissingSignalBuilder): + signals.SignalPublisher.instance().get_signal_bundle_builder("") + signals.SignalPublisher.instance()._signal_builder_wrappers["Hi"] = signal_builder_wrapper + with pytest.raises(octobot_commons.errors.MissingSignalBuilder): + signals.SignalPublisher.instance().get_signal_bundle_builder("") + assert signals.SignalPublisher.instance().get_signal_bundle_builder("Hi") \ + is signal_builder_wrapper.signal_bundle_builder + + +@pytest.mark.asyncio +async def test_remote_signal_bundle_builder(publisher, signal_builder_wrapper): + async with signals.SignalPublisher.instance().remote_signal_bundle_builder( + "wkey", "widentifier" + ) as builder: + assert isinstance(builder, signals.SignalPublisher.DEFAULT_SIGNAL_BUILDER_CLASS) + assert "wkey" in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" not in signals.SignalPublisher.instance()._timeout_watcher_tasks + assert "wkey" not in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" not in signals.SignalPublisher.instance()._timeout_watcher_tasks + + class OtherSignalBuilder(signals.SignalPublisher.DEFAULT_SIGNAL_BUILDER_CLASS): + def __init__(self, identifier: str, other_arg): + super().__init__(identifier) + self.other_arg = other_arg + with mock.patch.object(signals.SignalPublisher.instance(), "_emit_signal_if_necessary", mock.AsyncMock()) as \ + _emit_signal_if_necessary_mock: + with pytest.raises(TypeError): + # missing builder custom arg + async with signals.SignalPublisher.instance().remote_signal_bundle_builder( + "wkey", "widentifier", timeout=1, signal_builder_class=OtherSignalBuilder + ): + pass + _emit_signal_if_necessary_mock.assert_not_called() + assert "wkey" not in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" not in signals.SignalPublisher.instance()._timeout_watcher_tasks + with pytest.raises(RuntimeError): + async with signals.SignalPublisher.instance().remote_signal_bundle_builder( + "wkey", "widentifier", timeout=1, signal_builder_class=OtherSignalBuilder, builder_args=("other", ) + ) as builder: + assert isinstance(builder, OtherSignalBuilder) + assert builder.other_arg == "other" + assert "wkey" in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" in signals.SignalPublisher.instance()._timeout_watcher_tasks + raise RuntimeError + _emit_signal_if_necessary_mock.assert_not_called() + assert "wkey" not in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" not in signals.SignalPublisher.instance()._timeout_watcher_tasks + + async with signals.SignalPublisher.instance().remote_signal_bundle_builder( + "wkey", "widentifier", timeout=1, signal_builder_class=OtherSignalBuilder, builder_args=("other", ) + ) as builder: + assert isinstance(builder, OtherSignalBuilder) + assert "wkey" in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" in signals.SignalPublisher.instance()._timeout_watcher_tasks + wrapper = signals.SignalPublisher.instance()._signal_builder_wrappers["wkey"] + _emit_signal_if_necessary_mock.assert_called_once_with(wrapper) + assert "wkey" not in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "wkey" not in signals.SignalPublisher.instance()._timeout_watcher_tasks + + +def test_stop(publisher): + signals.SignalPublisher.instance().stop() + assert not signals.SignalPublisher.instance()._timeout_watcher_tasks + assert not signals.SignalPublisher.instance()._signal_builder_wrappers + + cancel_mock_1 = mock.Mock() + cancel_mock_2 = mock.Mock() + signals.SignalPublisher.instance()._timeout_watcher_tasks = { + "h": mock.Mock(cancel=cancel_mock_1), + "i": mock.Mock(cancel=cancel_mock_2) + } + signals.SignalPublisher.instance()._signal_builder_wrappers = {"g": 0, "i": "fdfs"} + signals.SignalPublisher.instance().stop() + cancel_mock_1.assert_called_once() + cancel_mock_2.assert_called_once() + assert not signals.SignalPublisher.instance()._timeout_watcher_tasks + assert not signals.SignalPublisher.instance()._signal_builder_wrappers + + +def test_create_or_get_signal_builder_wrapper(publisher): + with pytest.raises(TypeError): + signals.SignalPublisher.instance()._create_or_get_signal_builder_wrapper( + "key2", "id", 1, signals.SignalPublisher.DEFAULT_SIGNAL_BUILDER_CLASS, ("unexpected_args", ) + ) + wrapper_1 = signals.SignalPublisher.instance()._create_or_get_signal_builder_wrapper( + "key", "id", 1, signals.SignalPublisher.DEFAULT_SIGNAL_BUILDER_CLASS, None + ) + assert signals.SignalPublisher.instance()._create_or_get_signal_builder_wrapper( + "key", "id", 1, signals.SignalPublisher.DEFAULT_SIGNAL_BUILDER_CLASS, None + ) is wrapper_1 + + wrapper_2 = signals.SignalPublisher.instance()._create_or_get_signal_builder_wrapper( + "key2", "id", 1, signals.SignalPublisher.DEFAULT_SIGNAL_BUILDER_CLASS, None + ) + assert wrapper_1 != wrapper_2 + assert "key" in signals.SignalPublisher.instance()._signal_builder_wrappers + assert "key2" in signals.SignalPublisher.instance()._signal_builder_wrappers + + +@pytest.mark.asyncio +async def test_emit_signal_if_necessary(publisher, signal_builder_wrapper): + with mock.patch.object(signals_emitter, "emit_signal_bundle", mock.AsyncMock()) as emit_signal_bundle_mock: + signal_builder_mock = mock.Mock( + is_empty=mock.Mock(return_value=True), build=mock.Mock(return_value="build_res"), reset=mock.Mock() + ) + signal_builder_wrapper.signal_bundle_builder = signal_builder_mock + await signals.SignalPublisher.instance()._emit_signal_if_necessary(signal_builder_wrapper) + signal_builder_mock.is_empty.assert_called_once() + signal_builder_mock.build.assert_not_called() + signal_builder_mock.reset.assert_not_called() + emit_signal_bundle_mock.assert_not_called() + + signal_builder_mock.is_empty.reset_mock() + signal_builder_mock.is_empty.return_value = False + await signals.SignalPublisher.instance()._emit_signal_if_necessary(signal_builder_wrapper) + signal_builder_mock.is_empty.assert_called_once() + signal_builder_mock.build.assert_called_once() + signal_builder_mock.reset.assert_called_once() + emit_signal_bundle_mock.assert_called_once_with("build_res") + + +@pytest.mark.asyncio +async def test_schedule_signal_auto_emit(publisher, signal_builder_wrapper): + with mock.patch.object(signals.SignalPublisher.instance(), "_emit_signal_if_necessary", mock.AsyncMock()) as \ + _emit_signal_if_necessary_mock: + await signals.SignalPublisher.instance()._schedule_signal_auto_emit("key", 0.001) + _emit_signal_if_necessary_mock.assert_not_called() + + signal_builder_wrapper.signal_emit_time = time.time() - 1 + signals.SignalPublisher.instance()._signal_builder_wrappers["key"] = signal_builder_wrapper + + async def auto_remove_wrapper(key): + await asyncio.sleep(0.1) + signals.SignalPublisher.instance()._signal_builder_wrappers.pop(key) + asyncio.create_task(auto_remove_wrapper("key")) + await asyncio_tools.wait_asyncio_next_cycle() + await signals.SignalPublisher.instance()._schedule_signal_auto_emit("key", 0.001) + assert _emit_signal_if_necessary_mock.call_count > 1 + + +def test_register_timeout_if_any(publisher, signal_builder_wrapper): + with mock.patch.object(asyncio, "create_task", mock.Mock(return_value="created_task")) as create_task_mock, \ + mock.patch.object(signals.SignalPublisher.instance(), "_schedule_signal_auto_emit", + mock.Mock(return_value="task")) as _schedule_signal_auto_emit_mock: + with pytest.raises(KeyError): + signals.SignalPublisher.instance()._register_timeout_if_any("key") + create_task_mock.assert_not_called() + _schedule_signal_auto_emit_mock.assert_not_called() + + assert not signals.SignalPublisher.instance()._timeout_watcher_tasks + signal_builder_wrapper.timeout = signal_builder_wrapper.NO_TIMEOUT_VALUE + signals.SignalPublisher.instance()._signal_builder_wrappers["key"] = signal_builder_wrapper + signals.SignalPublisher.instance()._register_timeout_if_any("key") + create_task_mock.assert_not_called() + _schedule_signal_auto_emit_mock.assert_not_called() + assert not signals.SignalPublisher.instance()._timeout_watcher_tasks + + signal_builder_wrapper.timeout = 1 + signals.SignalPublisher.instance()._register_timeout_if_any("key") + create_task_mock.assert_called_once_with("task") + _schedule_signal_auto_emit_mock.assert_called_once_with("key", 1) + assert signals.SignalPublisher.instance()._timeout_watcher_tasks["key"] == "created_task" + + +def test_unregister_timeout(publisher): + task = mock.Mock(cancel=mock.Mock()) + signals.SignalPublisher.instance()._unregister_timeout("key") + task.cancel.assert_not_called() + signals.SignalPublisher.instance()._timeout_watcher_tasks["key"] = task + signals.SignalPublisher.instance()._unregister_timeout("key") + task.cancel.assert_called_once() diff --git a/packages/commons/tests/static/ExchangeHistoryDataCollector_1589740606.4862757.data b/packages/commons/tests/static/ExchangeHistoryDataCollector_1589740606.4862757.data new file mode 100644 index 0000000000..f06877d18e Binary files /dev/null and b/packages/commons/tests/static/ExchangeHistoryDataCollector_1589740606.4862757.data differ diff --git a/packages/commons/tests/static/config.json b/packages/commons/tests/static/config.json new file mode 100644 index 0000000000..c4e951335f --- /dev/null +++ b/packages/commons/tests/static/config.json @@ -0,0 +1,9 @@ +{ + "time_frame": ["1h", "4h", "1d"], + "crypto-currencies":{ + "Bitcoin": { + "pairs" : ["BTC/USDT", "BTC/USD"] + } + }, + "exchanges": {} +} \ No newline at end of file diff --git a/packages/commons/tests/static/default_config.json b/packages/commons/tests/static/default_config.json new file mode 100644 index 0000000000..efe1c1777f --- /dev/null +++ b/packages/commons/tests/static/default_config.json @@ -0,0 +1,19 @@ +{ + "backtesting": { + "files": [] + }, + "exchanges": { + "binance": { + "api-key": "your-api-key-here", + "api-secret": "your-api-secret-here" + } + }, + "services": {}, + "notification":{ + "global-info": true, + "price-alerts": true, + "trades": true, + "notification-type": [] + }, + "accepted_terms": false +} diff --git a/packages/commons/tests/static/default_profile.png b/packages/commons/tests/static/default_profile.png new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/commons/tests/static/invalid_profile/profile.json b/packages/commons/tests/static/invalid_profile/profile.json new file mode 100644 index 0000000000..c9b7317527 --- /dev/null +++ b/packages/commons/tests/static/invalid_profile/profile.json @@ -0,0 +1,50 @@ +{ + "profile": { + "avatar": "default_profile.png", + "description": "OctoBot default profile.", + "id": "invalid_profile", + "name": "default", + "origin_url": "https://default.url" + }, + "config": { + "crypto-currencies": { + "Bitcoin": { + "pairs": [ + "BTC/USDT" + ] + }, + "plop": { + "pairs": [ + "BTC/USDT" + ], + "config": { + "i should not be there": true + } + } + }, + "exchanges": { + "binance": { + "enabled": true + } + }, + "trading": { + "reference-market": "BTC", + "risk": 0.5 + }, + "trader": { + "enabled": false, + "load-trade-history": true + }, + "trader-simulator": { + "enabled": true, + "fees": { + "maker": 0.1, + "taker": 0.1 + }, + "starting-portfolio": { + "BTC": 10, + "USDT": 1000 + } + } + } +} diff --git a/packages/commons/tests/static/profile.json b/packages/commons/tests/static/profile.json new file mode 100644 index 0000000000..caa51ba337 --- /dev/null +++ b/packages/commons/tests/static/profile.json @@ -0,0 +1,43 @@ +{ + "profile": { + "avatar": "default_profile.png", + "description": "OctoBot default profile.", + "id": "default", + "name": "default", + "origin_url": "https://default.url" + }, + "config": { + "crypto-currencies": { + "Bitcoin": { + "pairs": [ + "BTC/USDT" + ] + } + }, + "exchanges": { + "binance": { + "enabled": true + } + }, + "trading": { + "paused": false, + "reference-market": "BTC", + "risk": 0.5 + }, + "trader": { + "enabled": false, + "load-trade-history": true + }, + "trader-simulator": { + "enabled": true, + "fees": { + "maker": 0.1, + "taker": 0.1 + }, + "starting-portfolio": { + "BTC": 10, + "USDT": 1000 + } + } + } +} diff --git a/packages/commons/tests/static/profile_schema.json b/packages/commons/tests/static/profile_schema.json new file mode 100644 index 0000000000..752cb304c8 --- /dev/null +++ b/packages/commons/tests/static/profile_schema.json @@ -0,0 +1,181 @@ +{ + "type": "object", + "properties": { + "profile": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "imported": { + "type": "boolean" + }, + "description": { + "type": ["string", "null"] + }, + "avatar": { + "type": ["string", "null"] + }, + "origin_url": { + "type": ["string", "null"] + }, + "read_only": { + "type": "boolean" + } + } + }, + "config": { + "type": "object", + "properties": { + "distribution": { + "type": "string" + }, + "crypto-currencies": { + "type": "object", + "patternProperties": { + "^.*$": { + "type": "object", + "additionalProperties":false, + "properties": { + "enabled": { + "type": "boolean" + }, + "pairs": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "quote": { + "type": "string" + }, + "add": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + } + } + } + } + }, + "exchanges": { + "type": "object", + "patternProperties": { + "^.*$": { + "type": "object", + "additionalProperties":false, + "properties": { + "enabled": { + "type": "boolean" + }, + "exchange-type": { + "type": "string" + } + }, + "required": [ + "enabled" + ] + } + } + }, + "trader": { + "type": "object", + "additionalProperties":false, + "properties": { + "enabled": { + "type": "boolean" + }, + "load-trade-history": { + "type": "boolean" + } + }, + "required": [ + "enabled" + ] + }, + "trader-simulator": { + "type": "object", + "additionalProperties":false, + "properties": { + "enabled": { + "type": "boolean" + }, + "fees": { + "type": "object", + "additionalProperties":false, + "properties": { + "maker": { + "type": "number", + "minimum": -100, + "maximum": 100 + }, + "taker": { + "type": "number", + "minimum": -100, + "maximum": 100 + } + }, + "required": [ + "maker", + "taker" + ] + }, + "starting-portfolio": { + "type": "object", + "additionalProperties":false, + "patternProperties": { + "^.*$": { + "type": "number", + "minimum": 0 + } + } + } + }, + "required": [ + "enabled", + "fees", + "starting-portfolio" + ] + }, + "trading": { + "type": "object", + "additionalProperties":false, + "properties": { + "paused": { + "type": "boolean" + }, + "reference-market": { + "type": "string" + }, + "risk": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "current-live-id": { + "type": "integer", + "minimum": 1 + } + }, + "required": [ + "reference-market", + "risk" + ] + } + }, + "required": [ + "crypto-currencies", + "exchanges", + "trading", + "trader", + "trader-simulator" + ] + } + } +} diff --git a/packages/commons/tests/static/second_ExchangeHistoryDataCollector_1589740606.4862757.data b/packages/commons/tests/static/second_ExchangeHistoryDataCollector_1589740606.4862757.data new file mode 100644 index 0000000000..f06877d18e Binary files /dev/null and b/packages/commons/tests/static/second_ExchangeHistoryDataCollector_1589740606.4862757.data differ diff --git a/packages/commons/tests/symbols/__init__.py b/packages/commons/tests/symbols/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/commons/tests/symbols/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/commons/tests/symbols/test_symbol.py b/packages/commons/tests/symbols/test_symbol.py new file mode 100644 index 0000000000..7684159394 --- /dev/null +++ b/packages/commons/tests/symbols/test_symbol.py @@ -0,0 +1,151 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.symbols +import octobot_commons.enums + + +@pytest.fixture +def spot_symbol(): + return octobot_commons.symbols.Symbol("BTC/USDT") + + +@pytest.fixture +def perpetual_future_symbol(): + return octobot_commons.symbols.Symbol("BTC/USDT:BTC") + + +@pytest.fixture +def future_symbol(): + return octobot_commons.symbols.Symbol("ETH/USDT:USDT-210625") + + +@pytest.fixture +def option_symbol(): + return octobot_commons.symbols.Symbol("ETH/USDT:USDT-211225-40000-C") + + +@pytest.fixture +def put_option_symbol(): + return octobot_commons.symbols.Symbol("BTC/USDT:BTC-211225-60000-P") + + +def test_parse_spot_symbol(spot_symbol): + assert spot_symbol.base == "BTC" + assert spot_symbol.quote == "USDT" + assert spot_symbol.settlement_asset == spot_symbol.identifier == spot_symbol.strike_price == "" + assert spot_symbol.option_type is None + + +def test_parse_perpetual_future_symbol(perpetual_future_symbol): + assert perpetual_future_symbol.base == "BTC" + assert perpetual_future_symbol.quote == "USDT" + assert perpetual_future_symbol.settlement_asset == "BTC" + assert perpetual_future_symbol.strike_price == "" + assert perpetual_future_symbol.option_type is None + + +def test_parse_future_symbol(future_symbol): + assert future_symbol.base == "ETH" + assert future_symbol.quote == "USDT" + assert future_symbol.settlement_asset == "USDT" + assert future_symbol.identifier == "210625" + assert future_symbol.strike_price == "" + assert future_symbol.option_type is None + + +def test_parse_option_symbol(option_symbol): + assert option_symbol.base == "ETH" + assert option_symbol.quote == "USDT" + assert option_symbol.settlement_asset == "USDT" + assert option_symbol.identifier == "211225" + assert option_symbol.strike_price == "40000" + assert option_symbol.option_type == octobot_commons.enums.OptionTypes.CALL.value + + +def test_base_and_quote(spot_symbol, option_symbol): + assert spot_symbol.base_and_quote() == ("BTC", "USDT") + assert option_symbol.base_and_quote() == ("ETH", "USDT") + + +def test_is_linear(spot_symbol, perpetual_future_symbol, option_symbol): + assert spot_symbol.is_linear() is True + assert perpetual_future_symbol.is_linear() is False + assert option_symbol.is_linear() is True + + +def test_is_inverse(spot_symbol, perpetual_future_symbol, option_symbol): + assert spot_symbol.is_inverse() is False + assert perpetual_future_symbol.is_inverse() is True + assert option_symbol.is_inverse() is False + + +def test_merged_str_symbol_with_full_option(option_symbol, put_option_symbol): + call_symbol = octobot_commons.symbols.Symbol("ETH/USDT:USDT-211225-40000-C") + call_symbol.option_type = octobot_commons.enums.OptionTypes.CALL.value + call_symbol.strike_price = 40000 + call_symbol.identifier = "211225" + assert call_symbol.merged_str_symbol() == "ETH/USDT:USDT-211225-40000-C" + + put_symbol = octobot_commons.symbols.Symbol("BTC/USDT:BTC-211225-60000-P") + put_symbol.option_type = octobot_commons.enums.OptionTypes.PUT.value + put_symbol.strike_price = 60000 + put_symbol.identifier = "211225" + assert put_symbol.merged_str_symbol() == "BTC/USDT:BTC-211225-60000-P" + + custom_symbol = octobot_commons.symbols.Symbol("BTC/USDT:BTC-211225-60000-yES") + custom_symbol.option_type = "YES" + custom_symbol.strike_price = 60000 + custom_symbol.identifier = "211225" + assert custom_symbol.merged_str_symbol() == "BTC/USDT:BTC-211225-60000-YES" + + +def test_is_put_option(): + put_symbol = octobot_commons.symbols.Symbol("BTC/USDT:BTC-211225-60000-P") + assert put_symbol.is_put_option() is True + assert put_symbol.is_call_option() is False + + +def test_is_call_option(): + call_symbol = octobot_commons.symbols.Symbol("ETH/USDT:USDT-211225-40000-C") + assert call_symbol.is_call_option() is True + assert call_symbol.is_put_option() is False + + +def test_is_put_and_call_option_with_non_option_symbols(spot_symbol, perpetual_future_symbol, future_symbol): + assert spot_symbol.is_put_option() is False + assert spot_symbol.is_call_option() is False + assert perpetual_future_symbol.is_put_option() is False + assert perpetual_future_symbol.is_call_option() is False + assert future_symbol.is_put_option() is False + assert future_symbol.is_call_option() is False + + +def test_does_expire(spot_symbol, perpetual_future_symbol, future_symbol, option_symbol, put_option_symbol): + assert spot_symbol.does_expire() is False + assert perpetual_future_symbol.does_expire() is False + assert future_symbol.does_expire() is True + assert option_symbol.does_expire() is True + assert put_option_symbol.does_expire() is True + + +def test__eq__(spot_symbol, option_symbol): + assert spot_symbol == octobot_commons.symbols.Symbol("BTC/USDT") + assert spot_symbol != octobot_commons.symbols.Symbol("BTC/USD") + assert spot_symbol != option_symbol + assert option_symbol == octobot_commons.symbols.Symbol("ETH/USDT:USDT-211225-40000-C") + assert option_symbol != octobot_commons.symbols.Symbol("ETH/USDT:USDT-211225-40000-P") diff --git a/packages/commons/tests/symbols/test_symbol_util.py b/packages/commons/tests/symbols/test_symbol_util.py new file mode 100644 index 0000000000..fd5989c046 --- /dev/null +++ b/packages/commons/tests/symbols/test_symbol_util.py @@ -0,0 +1,98 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.symbols + + +def test_parse_symbol(): + assert octobot_commons.symbols.parse_symbol("BTC/USDT") == octobot_commons.symbols.Symbol("BTC/USDT") + + +def test_merge_symbol(): + assert octobot_commons.symbols.merge_symbol("BTC/USDT") == "BTCUSDT" + assert octobot_commons.symbols.merge_symbol("BTC/USDT:USDT") == "BTCUSDT_USDT" + + +def test_merge_currencies(): + assert octobot_commons.symbols.merge_currencies("BTC", "USDT") == "BTC/USDT" + assert octobot_commons.symbols.merge_currencies("BTC", "USDT", "BTC") == "BTC/USDT:BTC" + assert octobot_commons.symbols.merge_currencies("BTC", "USDT", settlement_asset="XXX", market_separator="g", settlement_separator="d") == "BTCgUSDTdXXX" + assert ( + octobot_commons.symbols.merge_currencies( + "will-bitcoin-replace-sha-256-before-2027", + "USDC", + "USDC", + "261231", + "0", + octobot_commons.enums.OptionTypes.PUT.value + ) + == "will-bitcoin-replace-sha-256-before-2027/USDC:USDC-261231-0-P" + ) + assert ( + octobot_commons.symbols.merge_currencies( + "will-bitcoin-replace-sha-256-before-2027", + "USDC", + "USDC", + "261231", + "0", + "test" + ) + == "will-bitcoin-replace-sha-256-before-2027/USDC:USDC-261231-0-TEST" + ) + assert ( + octobot_commons.symbols.merge_currencies( + "will-bitcoin-replace-sha-256-before-2027", + "USDC", + "USDC", + "261231", + "0", + None + ) + == "will-bitcoin-replace-sha-256-before-2027/USDC:USDC" + ) + + +def test_convert_symbol(): + assert octobot_commons.symbols.convert_symbol("BTC-USDT", symbol_separator="-") == "BTC/USDT" + + +def test_is_symbol(): + # Test with default separator (/) + assert octobot_commons.symbols.is_symbol("BTC/USDT") is True + assert octobot_commons.symbols.is_symbol("ETH/USDT") is True + assert octobot_commons.symbols.is_symbol("BTC/USDT:USDT") is True + assert octobot_commons.symbols.is_symbol("BTC") is False + assert octobot_commons.symbols.is_symbol("USDT") is False + assert octobot_commons.symbols.is_symbol("ETH") is False + + # Test with custom separator (-) + assert octobot_commons.symbols.is_symbol("BTC-USDT", separator="-") is True + assert octobot_commons.symbols.is_symbol("ETH-USDT", separator="-") is True + assert octobot_commons.symbols.is_symbol("BTC", separator="-") is False + assert octobot_commons.symbols.is_symbol("USDT", separator="-") is False + + # Test with custom separator (:) + assert octobot_commons.symbols.is_symbol("BTC/USDT:USDT", separator=":") is True + assert octobot_commons.symbols.is_symbol("BTC/USDT", separator=":") is False + assert octobot_commons.symbols.is_symbol("BTC", separator=":") is False + + # Test with custom separator (|) + assert octobot_commons.symbols.is_symbol("BTC|USDT", separator="|") is True + assert octobot_commons.symbols.is_symbol("BTC", separator="|") is False + + # Test edge cases + assert octobot_commons.symbols.is_symbol("", separator="/") is False + assert octobot_commons.symbols.is_symbol("/", separator="/") is True + assert octobot_commons.symbols.is_symbol("BTC/USDT/ETH", separator="/") is True diff --git a/packages/commons/tests/tentacles_management/__init__.py b/packages/commons/tests/tentacles_management/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/commons/tests/tentacles_management/test_abstract_tentacle.py b/packages/commons/tests/tentacles_management/test_abstract_tentacle.py new file mode 100644 index 0000000000..b3965b0631 --- /dev/null +++ b/packages/commons/tests/tentacles_management/test_abstract_tentacle.py @@ -0,0 +1,56 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.configuration as configuration +from octobot_commons.tentacles_management.abstract_tentacle import AbstractTentacle + + +class TentacleTest(AbstractTentacle): + def __init__(self): + super().__init__() + + +class TentacleTestChild(TentacleTest): + def __init__(self): + super().__init__() + self.plop = 1 + + +def test_get_name(): + assert TentacleTest().get_name() == "TentacleTest" + assert TentacleTestChild().get_name() == "TentacleTestChild" + + +def test_get_all_subclasses(): + assert TentacleTest().get_all_subclasses() == [TentacleTestChild] + + +def test_user_input_factories(): + tentacle = TentacleTest() + assert isinstance(tentacle.UI, configuration.UserInputFactory) + assert isinstance(tentacle.CLASS_UI, configuration.UserInputFactory) + assert isinstance(tentacle.__class__.CLASS_UI, configuration.UserInputFactory) + assert isinstance(TentacleTestChild.CLASS_UI, configuration.UserInputFactory) + assert TentacleTestChild.CLASS_UI is tentacle.CLASS_UI + + child_tentacle = TentacleTestChild() + assert TentacleTestChild.CLASS_UI is child_tentacle.CLASS_UI + assert child_tentacle.plop == 1 + assert isinstance(child_tentacle.UI, configuration.UserInputFactory) + assert isinstance(child_tentacle.CLASS_UI, configuration.UserInputFactory) + assert isinstance(tentacle.__class__.CLASS_UI, configuration.UserInputFactory) + assert isinstance(TentacleTestChild.CLASS_UI, configuration.UserInputFactory) + assert TentacleTestChild.CLASS_UI is child_tentacle.CLASS_UI + assert tentacle.CLASS_UI is child_tentacle.CLASS_UI diff --git a/packages/commons/tests/tentacles_management/test_class_inspector.py b/packages/commons/tests/tentacles_management/test_class_inspector.py new file mode 100644 index 0000000000..a4653ecf59 --- /dev/null +++ b/packages/commons/tests/tentacles_management/test_class_inspector.py @@ -0,0 +1,81 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_commons.tentacles_management.class_inspector import default_parent_inspection, default_parents_inspection, \ + get_class_from_parent_subclasses, get_deep_class_from_parent_subclasses, \ + is_abstract_using_inspection_and_class_naming, get_all_classes_from_parent, get_single_deepest_child_class + + +class AbstractParent: + pass + + +class Parent(AbstractParent): + pass + + +class BasicChild(Parent): + pass + + +class ChildOfChild(BasicChild): + pass + + +def test_default_parent_inspection(): + assert default_parent_inspection(BasicChild, Parent) + assert not default_parent_inspection(ChildOfChild, Parent) + assert default_parent_inspection(ChildOfChild, BasicChild) + assert not default_parent_inspection(BasicChild, ChildOfChild) + + +def test_default_parents_inspection(): + assert default_parents_inspection(BasicChild, Parent) + assert default_parents_inspection(ChildOfChild, Parent) + assert default_parents_inspection(ChildOfChild, BasicChild) + assert not default_parents_inspection(BasicChild, ChildOfChild) + + +def test_get_class_from_parent_subclasses(): + assert get_class_from_parent_subclasses("BasicChild", Parent) is BasicChild + assert get_class_from_parent_subclasses("ChildOfChild", Parent) is None + assert get_class_from_parent_subclasses("ChildOfChild", BasicChild) is ChildOfChild + assert get_class_from_parent_subclasses("BasicChild", ChildOfChild) is None + + +def test_get_deep_class_from_parent_subclasses(): + assert get_deep_class_from_parent_subclasses("BasicChild", Parent) is BasicChild + assert get_deep_class_from_parent_subclasses("ChildOfChild", Parent) is ChildOfChild + assert get_deep_class_from_parent_subclasses("ChildOfChild", BasicChild) is ChildOfChild + assert get_deep_class_from_parent_subclasses("BasicChild", ChildOfChild) is None + + +def test_is_abstract_using_inspection_and_class_naming(): + assert is_abstract_using_inspection_and_class_naming(AbstractParent) + assert not is_abstract_using_inspection_and_class_naming(Parent) + assert not is_abstract_using_inspection_and_class_naming(ChildOfChild) + + +def test_get_all_classes_from_parent(): + assert get_all_classes_from_parent(Parent) == [BasicChild, ChildOfChild] + assert get_all_classes_from_parent(BasicChild) == [ChildOfChild] + assert get_all_classes_from_parent(ChildOfChild) == [] + + +def test_get_single_deepest_child_class(): + assert get_single_deepest_child_class(Parent) == ChildOfChild + assert get_single_deepest_child_class(BasicChild) == ChildOfChild + assert get_single_deepest_child_class(ChildOfChild) == ChildOfChild diff --git a/packages/commons/tests/test_aiohttp_util.py b/packages/commons/tests/test_aiohttp_util.py new file mode 100644 index 0000000000..bfe6844283 --- /dev/null +++ b/packages/commons/tests/test_aiohttp_util.py @@ -0,0 +1,109 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import mock +import pytest +import aiohttp +import certifi + +import octobot_commons.aiohttp_util as aiohttp_util +import octobot_commons.constants as commons_constants + +pytestmark = pytest.mark.asyncio + + +async def test_get_ssl_fallback_aiohttp_client_session(): + origin_where = certifi.where + ok_get_mock_calls = [] + ko_get_mock = [] + + @contextlib.asynccontextmanager + async def _ok_get_mock(*args, **kwargs): + ok_get_mock_calls.append(1) + yield mock.Mock(status=200) + + @contextlib.asynccontextmanager + async def _ko_get_mock(*args, **kwargs): + ko_get_mock.append(1) + yield mock.Mock(status=200) + raise aiohttp.ClientConnectorCertificateError("ssl blabla", RuntimeError()) + + with mock.patch.object(certifi, "where", mock.Mock(side_effect=origin_where)) as where_mock: + + # no need for certifi + with mock.patch.object(aiohttp.ClientSession, "get", _ok_get_mock): + session = await aiohttp_util.get_ssl_fallback_aiohttp_client_session( + commons_constants.KNOWN_POTENTIALLY_SSL_FAILED_REQUIRED_URL + ) + assert isinstance(session, aiohttp.ClientSession) + assert len(ok_get_mock_calls) == 1 + ok_get_mock_calls.clear() + assert len(ko_get_mock) == 0 + where_mock.assert_not_called() + await session.close() + + # need for certifi + with mock.patch.object(aiohttp.ClientSession, "get", _ko_get_mock): + async with aiohttp_util.ssl_fallback_aiohttp_client_session( + commons_constants.KNOWN_POTENTIALLY_SSL_FAILED_REQUIRED_URL + ): + assert isinstance(session, aiohttp.ClientSession) + assert len(ok_get_mock_calls) == 0 + assert len(ko_get_mock) == 1 + where_mock.assert_called_once() + + +async def test_fetch_test_url_with_and_without_certify(): + base_session = aiohttp.ClientSession() + certify_session = aiohttp_util._get_certify_aiohttp_client_session() + try: + async with base_session.get(commons_constants.KNOWN_POTENTIALLY_SSL_FAILED_REQUIRED_URL) as resp: + assert resp.status < 400 + base_text = await resp.text() + assert "DrakkarSoftware" in base_text + async with certify_session.get(commons_constants.KNOWN_POTENTIALLY_SSL_FAILED_REQUIRED_URL) as resp: + assert resp.status < 400 + certifi_text = await resp.text() + assert base_text == certifi_text + finally: + if base_session: + await base_session.close() + if certify_session: + await certify_session.close() + + +async def test_certify_aiohttp_client_session(): + origin_where = certifi.where + + with mock.patch.object(certifi, "where", mock.Mock(side_effect=origin_where)) as where_mock: + async with aiohttp_util.certify_aiohttp_client_session() as session: + assert isinstance(session, aiohttp.ClientSession) + where_mock.assert_called_once() + + +async def test_counter_client_session(): + async with aiohttp_util.CounterClientSession("test") as session: + assert isinstance(session, aiohttp.ClientSession) + assert session.per_min.name == "test" + assert session.per_hour.name == "test" + assert session.per_day.name == "test" + + # ensure counters work + async with session.get("https://ifconfig.me/", headers={"Content-Type": "application/json"}) as resp: + assert resp.status == 200 + assert session.per_min.paths == {"[GET] /": 1} + assert session.per_hour.paths == {"[GET] /": 1} + assert session.per_day.paths == {"[GET] /": 1} diff --git a/packages/commons/tests/test_async_job.py b/packages/commons/tests/test_async_job.py new file mode 100644 index 0000000000..5fcdf54258 --- /dev/null +++ b/packages/commons/tests/test_async_job.py @@ -0,0 +1,167 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import os +import time + +from mock import AsyncMock, patch +import pytest + +from octobot_commons.async_job import AsyncJob +from octobot_commons.asyncio_tools import wait_asyncio_next_cycle + +pytestmark = pytest.mark.asyncio + + +async def callback(): + pass + + +async def test_has_enough_time_elapsed(): + job = AsyncJob(callback) + if not os.getenv('CYTHON_IGNORE'): + assert job._has_enough_time_elapsed() + job.last_execution_time = time.time() + assert job._has_enough_time_elapsed() + job.stop() + + +async def test_has_enough_time_elapsed_with_delays(): + job = AsyncJob(callback, min_execution_delay=0.2) + if not os.getenv('CYTHON_IGNORE'): + assert job._has_enough_time_elapsed() + job.last_execution_time = time.time() + assert not job._has_enough_time_elapsed() + await asyncio.sleep(0.2) + assert job._has_enough_time_elapsed() + job.stop() + + +async def test_should_run_with_dependencies(): + job2 = AsyncJob(callback) + job3 = AsyncJob(callback) + job = AsyncJob(callback) + job.add_job_dependency(job2) + job.add_job_dependency(job3) + if not os.getenv('CYTHON_IGNORE'): + assert job._should_run_job() + job2.idle_task_event.clear() + assert not job._are_job_dependencies_idle() + job2.idle_task_event.clear() + job3.idle_task_event.clear() + assert not job._are_job_dependencies_idle() + job2.idle_task_event.set() + assert not job._are_job_dependencies_idle() + job3.idle_task_event.set() + assert job._are_job_dependencies_idle() + job.stop() + + +async def test_first_execution_delay(): + callback_mock = AsyncMock(__name__="callback_mock") + job = AsyncJob(callback_mock, first_execution_delay=0.4, execution_interval_delay=5, min_execution_delay=5) + await wait_asyncio_next_cycle() + callback_mock.assert_not_called() + await job.run() + await wait_asyncio_next_cycle() + await asyncio.sleep(0.1) + callback_mock.assert_not_called() + await asyncio.sleep(0.4) + callback_mock.assert_called_once() + job.stop() + + +async def test_clear(): + job = AsyncJob(callback) + job.clear() + if not os.getenv('CYTHON_IGNORE'): + assert not job.job_dependencies + assert not job.job_task + assert not job.job_periodic_task + + +async def test_run_stop_run(): + job = AsyncJob(callback, execution_interval_delay=0.5, min_execution_delay=0.2) + if not os.getenv('CYTHON_IGNORE'): + with patch.object(job, 'callback', new=AsyncMock()) as mocked_test_job_callback: + await wait_asyncio_next_cycle() + mocked_test_job_callback.assert_not_called() + assert not job.is_stopped() + assert not job.is_started + await job.run() + await wait_asyncio_next_cycle() + mocked_test_job_callback.assert_called_once() + assert job.is_started + assert not job.is_stopped() + job.stop() + assert job.is_stopped() + await wait_asyncio_next_cycle() + assert not job.is_started + mocked_test_job_callback.assert_called_once() + await job.run() + assert not job.is_stopped() + await asyncio.sleep(0.7) + assert job.is_started + assert mocked_test_job_callback.call_count == 2 + + await asyncio.sleep(0.5) + assert mocked_test_job_callback.call_count in (3, 4) # can be 3 or 4 depending on the current computer + job.stop() + assert job.is_stopped() + + +async def test_run(): + job = AsyncJob(callback, execution_interval_delay=0.5, min_execution_delay=0.2) + if not os.getenv('CYTHON_IGNORE'): + with patch.object(job, 'callback', new=AsyncMock()) as mocked_test_job_callback: + await wait_asyncio_next_cycle() + mocked_test_job_callback.assert_not_called() + assert not job.is_started + await job.run() + await wait_asyncio_next_cycle() + mocked_test_job_callback.assert_called_once() + assert job.is_started + + # delay has not been waited + await job.run() + await wait_asyncio_next_cycle() + mocked_test_job_callback.assert_called_once() + assert job.is_started + + await asyncio.sleep(0.1) + mocked_test_job_callback.assert_called_once() + + await asyncio.sleep(0.6) + assert mocked_test_job_callback.call_count == 2 + + await job.run(force=True, wait_for_task_execution=True) + assert mocked_test_job_callback.call_count == 3 + + await wait_asyncio_next_cycle() + # no periodic trigger yet + assert mocked_test_job_callback.call_count == 3 + + await job.run(force=True, wait_for_task_execution=False) + # task not yet executed + assert mocked_test_job_callback.call_count == 3 + await wait_asyncio_next_cycle() + assert mocked_test_job_callback.call_count == 4 + + await asyncio.sleep(0.7) + # periodic auto trigger + assert mocked_test_job_callback.call_count in (5, 6) # can be 5 or 6 depending on the current computer + + job.stop() diff --git a/packages/commons/tests/test_asyncio_tools.py b/packages/commons/tests/test_asyncio_tools.py new file mode 100644 index 0000000000..91494eac06 --- /dev/null +++ b/packages/commons/tests/test_asyncio_tools.py @@ -0,0 +1,306 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import mock +import pytest + +import octobot_commons.asyncio_tools as asyncio_tools + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_without_error_container(): + # will not propagate exception + asyncio.get_event_loop().call_soon(_exception_raiser) + + +async def test_with_error_container(): + error_container = asyncio_tools.ErrorContainer() + error_container.print_received_exceptions = False + asyncio.get_event_loop().set_exception_handler(error_container.exception_handler) + # will propagate exception + asyncio.get_event_loop().call_soon(_exception_raiser) + with pytest.raises(AssertionError): + # ensure exception is caught + await asyncio.create_task(error_container.check()) + + +async def test_with_error_container_2_exceptions(): + error_container = asyncio_tools.ErrorContainer() + error_container.print_received_exceptions = False + asyncio.get_event_loop().set_exception_handler(error_container.exception_handler) + # will propagate exception + asyncio.get_event_loop().call_soon(_exception_raiser) + asyncio.get_event_loop().call_soon(_exception_raiser) + with pytest.raises(AssertionError): + # ensure exception is caught + await asyncio.create_task(error_container.check()) + + +async def test_gather_waiting_for_all_before_raising(): + # Test successful case: all coroutines complete successfully + async def success_coro_1(): + await asyncio.sleep(0.01) + return 1 + + async def success_coro_2(): + await asyncio.sleep(0.01) + return 2 + + results = await asyncio_tools.gather_waiting_for_all_before_raising( + success_coro_1(), success_coro_2() + ) + assert results == [1, 2] + + # Test failure case: one coroutine raises an exception + # All coroutines should complete before the exception is raised + completion_order = [] + + async def failing_coro(): + await asyncio.sleep(0.02) + completion_order.append("failing") + raise ValueError("Test error") + + async def slow_success_coro(): + await asyncio.sleep(0.1) + completion_order.append("slow_success") + return "success" + + async def fast_success_coro(): + await asyncio.sleep(0.01) + completion_order.append("fast_success") + return "fast" + + with pytest.raises(ValueError, match="Test error"): + await asyncio_tools.gather_waiting_for_all_before_raising( + failing_coro(), slow_success_coro(), fast_success_coro() + ) + + # Verify all coroutines completed before exception was raised + assert len(completion_order) == 3 + assert "fast_success" in completion_order + assert "failing" in completion_order + assert "slow_success" in completion_order + + # Test multiple exceptions: should raise the first one encountered + completion_order.clear() + + async def failing_coro_1(): + await asyncio.sleep(0.02) + completion_order.append("failing_1") + raise ValueError("First error") + + async def failing_coro_2(): + await asyncio.sleep(0.1) + completion_order.append("failing_2") + raise RuntimeError("Second error") + + async def success_coro(): + await asyncio.sleep(0.01) + completion_order.append("success") + return "ok" + + with pytest.raises(ValueError, match="First error"): + await asyncio_tools.gather_waiting_for_all_before_raising( + failing_coro_1(), failing_coro_2(), success_coro() + ) + + # Verify all coroutines completed + assert len(completion_order) == 3 + + +async def test_RLock_valid_setup(): + lock_1 = asyncio_tools.RLock() + lock_2 = asyncio_tools.RLock() + + passed_a = passed_b = passed_c = passed_d = passed_e = False + async with lock_1: + passed_a = True + async with lock_1: + passed_b = True + assert lock_1._depth == 2 + async with lock_2: + passed_c = True + async with lock_2: + assert lock_2._depth == 2 + passed_d = True + async with lock_1: + assert lock_1._task is asyncio.current_task() + assert lock_2._task is asyncio.current_task() + assert lock_1._depth == 3 + passed_e = True + assert lock_1._depth == 2 + assert lock_1._depth == 0 + assert lock_2._depth == 0 + assert lock_1._locked is False + assert lock_2._locked is False + assert lock_1._task is None + assert lock_2._task is None + assert all((passed_a, passed_b, passed_c, passed_d, passed_e)) + + +async def test_RLock_multiple_tasks(): + lock = asyncio_tools.RLock() + started = { + "a": False, + "b": False, + "c": False, + } + passed = { + "a": False, + "b": False, + "c": False, + } + released = { + "a": False, + "b": False, + "c": False, + } + + async def passing_task(identifier, is_waiting): + started[identifier] = True + async with lock: + if is_waiting: + await asyncio_tools.wait_asyncio_next_cycle() + assert lock._depth == 1 + assert lock._task == asyncio.current_task() + passed[identifier] = True + released[identifier] = True + + asyncio.create_task(passing_task("a", True)) + asyncio.create_task(passing_task("b", False)) + asyncio.create_task(passing_task("c", False)) + + # tasks did not run yet + assert all(v is False for v in started.values()) + + # let the loop run once + await asyncio_tools.wait_asyncio_next_cycle() + + # all tasks started + assert started == { + "a": True, + "b": True, + "c": True, + } + # "a" waits for next cycle, others wait for lock that "a" got + assert passed == { + "a": False, + "b": False, + "c": False, + } + assert released == { + "a": False, + "b": False, + "c": False, + } + await asyncio_tools.wait_asyncio_next_cycle() + # "a" release the lock, "b" can get it + assert passed == { + "a": True, + "b": True, + "c": False, + } + assert released == { + "a": True, + "b": True, + "c": False, + } + # "b" release the lock, "c" can get it + await asyncio_tools.wait_asyncio_next_cycle() + + assert all(v is True for v in passed.values()) + assert all(v is True for v in released.values()) + + +async def test_RLock_error_setup_1(): + lock = asyncio_tools.RLock() + + passed_a = passed_b = False + + # _depth is too high: lock is not cleared + async with lock: + passed_a = True + lock._depth += 1 + async with lock: + passed_b = True + assert lock._depth == 3 + assert lock._depth == 1 + assert lock._task is asyncio.current_task() + assert lock._locked is True + + assert all((passed_a, passed_b)) + + +async def test_RLock_error_setup_2(): + lock = asyncio_tools.RLock() + lock._depth = 1 + + # a lock can't have a non 0 depth when firstly acquired + with pytest.raises(RuntimeError): + async with lock: + pass + + +async def test_logged_waiter_cancels_task_on_quick_exit(): + mock_self = mock.Mock() + mock_self.logger = mock.Mock() + + with asyncio_tools.logged_waiter(mock_self, "quick op", sleep_time=30): + await asyncio.sleep(0.001) + + mock_self.logger.info.assert_not_called() + + +async def test_logged_waiter_logs_when_body_runs_long(): + mock_self = mock.Mock() + mock_self.logger = mock.Mock() + + with asyncio_tools.logged_waiter(mock_self, "long op", sleep_time=0.05): + await asyncio.sleep(0.15) + + assert mock_self.logger.info.call_count >= 1 + call_args = mock_self.logger.info.call_args[0][0] + assert "long op" in call_args + assert "is still processing" in call_args + + +async def test_logged_waiter_cancels_on_exception(): + mock_self = mock.Mock() + mock_self.logger = mock.Mock() + + with pytest.raises(ValueError, match="body failed"): + with asyncio_tools.logged_waiter(mock_self, "failing op", sleep_time=30): + raise ValueError("body failed") + + mock_self.logger.info.assert_not_called() + + +async def test_logged_waiter_uses_custom_sleep_time(): + mock_self = mock.Mock() + mock_self.logger = mock.Mock() + + with mock.patch.object(asyncio, "sleep", wraps=asyncio.sleep) as mock_sleep: + with asyncio_tools.logged_waiter(mock_self, "custom sleep", sleep_time=0.1): + await asyncio.sleep(0.2) + + sleep_calls = [c[0][0] for c in mock_sleep.call_args_list] + assert 0.1 in sleep_calls + + +def _exception_raiser(): + raise RuntimeError("error") diff --git a/packages/commons/tests/test_cache_util.py b/packages/commons/tests/test_cache_util.py new file mode 100644 index 0000000000..a0e6da69f5 --- /dev/null +++ b/packages/commons/tests/test_cache_util.py @@ -0,0 +1,85 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import pytest + +import octobot_commons.cache_util as cache_util + +pytestmark = pytest.mark.asyncio + + +class _TestClass: + def __init__(self): + self.call_count = 0 + + @cache_util.prevented_multiple_calls + async def decorated_method(self, *args, **kwargs): + self.call_count += 1 + return self.call_count + + +async def test_prevented_multiple_calls_without_max_period(): + """Without max_period, function should always be called.""" + obj = _TestClass() + await obj.decorated_method(max_period=None) + await obj.decorated_method(max_period=None) + await obj.decorated_method(max_period=None) + assert obj.call_count == 3 + + +async def test_prevented_multiple_calls_with_max_period_blocks_duplicate(): + """With max_period, duplicate calls within period should be skipped.""" + obj = _TestClass() + # First call executes + result1 = await obj.decorated_method(1, 2, max_period=1.0) + assert result1 == 1 + assert obj.call_count == 1 + + # Second call with same args within period is skipped + result2 = await obj.decorated_method(1, 2, max_period=1.0) + assert result2 is None + assert obj.call_count == 1 + + +async def test_prevented_multiple_calls_with_max_period_allows_after_period(): + """With max_period, call after period should execute.""" + obj = _TestClass() + result1 = await obj.decorated_method(1, max_period=0.05) + assert result1 == 1 + + await asyncio.sleep(0.06) + result2 = await obj.decorated_method(1, max_period=0.05) + assert result2 == 2 + assert obj.call_count == 2 + + +async def test_prevented_multiple_calls_different_args_independent(): + """Different args should have independent cache entries.""" + obj = _TestClass() + await obj.decorated_method(1, max_period=1.0) + await obj.decorated_method(2, max_period=1.0) + await obj.decorated_method(1, max_period=1.0) # Same as first, skipped + await obj.decorated_method(2, max_period=1.0) # Same as second, skipped + assert obj.call_count == 2 + + +async def test_prevented_multiple_calls_with_kwargs(): + """kwargs should be part of the cache key.""" + obj = _TestClass() + await obj.decorated_method(1, max_period=1.0, key="a") + await obj.decorated_method(1, max_period=1.0, key="b") # Different kwargs + await obj.decorated_method(1, max_period=1.0, key="a") # Same as first, skipped + assert obj.call_count == 2 diff --git a/packages/commons/tests/test_context_util.py b/packages/commons/tests/test_context_util.py new file mode 100644 index 0000000000..291f35d990 --- /dev/null +++ b/packages/commons/tests/test_context_util.py @@ -0,0 +1,166 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.context_util as context_util + + +def test_empty_context_manager_sync_enter_exit(): + """Test EmptyContextManager as synchronous context manager""" + manager = context_util.EmptyContextManager() + + # Test that __enter__ returns self + with manager as ctx: + assert ctx is manager + # Verify we can use it normally + assert ctx is not None + + +def test_empty_context_manager_sync_no_exception_suppression(): + """Test that EmptyContextManager doesn't suppress exceptions""" + manager = context_util.EmptyContextManager() + + # Exception should propagate normally + with pytest.raises(ValueError): + with manager: + raise ValueError("Test exception") + + +def test_empty_context_manager_sync_multiple_usage(): + """Test EmptyContextManager can be used multiple times""" + manager = context_util.EmptyContextManager() + + # First usage + with manager as ctx1: + assert ctx1 is manager + + # Second usage + with manager as ctx2: + assert ctx2 is manager + + +def test_empty_context_manager_sync_exit_with_exception(): + """Test EmptyContextManager __exit__ with exception""" + manager = context_util.EmptyContextManager() + + # __exit__ should not suppress exceptions (returns None/False) + with pytest.raises(ValueError): + with manager: + raise ValueError("Test exception") + + # Verify exception was not suppressed + try: + with manager: + raise ValueError("Another exception") + except ValueError as e: + assert str(e) == "Another exception" + + +def test_empty_context_manager_sync_exit_without_exception(): + """Test EmptyContextManager __exit__ without exception""" + manager = context_util.EmptyContextManager() + + # Normal exit should work fine + with manager: + pass + + # Should complete without issues + assert True + + +@pytest.mark.asyncio +async def test_empty_context_manager_async_enter_exit(): + """Test EmptyContextManager as asynchronous context manager""" + manager = context_util.EmptyContextManager() + + # Test that __aenter__ returns self + async with manager as ctx: + assert ctx is manager + # Verify we can use it normally + assert ctx is not None + + +@pytest.mark.asyncio +async def test_empty_context_manager_async_no_exception_suppression(): + """Test that EmptyContextManager doesn't suppress exceptions in async context""" + manager = context_util.EmptyContextManager() + + # Exception should propagate normally + with pytest.raises(ValueError): + async with manager: + raise ValueError("Test async exception") + + +@pytest.mark.asyncio +async def test_empty_context_manager_async_multiple_usage(): + """Test EmptyContextManager can be used multiple times in async context""" + manager = context_util.EmptyContextManager() + + # First usage + async with manager as ctx1: + assert ctx1 is manager + + # Second usage + async with manager as ctx2: + assert ctx2 is manager + + +@pytest.mark.asyncio +async def test_empty_context_manager_async_exit_with_exception(): + """Test EmptyContextManager __aexit__ with exception""" + manager = context_util.EmptyContextManager() + + # __aexit__ should not suppress exceptions (returns None/False) + with pytest.raises(ValueError): + async with manager: + raise ValueError("Test async exception") + + # Verify exception was not suppressed + try: + async with manager: + raise ValueError("Another async exception") + except ValueError as e: + assert str(e) == "Another async exception" + + +@pytest.mark.asyncio +async def test_empty_context_manager_async_exit_without_exception(): + """Test EmptyContextManager __aexit__ without exception""" + manager = context_util.EmptyContextManager() + + # Normal exit should work fine + async with manager: + pass + + # Should complete without issues + assert True + + +@pytest.mark.asyncio +async def test_empty_context_manager_mixed_sync_async(): + """Test EmptyContextManager can be used both synchronously and asynchronously""" + manager = context_util.EmptyContextManager() + + # Use synchronously + with manager: + pass + + # Use asynchronously + async with manager: + pass + + # Both should work without issues + assert True diff --git a/packages/commons/tests/test_data_util.py b/packages/commons/tests/test_data_util.py new file mode 100644 index 0000000000..cdff3ebc41 --- /dev/null +++ b/packages/commons/tests/test_data_util.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import numpy as np + +from octobot_commons.data_util import drop_nan, mean, shift_value_array + + +def test_drop_nan(): + assert np.array_equal(drop_nan(np.array([1, np.nan, 2, 3, np.nan])), np.array([1, 2, 3])) + assert np.array_equal(drop_nan(np.array([np.nan, np.nan, np.nan])), np.array([])) + + +def test_mean(): + assert mean([1, 2, 3, 4, 5, 6, 7]) == 4.0 + assert mean([0.684, 1, 2, 3, 4, 5.5, 6, 7.5]) == 3.7105 + assert mean([]) == 0 + + +def test_shift_value_array(): + array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64) + np.testing.assert_array_equal(shift_value_array(array, shift_count=-1, fill_value=np.nan), + np.array([2, 3, 4, 5, 6, 7, 8, 9, np.nan], dtype=np.float64)) + + array = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64) + np.testing.assert_array_equal(shift_value_array(array, shift_count=2, fill_value=np.nan), + np.array([np.nan, np.nan, 1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) diff --git a/packages/commons/tests/test_dict_util.py b/packages/commons/tests/test_dict_util.py new file mode 100644 index 0000000000..6c92fc1431 --- /dev/null +++ b/packages/commons/tests/test_dict_util.py @@ -0,0 +1,123 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dict_util as dict_util + + +def test_find_nested_value(): + assert dict_util.find_nested_value({"a": 1, "b": 2}, "b") == (True, 2) + assert dict_util.find_nested_value({"a": 1, "b": 2}, "c") == (False, "c") + assert dict_util.find_nested_value({"a": 1, "b": {"c": 5, "d": "e"}}, "d") == (True, "e") + assert dict_util.find_nested_value({"a": 1, "b": {"c": 5, "d": {"f": [1, 2, 3]}}}, "f") == (True, [1, 2, 3]) + assert dict_util.find_nested_value({"a": {"e": 7}, "b": {"c": {"t": 5}, "d": {"f": [1, 2, 3], "y": 1}}}, "y") == (True, 1) + complex_dict = {"a": {"e": 7}, "b": {"c": {"t": [{ + "ab": [8, 9, 10], + "cd": { + "4": 4, + "5": 5, + "6": [7, 8, 9, { + "abc": "def", + "zyx": "zxv" + }] + } + }, + { + "up": "pa", + "123": 1234 + }]}, "d": {"f": [1, 2, 3], "y": 1}}} + assert dict_util.find_nested_value(complex_dict, "abc") == (True, "def") + assert dict_util.find_nested_value(complex_dict, "5") == (True, 5) + assert dict_util.find_nested_value(complex_dict, "123") == (True, 1234) + + +def test_check_and_merge_values_from_reference(): + current_dict = { + "b": 5, + "d": 1 + } + ref_dict = { + "a": 1, + "b": 2, + "c": 3, + "d": 4 + } + exception_list = ["d"] + dict_util.check_and_merge_values_from_reference(current_dict, ref_dict, exception_list) + assert current_dict == { + "a": 1, + "b": 5, + "c": 3, + "d": 1 + } + + +def test_contains_each_element(): + assert dict_util.contains_each_element( + {}, + {} + ) is True + assert dict_util.contains_each_element( + {}, + {"1": 1} + ) is False + assert dict_util.contains_each_element( + {"1": 1}, + {"1": 1} + ) is True + assert dict_util.contains_each_element( + {"1": 1, "2": 2}, + {"1": 1} + ) is True + assert dict_util.contains_each_element( + {"1": 1}, + {"1": 1, "2": 2} + ) is False + assert dict_util.contains_each_element( + {"1": 1}, + {} + ) is True + assert dict_util.contains_each_element( + {"1": 2}, + {"1": 1} + ) is False + + +def test_nested_update_dict(): + assert dict_util.nested_update_dict({}, {"1": 1, "2": 2}) == {"1": 1, "2": 2} + assert dict_util.nested_update_dict({"1": "aa"}, {"1": 1, "2": 2}) == {"1": 1, "2": 2} + assert dict_util.nested_update_dict({"1": []}, {"1": 1, "2": 2}) == {"1": 1, "2": 2} + assert dict_util.nested_update_dict( + {"1": []}, {"1": 1, "2": 2}, ignore_lists=True + ) == {"1": [], "2": 2} + assert dict_util.nested_update_dict( + {"1": [], "plop": {"jerome": 0, "michel": 1, "monique": [1]}}, + {"1": 1, "2": 2, "plop": {"jerome": 0.5, "simon": 3, "monique": [2]}}, + ignore_lists=True + ) == {"1": [], "2": 2, "plop": {"jerome": 0.5, "michel": 1, "monique": [1], "simon": 3}} + assert dict_util.nested_update_dict( + {"1": [], "plop": {"jerome": 0, "michel": 1, "monique": [1]}}, + {"1": 1, "2": 2, "plop": {"jerome": 0.5, "simon": 3, "monique": [2]}}, + ignore_lists=False + ) == {"1": 1, "2": 2, "plop": {"jerome": 0.5, "michel": 1, "monique": [2], "simon": 3}} + assert dict_util.nested_update_dict( + {"1": []}, + {"1": [{"jerome": 0.5, "monique": [2]}, {"simon": 3, "monique": [23]}]}, + ignore_lists=True + ) == {"1": []} + assert dict_util.nested_update_dict( + {"1": []}, + {"1": [{"jerome": 0.5, "monique": [2]}, {"simon": 3, "monique": [23]}]}, + ignore_lists=False + ) == {'1': [{'jerome': 0.5, 'monique': [2]}, {'monique': [23], 'simon': 3}]} diff --git a/packages/commons/tests/test_evaluator_util.py b/packages/commons/tests/test_evaluator_util.py new file mode 100644 index 0000000000..9abeda6d32 --- /dev/null +++ b/packages/commons/tests/test_evaluator_util.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import math +import numpy as np + +from octobot_commons.constants import START_PENDING_EVAL_NOTE, INIT_EVAL_NOTE +from octobot_commons.evaluators_util import check_valid_eval_note + + +def test_check_valid_eval_note(): + assert not check_valid_eval_note(START_PENDING_EVAL_NOTE) + assert check_valid_eval_note(True) + assert check_valid_eval_note({"a": 1}) + assert check_valid_eval_note({"a": 1}, eval_time=1, expiry_delay=2, current_time=1) + assert check_valid_eval_note({"a": 1}, eval_time=1, expiry_delay=2, current_time=2) + assert not check_valid_eval_note({"a": 1}, eval_time=1, expiry_delay=2, current_time=3) + + assert check_valid_eval_note(INIT_EVAL_NOTE) + # UNSET_EVAL_TYPE + diff --git a/packages/commons/tests/test_html_util.py b/packages/commons/tests/test_html_util.py new file mode 100644 index 0000000000..7976fb1a49 --- /dev/null +++ b/packages/commons/tests/test_html_util.py @@ -0,0 +1,383 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest +import traceback + +import octobot_commons.html_util as html_util + + +def test_summarize_page_content_invalid_content(): + assert html_util.summarize_page_content("plop") == [('message', 'plop')] + with pytest.raises(TypeError): + html_util.summarize_page_content(1234) + with pytest.raises(TypeError): + html_util.summarize_page_content(RuntimeError("124")) + with pytest.raises(TypeError): + html_util.summarize_page_content({}) + assert html_util.summarize_page_content(RATE_LIMIT_INVALID_HTMLS[0]) == [] + with pytest.raises(IndexError): + html_util.summarize_page_content(RATE_LIMIT_INVALID_HTMLS[1]) + with pytest.raises(IndexError): + html_util.summarize_page_content(RATE_LIMIT_INVALID_HTMLS[2]) + + +def test_summarize_page_content_valid_content(): + assert html_util.summarize_page_content(RATE_LIMIT_HTML) == [ + ('title', 'Access denied | api.hollaex.com used Cloudflare to restrict access'), + ('div', 'Please enable cookies.'), + ('span', 'Error'), + ('span', '1006'), + ('span', 'Ray ID: 8e61e5180c749e8a •'), + ('span', '2024-11-21 16:00:49 UTC'), + ('h2', 'Access denied'), + ('h2', 'What happened?'), + ('p', 'The owner of this website (api.hollaex.com) has banned your IP address (123.123.123.123).'), + ('div', 'Was this page helpful?'), + ('div', 'Thank you for your feedback!'), + ('span', 'Cloudflare Ray ID:'), + ('strong', '8e61e5180c749e8a'), + ('span', 'Your IP:'), + ('span', '123.123.123.123'), + ('span', 'Performance & security by'), + ('a', 'Cloudflare') + ] + assert html_util.summarize_page_content(RATE_LIMIT_HTML_WITH_MESSAGE) == [ + ('message', 'hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 Forbidden'), + ('title', 'Access denied | api.hollaex.com used Cloudflare to restrict access'), + ('div', 'Please enable cookies.'), + ('span', 'Error'), + ('span', '1006'), + ('span', 'Ray ID: 8e61e5180c749e8a •'), + ('span', '2024-11-21 16:00:49 UTC'), + ('h2', 'Access denied'), + ('h2', 'What happened?'), + ('p', 'The owner of this website (api.hollaex.com) has banned your IP address (123.123.123.123).'), + ('div', 'Was this page helpful?'), + ('div', 'Thank you for your feedback!'), + ('span', 'Cloudflare Ray ID:'), + ('strong', '8e61e5180c749e8a'), + ('span', 'Your IP:'), + ('span', '123.123.123.123'), + ('span', 'Performance & security by'), + ('a', 'Cloudflare') + ] + assert html_util.summarize_page_content(RATE_LIMIT_HTML_WITH_MESSAGE, max_element_text_size=4) == [ + ('message', 'holl[...]'), + ('title', 'Acce[...]'), + ('div', 'Plea[...]'), + ('span', 'Erro[...]'), + ('span', '1006'), + ('span', 'Ray [...]'), + ('span', '2024[...]'), + ('h2', 'Acce[...]'), + ('h2', 'What[...]'), + ('p', 'The [...]'), + ('div', 'Was [...]'), + ('div', 'Than[...]'), + ('span', 'Clou[...]'), + ('strong', '8e61[...]'), + ('span', 'Your[...]'), + ('span', '123.[...]'), + ('span', 'Perf[...]'), + ('a', 'Clou[...]') + ] + + +def test_pretty_print_summary(): + assert html_util.pretty_print_summary(html_util.summarize_page_content(RATE_LIMIT_HTML_WITH_MESSAGE)) == ( + 'message<hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 ' + 'Forbidden>; title<Access denied | api.hollaex.com used Cloudflare to ' + 'restrict access>; div<Please enable cookies.>; span<Error>; span<1006>; ' + 'span<Ray ID: 8e61e5180c749e8a •>; span<2024-11-21 16:00:49 UTC>; h2<Access ' + 'denied>; h2<What happened?>; p<The owner of this website (api.hollaex.com) ' + 'has banned your IP address (123.123.123.123).>; div<Was this page helpful?>; ' + 'div<Thank you for your feedback!>; span<Cloudflare Ray ID:>; ' + 'strong<8e61e5180c749e8a>; span<Your IP:>; span<123.123.123.123>; ' + 'span<Performance & security by>; a<Cloudflare>' + ) + + +def test_is_html_content(): + assert html_util.is_html_content(RATE_LIMIT_HTML_WITH_MESSAGE) is True + assert html_util.is_html_content(RATE_LIMIT_HTML) is True + assert all(html_util.is_html_content(html) for html in RATE_LIMIT_INVALID_HTMLS) + assert html_util.is_html_content("plpoop") is False + assert html_util.is_html_content("<html>") is False + assert html_util.is_html_content("</html/>") is False + assert html_util.is_html_content("<html/>") is False + assert html_util.is_html_content("</html>") is True + + +def test_get_html_summary_if_relevant(): + # valid html + assert html_util.get_html_summary_if_relevant(RATE_LIMIT_HTML_WITH_MESSAGE) == ( + 'message<hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 ' + 'Forbidden>; title<Access denied | api.hollaex.com used Cloudflare to ' + 'restrict access>; div<Please enable cookies.>; span<Error>; span<1006>; ' + 'span<Ray ID: 8e61e5180c749e8a •>; span<2024-11-21 16:00:49 UTC>; h2<Access ' + 'denied>; h2<What happened?>; p<The owner of this website (api.hollaex.com) ' + 'has banned your IP address (123.123.123.123).>; div<Was this page helpful?>; ' + 'div<Thank you for your feedback!>; span<Cloudflare Ray ID:>; ' + 'strong<8e61e5180c749e8a>; span<Your IP:>; span<123.123.123.123>; ' + 'span<Performance & security by>; a<Cloudflare>' + ) + # invalid html + assert html_util.get_html_summary_if_relevant("PLOPOPDS") == "PLOPOPDS" + # non string + assert html_util.get_html_summary_if_relevant(123) == "123" + assert html_util.get_html_summary_if_relevant(Exception("plop")) == "plop" + # raising error does not propagate + with mock.patch.object(html_util, "summarize_page_content", mock.Mock(side_effect=Exception("err"))) \ + as summarize_page_content_mock: + # not html + assert html_util.get_html_summary_if_relevant("PLOPOPDS") == "PLOPOPDS" + summarize_page_content_mock.assert_not_called() + # html + assert html_util.get_html_summary_if_relevant( + "dfdsfsdfsd</html>", max_element_text_size=3 + ) == "dfdsfsdfsd</html>" + summarize_page_content_mock.assert_called_once_with( + "dfdsfsdfsd</html>", max_element_text_size=3 + ) + # raising error does not propagate + with mock.patch.object(html_util, "pretty_print_summary", mock.Mock(side_effect=Exception("err"))) \ + as pretty_print_summary_mock: + # not html + assert html_util.get_html_summary_if_relevant("PLOPOPDS") == "PLOPOPDS" + pretty_print_summary_mock.assert_not_called() + # html + assert html_util.get_html_summary_if_relevant("<html>dfdsfsdfsd</html>") == "<html>dfdsfsdfsd</html>" + pretty_print_summary_mock.assert_called_once_with( + [('html', 'dfdsfsdfsd')] + ) + + +def test_summarize_exception_html_cause_if_relevant(): + assert html_util.summarize_exception_html_cause_if_relevant(Exception("plop")) is None + try: + # lvl. 1 + try: + # lvl. 2 + try: + # lvl. 3 + try: + # lvl. 4 + raise IndexError(RATE_LIMIT_HTML_WITH_MESSAGE) + except IndexError as err_4: + assert err_4.__cause__ is None + assert "<script>" in str(err_4) + str_traceback = traceback.format_exc() + assert "<script>" in str_traceback + # does not crash if __cause__ is None + html_util.summarize_exception_html_cause_if_relevant(err_4) + str_traceback = traceback.format_exc() + assert "<script>" not in str_traceback + raise KeyError(html_util.get_html_summary_if_relevant(err_4)) from err_4 + except KeyError as err_3: + # lvl. 3 + assert err_3.args == ( + 'message<hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 ' + 'Forbidden>; title<Access denied | api.hollaex.com used Cloudflare to ' + 'restrict access>; div<Please enable cookies.>; span<Error>; span<1006>; ' + 'span<Ray ID: 8e61e5180c749e8a •>; span<2024-11-21 16:00:49 UTC>; h2<Access ' + 'denied>; h2<What happened?>; p<The owner of this website (api.hollaex.com) ' + 'has banned your IP address (123.123.123.123).>; div<Was this page helpful?>; ' + 'div<Thank you for your feedback!>; span<Cloudflare Ray ID:>; ' + 'strong<8e61e5180c749e8a>; span<Your IP:>; span<123.123.123.123>; ' + 'span<Performance & security by>; a<Cloudflare>', + ) + # cause is not summarized + assert "<script>" not in str(err_3.__cause__.args) + html_util.summarize_exception_html_cause_if_relevant(err_3) + # cause has been summarized + assert err_3.__cause__.args == ( + 'message<hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 ' + 'Forbidden>; title<Access denied | api.hollaex.com used Cloudflare to ' + 'restrict access>; div<Please enable cookies.>; span<Error>; span<1006>; ' + 'span<Ray ID: 8e61e5180c749e8a •>; span<2024-11-21 16:00:49 UTC>; h2<Access ' + 'denied>; h2<What happened?>; p<The owner of this website (api.hollaex.com) ' + 'has banned your IP address (123.123.123.123).>; div<Was this page helpful?>; ' + 'div<Thank you for your feedback!>; span<Cloudflare Ray ID:>; ' + 'strong<8e61e5180c749e8a>; span<Your IP:>; span<123.123.123.123>; ' + 'span<Performance & security by>; a<Cloudflare>', + ) + raise NotImplementedError(err_3) from err_3 + except NotImplementedError as err_2: + # lvl. 2 + assert err_2.args[0].args == ( + 'message<hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 ' + 'Forbidden>; title<Access denied | api.hollaex.com used Cloudflare to ' + 'restrict access>; div<Please enable cookies.>; span<Error>; span<1006>; ' + 'span<Ray ID: 8e61e5180c749e8a •>; span<2024-11-21 16:00:49 UTC>; h2<Access ' + 'denied>; h2<What happened?>; p<The owner of this website (api.hollaex.com) ' + 'has banned your IP address (123.123.123.123).>; div<Was this page helpful?>; ' + 'div<Thank you for your feedback!>; span<Cloudflare Ray ID:>; ' + 'strong<8e61e5180c749e8a>; span<Your IP:>; span<123.123.123.123>; ' + 'span<Performance & security by>; a<Cloudflare>', + ) + assert err_2.__cause__.__cause__.__class__ == IndexError + assert err_2.__cause__.__class__ == KeyError + raise ZeroDivisionError from err_2 + except ZeroDivisionError as err_1: + # lvl. 1 + assert "<script>" not in str(err_1) + str_traceback = traceback.format_exc() + assert "<script>" not in str_traceback + + # no summary at lower levels + try: + # lvl. 1 + try: + # lvl. 2 + try: + # lvl. 3 + try: + # lvl. 4 + raise IndexError(RATE_LIMIT_HTML_WITH_MESSAGE) + except IndexError as err_4: + raise KeyError(html_util.get_html_summary_if_relevant(err_4)) from err_4 + except KeyError as err_3: + # lvl. 3 + raise NotImplementedError(err_3) from err_3 + except NotImplementedError as err_2: + # lvl. 2 + raise ZeroDivisionError from err_2 + except ZeroDivisionError as err_1: + # lvl. 1 + # summarized in error message + assert "<script>" not in str(err_1) + # not yet summarized in causes (used by traceback) + str_traceback = traceback.format_exc() + assert "<script>" in str_traceback + + # summarize + html_util.summarize_exception_html_cause_if_relevant(err_1) + str_traceback = traceback.format_exc() + assert "<script>" not in str_traceback + + +RATE_LIMIT_INVALID_HTMLS = [ +""" +<!DOCTYPE html> +<!--[if lt IE 7]> <html class="no-js ie6 oldie" lang="en-US"> <![endif]--> +<!--[if IE 7]> <html class="no-js ie7 oldie" lang="en-US"> <![endif]--> +<!--[if IE 8]> <html class="no-js ie8 oldie" lang="en-US"> <![endif]--> +<!--[if gt IE 8]><!--> <html class="no-js" lang="en-US"> <!--<![endif]--> +<head> +</html> +""", +""" +PLOP +</html> +""", +""" +</ll> +PLOP +</html> +""", +] + +RATE_LIMIT_HTML = """ +<!DOCTYPE html> +<!--[if lt IE 7]> <html class="no-js ie6 oldie" lang="en-US"> <![endif]--> +<!--[if IE 7]> <html class="no-js ie7 oldie" lang="en-US"> <![endif]--> +<!--[if IE 8]> <html class="no-js ie8 oldie" lang="en-US"> <![endif]--> +<!--[if gt IE 8]><!--> <html class="no-js" lang="en-US"> <!--<![endif]--> +<head> +<title>Access denied | api.hollaex.com used Cloudflare to restrict access + + + + + + + + + + + + + +
+ +
+
+

+ Error + 1006 +

+ Ray ID: 8e61e5180c749e8a • + 2024-11-21 16:00:49 UTC +

Access denied

+
+ +
+
+

What happened?

+

The owner of this website (api.hollaex.com) has banned your IP address (123.123.123.123).

+ +
+ + +
+ + + + + + + +
+
+ + + + + +""" +RATE_LIMIT_HTML_WITH_MESSAGE = \ + f"hollaex GET https://api.sandbox.hollaex.com/v2/constants 403 Forbidden {RATE_LIMIT_HTML}" \ No newline at end of file diff --git a/packages/commons/tests/test_json_util.py b/packages/commons/tests/test_json_util.py new file mode 100644 index 0000000000..bb9d6bd528 --- /dev/null +++ b/packages/commons/tests/test_json_util.py @@ -0,0 +1,73 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import decimal + +import pytest + +import octobot_commons.dataclasses as commons_dataclasses +import octobot_commons.json_util as json_util + + +@dataclasses.dataclass +class _SampleFlexibleRecord(commons_dataclasses.FlexibleDataclass): + label: str = "" + nested: dict = dataclasses.field(default_factory=dict) + + +class TestSanitize: + def test_decimal_in_dict_converts_to_float(self): + payload = {"amount": decimal.Decimal("3.14")} + assert json_util.sanitize(payload) is payload + assert payload["amount"] == 3.14 + assert isinstance(payload["amount"], float) + + def test_nested_list_and_dict(self): + payload = { + "items": [ + {"x": decimal.Decimal("1")}, + [decimal.Decimal("2"), 3], + ] + } + json_util.sanitize(payload) + assert payload["items"][0]["x"] == 1.0 + assert payload["items"][1][0] == 2.0 + assert payload["items"][1][1] == 3 + + def test_tuple_preserves_type(self): + payload = ({"a": decimal.Decimal("5")},) + result = json_util.sanitize(payload) + assert isinstance(result, tuple) + assert result[0]["a"] == 5.0 + + def test_flexible_dataclass_mutates_nested_decimal(self): + record = _SampleFlexibleRecord( + label="t", + nested={"v": decimal.Decimal("9.9")}, + ) + json_util.sanitize(record) + assert record.nested["v"] == 9.9 + + +class TestSanitized: + @pytest.mark.asyncio + async def test_wraps_async_result(self): + @json_util.sanitized + async def load(): + return {"d": decimal.Decimal("2.5")} + + out = await load() + assert out == {"d": 2.5} diff --git a/packages/commons/tests/test_list_util.py b/packages/commons/tests/test_list_util.py new file mode 100644 index 0000000000..d86ef0725f --- /dev/null +++ b/packages/commons/tests/test_list_util.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.list_util import flatten_list + + +def test_flatten_list(): + assert flatten_list([["a", "b", "c"], [1, 2, 3], [1, "5"]]) == ["a", "b", "c", 1, 2, 3, 1, "5"] diff --git a/packages/commons/tests/test_monitored_process.py b/packages/commons/tests/test_monitored_process.py new file mode 100644 index 0000000000..a00aa365c2 --- /dev/null +++ b/packages/commons/tests/test_monitored_process.py @@ -0,0 +1,455 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import unittest.mock as mock + +import pytest + +import octobot_commons.monitored_process as monitored_process + +pytestmark = pytest.mark.asyncio + + +class _SimpleProcess(monitored_process.MonitoredProcess): + """Minimal concrete subclass used as system under test.""" + + READINESS_STRING = "ready" + ERROR_PATTERNS = ["FATAL"] + TERMINATE_TIMEOUT_SECONDS = 0.1 + READINESS_TIMEOUT_SECONDS = 1.0 + + def _get_subprocess_args(self) -> list: + return ["mybin", "--flag"] + + +def _make_stream(*lines: str) -> asyncio.StreamReader: + """Create a StreamReader pre-filled with the given lines (each terminated with \\n).""" + reader = asyncio.StreamReader() + for line in lines: + reader.feed_data((line + "\n").encode()) + reader.feed_eof() + return reader + + +def _make_mock_process( + stdout_lines: list = None, + stderr_lines: list = None, + returncode: int = None, + pid: int = 1234, +): + """ + Build a mock asyncio.subprocess.Process whose stdout/stderr are real + StreamReaders pre-loaded with the given lines. + """ + proc = mock.MagicMock() + proc.pid = pid + proc.returncode = returncode + + proc.stdout = _make_stream(*(stdout_lines or [])) + proc.stderr = _make_stream(*(stderr_lines or [])) + + # proc.wait() returns an awaitable that resolves to returncode. + async def _wait(): + # Drain the streams first so _start_output_monitor tasks finish. + while not proc.stdout.at_eof(): + await asyncio.sleep(0) + while not proc.stderr.at_eof(): + await asyncio.sleep(0) + return returncode if returncode is not None else 0 + + proc.wait = _wait + proc.terminate = mock.MagicMock() + proc.kill = mock.MagicMock() + return proc + +async def test_happy_path_readiness_in_stdout(): + proc = _make_mock_process(stdout_lines=["startup log", "ready", "more output"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess() as p: + assert p._ready_event.is_set() + assert p._monitor_error is None + + +async def test_happy_path_readiness_in_stderr(): + proc = _make_mock_process(stderr_lines=["ready"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess() as p: + assert p._ready_event.is_set() + + +async def test_subprocess_args_passed_correctly(): + proc = _make_mock_process(stdout_lines=["ready"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc) as mock_exec: + async with _SimpleProcess(): + pass + mock_exec.assert_called_once() + call_args = mock_exec.call_args + assert call_args.args == ("mybin", "--flag") + + +async def test_env_and_cwd_defaults(): + proc = _make_mock_process(stdout_lines=["ready"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc) as mock_exec: + async with _SimpleProcess(): + pass + kwargs = mock_exec.call_args.kwargs + assert kwargs["cwd"] is None + assert kwargs["env"] is None + + +async def test_stdout_lines_go_to_stdout_buffer(): + proc = _make_mock_process(stdout_lines=["ready", "line2"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess() as p: + # Allow monitor tasks to finish draining + await asyncio.sleep(0.05) + assert "ready" in p._stdout_buffer + assert "line2" in p._stdout_buffer + assert len(p._stderr_buffer) == 0 + + +async def test_stderr_lines_go_to_stderr_buffer(): + proc = _make_mock_process(stdout_lines=["ready"], stderr_lines=["err1", "err2"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess() as p: + await asyncio.sleep(0.05) + assert "err1" in p._stderr_buffer + assert "err2" in p._stderr_buffer + + +async def test_graceful_shutdown_calls_terminate(): + proc = _make_mock_process(stdout_lines=["ready"]) + # returncode is None while running; set to 0 after terminate + proc.returncode = None + + terminate_called = asyncio.Event() + + original_terminate = proc.terminate + + def _terminate(): + proc.returncode = 0 + terminate_called.set() + original_terminate() + + proc.terminate = _terminate + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess(): + pass + + assert terminate_called.is_set() + proc.kill.assert_not_called() + + +async def test_forced_kill_when_terminate_times_out(): + proc = _make_mock_process(stdout_lines=["ready"]) + proc.returncode = None + + # wait() never resolves — simulates a hung process. + hang = asyncio.Event() + + async def _hanging_wait(): + await hang.wait() + return 0 + + proc.wait = _hanging_wait + proc.terminate = mock.MagicMock() + proc.kill = mock.MagicMock(side_effect=lambda: hang.set()) + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess(): + pass + + proc.terminate.assert_called_once() + proc.kill.assert_called_once() + + +async def test_error_pattern_in_stdout_raises_output_error(): + proc = _make_mock_process(stdout_lines=["starting up", "FATAL: something broke"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessOutputError) as exc_info: + async with _SimpleProcess(): + pass + assert "FATAL: something broke" in str(exc_info.value) + assert exc_info.value.stream == "stdout" + + +async def test_error_pattern_in_stderr_raises_output_error(): + proc = _make_mock_process( + stdout_lines=["ready"], + stderr_lines=["FATAL: stderr error"], + ) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessOutputError) as exc_info: + async with _SimpleProcess(): + # Keep the context alive long enough for the stderr monitor to fire. + await asyncio.sleep(0.05) + assert exc_info.value.stream == "stderr" + + +async def test_readiness_timeout_raises_ready_timeout_error(): + class _NoReadyProcess(monitored_process.MonitoredProcess): + READINESS_STRING = "never-appears" + READINESS_TIMEOUT_SECONDS = 0.05 + + def _get_subprocess_args(self) -> list: + return ["mybin"] + + proc = _make_mock_process(stdout_lines=["line without readiness string"]) + # Make wait() block so the process doesn't exit before the timeout. + block = asyncio.Event() + + async def _wait(): + await block.wait() + return 0 + + proc.wait = _wait + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessReadyTimeoutError) as exc_info: + async with _NoReadyProcess(): + pass + block.set() + assert exc_info.value.readiness_string == "never-appears" + assert exc_info.value.timeout_seconds == 0.05 + + +async def test_premature_exit_nonzero_raises_exited_error(): + proc = _make_mock_process( + stderr_lines=["something went wrong"], + returncode=1, + ) + # Ensure returncode is None initially so watch_exit() fires. + proc.returncode = None + + async def _exit_quickly(): + return 1 + + proc.wait = _exit_quickly + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessExitedError) as exc_info: + async with _SimpleProcess(): + # Give monitor tasks a moment to detect the exit. + await asyncio.sleep(0.1) + assert exc_info.value.exit_code == 1 + + +async def test_configuration_error_on_missing_executable(): + with mock.patch( + "asyncio.create_subprocess_exec", + side_effect=FileNotFoundError("mybin not found"), + ): + with pytest.raises(monitored_process.MonitoredProcessConfigurationError): + async with _SimpleProcess(): + pass + + +async def test_monitor_error_reraised_on_aexit(): + """An error captured after __aenter__ is re-raised when the context exits.""" + proc = _make_mock_process(stdout_lines=["ready"]) + proc.returncode = None + + async def _exit_after_ready(): + # Wait a tiny bit so __aenter__ completes first + await asyncio.sleep(0.02) + return 2 + + proc.wait = _exit_after_ready + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessExitedError): + async with _SimpleProcess(): + await asyncio.sleep(0.1) + + +async def test_monitor_error_not_suppressed_after_stdout_buffer_clear(): + """Monitor error is still raised even though __aexit__ clears stdout buffer first.""" + proc = _make_mock_process(stdout_lines=["ready"]) + expected_error = monitored_process.MonitoredProcessExitedError(7) + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessExitedError) as exc_info: + async with _SimpleProcess() as p: + p._stdout_buffer.extend(["ready", "extra line"]) + p._monitor_error = expected_error + assert exc_info.value is expected_error + + +class _CustomOutputError(monitored_process.MonitoredProcessOutputError): + pass + + +class _CustomExitedError(monitored_process.MonitoredProcessExitedError): + pass + + +class _CustomTimeoutError(monitored_process.MonitoredProcessReadyTimeoutError): + pass + + +class _CustomConfigError(monitored_process.MonitoredProcessConfigurationError): + pass + + +class _CustomProcess(_SimpleProcess): + READINESS_TIMEOUT_SECONDS = 0.05 + + def _make_output_error(self, stream, line, stdout_buf, stderr_buf): + return _CustomOutputError("custom", stream, line, stdout_buf, stderr_buf) + + def _make_exited_error(self, exit_code, output_err): + return _CustomExitedError(exit_code, output_err) + + def _make_ready_timeout_error(self): + return _CustomTimeoutError(self.READINESS_STRING, self.READINESS_TIMEOUT_SECONDS) + + def _make_configuration_error(self, message): + return _CustomConfigError(message) + + +async def test_custom_output_error_factory(): + proc = _make_mock_process(stdout_lines=["FATAL: boom"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(_CustomOutputError): + async with _CustomProcess(): + pass + + +async def test_custom_ready_timeout_error_factory(): + class _NeverReady(_CustomProcess): + READINESS_STRING = "never-appears" + + proc = _make_mock_process(stdout_lines=["no ready here"]) + block = asyncio.Event() + + async def _wait(): + await block.wait() + return 0 + + proc.wait = _wait + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(_CustomTimeoutError): + async with _NeverReady(): + pass + block.set() + + +async def test_custom_configuration_error_factory(): + with mock.patch( + "asyncio.create_subprocess_exec", + side_effect=FileNotFoundError, + ): + with pytest.raises(_CustomConfigError): + async with _CustomProcess(): + pass + +async def test_output_error_includes_buffers(): + proc = _make_mock_process( + stdout_lines=["stdout line 1", "FATAL: crash"], + stderr_lines=["stderr line 1"], + ) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessOutputError) as exc_info: + async with _SimpleProcess(): + await asyncio.sleep(0.05) + err = exc_info.value + assert err.std_out_buffer is not None + assert "stdout line 1" in err.std_out_buffer + + +async def test_exited_error_includes_stderr(): + proc = _make_mock_process( + stderr_lines=["crash info"], + returncode=None, + ) + + async def _exit_quickly(): + return 3 + + proc.wait = _exit_quickly + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessExitedError) as exc_info: + async with _SimpleProcess(): + await asyncio.sleep(0.1) + assert exc_info.value.exit_code == 3 + + +async def test_log_output_logs_last_lines_from_buffers(): + """log_output logs the last N lines from stdout and stderr buffers.""" + proc = _make_mock_process( + stdout_lines=["ready", "line2", "line3", "line4"], + stderr_lines=["err1", "err2", "err3"], + ) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess() as p: + await asyncio.sleep(0.05) + with mock.patch.object(p._logger, "info") as mock_info: + p.log_output(last_lines=2) + mock_info.assert_any_call( + "%s last %s lines from stdout and stderr outputs:\n", + p.__class__.__name__, + 2, + ) + mock_info.assert_any_call("stdout:\n%s", "line3\nline4") + mock_info.assert_any_call("stderr:\n%s", "err2\nerr3") + + +async def test_log_output_handles_empty_buffers(): + """log_output does not log stdout/stderr sections when buffers are empty.""" + proc = _make_mock_process(stdout_lines=["ready"]) + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + async with _SimpleProcess() as p: + await asyncio.sleep(0.05) + p._stderr_buffer.clear() + with mock.patch.object(p._logger, "info") as mock_info: + p.log_output(last_lines=5) + # Header + stdout only (stderr buffer was cleared) + assert mock_info.call_count == 2 + mock_info.assert_any_call( + "%s last %s lines from stdout and stderr outputs:\n", + p.__class__.__name__, + 5, + ) + mock_info.assert_any_call("stdout:\n%s", "ready") + + +async def test_empty_readiness_string_never_fires_ready(): + """When READINESS_STRING is empty, the ready event is never set by output.""" + + class _NoReadinessProcess(monitored_process.MonitoredProcess): + READINESS_STRING = "" + READINESS_TIMEOUT_SECONDS = 0.05 + + def _get_subprocess_args(self) -> list: + return ["mybin"] + + proc = _make_mock_process(stdout_lines=["some output"]) + block = asyncio.Event() + + async def _wait(): + await block.wait() + return 0 + + proc.wait = _wait + + with mock.patch("asyncio.create_subprocess_exec", return_value=proc): + with pytest.raises(monitored_process.MonitoredProcessReadyTimeoutError): + async with _NoReadinessProcess(): + pass + block.set() diff --git a/packages/commons/tests/test_number_util.py b/packages/commons/tests/test_number_util.py new file mode 100644 index 0000000000..797c8ce987 --- /dev/null +++ b/packages/commons/tests/test_number_util.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.number_util import round_into_str_with_max_digits, round_into_float_with_max_digits + + +def test_round_into_max_digits(): + assert round_into_str_with_max_digits(125.0256, 2) == '125.03' + assert round_into_float_with_max_digits(125.0210, 2) == 125.02 + assert round_into_float_with_max_digits(1301, 5) == 1301.00000 + assert round_into_float_with_max_digits(59866, 0) == 59866 + assert round_into_float_with_max_digits(1.567824117582484154178, 15) == 1.567824117582484 + assert round_into_float_with_max_digits(0.000000059, 8) == 0.00000006 + assert not round_into_float_with_max_digits(8712661000.1273185137283, 10) == 8712661000.127318 + assert round_into_float_with_max_digits(8712661000.1273185137283, 10) == 8712661000.1273185 + assert round_into_float_with_max_digits(8712661000.1273185137283, 10) == 8712661000.12731851 + assert round_into_float_with_max_digits(8712661000.1273185137283, 10) == 8712661000.127318513 + assert round_into_float_with_max_digits(8712661000.1273185137283, 10) == 8712661000.1273185137 + assert round_into_float_with_max_digits(0.0000000000001, 5) == 0 + assert not round_into_float_with_max_digits(0.0000000000001, 13) == 0 + diff --git a/packages/commons/tests/test_os_util.py b/packages/commons/tests/test_os_util.py new file mode 100644 index 0000000000..a21fb8a765 --- /dev/null +++ b/packages/commons/tests/test_os_util.py @@ -0,0 +1,26 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.os_util as os_util + + +def test_get_cpu_and_ram_usage(): + cpu, percent_ram, used_ram, process_ram, virtual_ram, unique_ram = os_util.get_cpu_and_ram_usage(0.1) + assert isinstance(cpu, float) + assert percent_ram > 0 + assert used_ram > 0 + assert process_ram > 0 + assert virtual_ram > 0 + assert unique_ram > 0 diff --git a/packages/commons/tests/test_pretty_printer.py b/packages/commons/tests/test_pretty_printer.py new file mode 100644 index 0000000000..a4b70db917 --- /dev/null +++ b/packages/commons/tests/test_pretty_printer.py @@ -0,0 +1,129 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import os +import mock + +import octobot_commons.pretty_printer as pretty_printer +import octobot_commons.constants as constants + + +def test_get_min_string_from_number(): + assert pretty_printer.get_min_string_from_number(1) == "1" + assert pretty_printer.get_min_string_from_number(-1) == "-1" + assert pretty_printer.get_min_string_from_number(1.000000000001) == "1" + assert pretty_printer.get_min_string_from_number(1.000000000001, max_digits=1) == "1" + assert pretty_printer.get_min_string_from_number(1.00000001, max_digits=8) == "1.00000001" + assert pretty_printer.get_min_string_from_number(0.00000001, max_digits=8) == "0.00000001" + assert pretty_printer.get_min_string_from_number(-0.000000009, max_digits=8) == "-0.00000001" + assert pretty_printer.get_min_string_from_number(100.00000001, max_digits=8) == "100.00000001" + assert pretty_printer.get_min_string_from_number(-100.00000001, max_digits=8) == "-100.00000001" + assert pretty_printer.get_min_string_from_number(100.09, max_digits=1) == "100.1" + assert pretty_printer.get_min_string_from_number(100.06, max_digits=1) == "100.1" + assert pretty_printer.get_min_string_from_number(100.05, max_digits=1) == "100" + assert pretty_printer.get_min_string_from_number(100.04, max_digits=1) == "100" + assert pretty_printer.get_min_string_from_number(101.04, max_digits=1) == "101" + assert pretty_printer.get_min_string_from_number(100.00000000, max_digits=8) == "100" + assert pretty_printer.get_min_string_from_number(-100.00000000, max_digits=8) == "-100" + # with computed max_digits + assert pretty_printer.get_min_string_from_number(-100.00000001) == "-100" + assert pretty_printer.get_min_string_from_number(100.00000001) == "100" + assert pretty_printer.get_min_string_from_number(-1.12345678) == "-1.12" + assert pretty_printer.get_min_string_from_number(1.12345678) == "1.12" + assert pretty_printer.get_min_string_from_number(0.12345678) == "0.1235" + assert pretty_printer.get_min_string_from_number(-0.12345678) == "-0.1235" + assert pretty_printer.get_min_string_from_number(0.0045678) == "0.004568" + assert pretty_printer.get_min_string_from_number(-0.0045678) == "-0.004568" + assert pretty_printer.get_min_string_from_number(0.0000456789) == "0.00004568" + assert pretty_printer.get_min_string_from_number(-0.0000456789) == "-0.00004568" + + +def test_round_with_decimal_count(): + assert pretty_printer.round_with_decimal_count(None) == 0 + assert pretty_printer.round_with_decimal_count(1) == 1 + assert pretty_printer.round_with_decimal_count(-1) == -1 + assert pretty_printer.round_with_decimal_count(1.000000000001, max_digits=1) == 1 + if not os.getenv('CYTHON_IGNORE'): + with mock.patch.object(pretty_printer, "get_min_string_from_number", mock.Mock(return_value="1")) \ + as get_min_string_from_number_mock: + pretty_printer.round_with_decimal_count(None, max_digits=4) + get_min_string_from_number_mock.assert_not_called() + pretty_printer.round_with_decimal_count(1.011, max_digits=4) + get_min_string_from_number_mock.assert_called_once_with(1.011, 4) + + +def test_global_portfolio_pretty_print(): + portfolio = { + "BTC": {constants.PORTFOLIO_TOTAL: decimal.Decimal(1)}, + "ETH": {constants.PORTFOLIO_TOTAL: decimal.Decimal(2)}, + "PLOP": {constants.PORTFOLIO_TOTAL: decimal.Decimal("0.444")}, + "ADA": {constants.PORTFOLIO_TOTAL: decimal.Decimal(0)} + } + # without ref market + for res in ( + pretty_printer.global_portfolio_pretty_print(portfolio), + pretty_printer.global_portfolio_pretty_print(portfolio, markdown=True) + ): + assert "BTC" in res + assert "1" in res + assert "ETH" in res + assert "2" in res + assert "PLOP" in res + assert "0.444" in res + assert "ADA" not in res + # with ref market + for res in ( + pretty_printer.global_portfolio_pretty_print(portfolio, ref_market_name="BTC"), + pretty_printer.global_portfolio_pretty_print(portfolio, ref_market_name="BTC", markdown=True) + ): + assert "BTC" in res + assert "1" in res + assert "ETH" in res + assert "2" in res + assert "PLOP" in res + assert "0.444" in res + assert "ADA" not in res + currency_values = {"ETH": decimal.Decimal("1.5")} + for res in ( + pretty_printer.global_portfolio_pretty_print(portfolio, currency_values=currency_values, + ref_market_name="BTC"), + pretty_printer.global_portfolio_pretty_print(portfolio, currency_values=currency_values, + ref_market_name="BTC", markdown=True) + ): + assert "BTC" in res + assert "1" in res + assert "ETH" in res + assert "2" in res + assert "3" in res + assert "PLOP" in res + assert "0.444" in res + assert "ADA" not in res + # with separator + for res in ( + pretty_printer.global_portfolio_pretty_print(portfolio, currency_values=currency_values, + ref_market_name="BTC", separator="111"), + pretty_printer.global_portfolio_pretty_print(portfolio, currency_values=currency_values, + ref_market_name="BTC", markdown=True, separator="111") + ): + assert "BTC" in res + assert "1" in res + assert "ETH" in res + assert "2" in res + assert "3" in res + assert "PLOP" in res + assert "0.444" in res + assert "ADA" not in res + assert "111" in res diff --git a/packages/commons/tests/test_proxy_config.py b/packages/commons/tests/test_proxy_config.py new file mode 100644 index 0000000000..0182ca54c7 --- /dev/null +++ b/packages/commons/tests/test_proxy_config.py @@ -0,0 +1,298 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import mock +import pytest +import aiohttp + +import octobot_commons.proxy_config as proxy_config + + +def _open_client_session_from_proxy_config(proxy_configuration, check_session): + async def runner(): + session_kwargs = proxy_configuration.get_aiohttp_session_proxy_args() + async with aiohttp.ClientSession(**session_kwargs) as client_session: + check_session(client_session, session_kwargs) + + asyncio.run(runner()) + + +class TestProxyConfigDefaults: + def test_default_proxy_host_constant(self): + empty_config = proxy_config.ProxyConfig() + assert empty_config.proxy_host == proxy_config.DEFAULT_PROXY_HOST + + +class TestHasRestProxy: + def test_false_when_unset(self): + config = proxy_config.ProxyConfig() + assert config.has_rest_proxy() is False + + def test_true_for_http_https_socks(self): + assert proxy_config.ProxyConfig(http_proxy="http://h:1").has_rest_proxy() is True + assert proxy_config.ProxyConfig(https_proxy="https://h:1").has_rest_proxy() is True + assert proxy_config.ProxyConfig(socks_proxy="socks5://h:1").has_rest_proxy() is True + + def test_true_for_callbacks(self): + dummy_callback = mock.Mock(return_value=None) + assert proxy_config.ProxyConfig(http_proxy_callback=dummy_callback).has_rest_proxy() is True + assert proxy_config.ProxyConfig(https_proxy_callback=dummy_callback).has_rest_proxy() is True + assert proxy_config.ProxyConfig(socks_proxy_callback=dummy_callback).has_rest_proxy() is True + + +class TestHasWebsocketProxy: + def test_false_when_unset(self): + assert proxy_config.ProxyConfig().has_websocket_proxy() is False + + def test_true_for_ws_wss_ws_socks(self): + assert proxy_config.ProxyConfig(ws_proxy="ws://h:1").has_websocket_proxy() is True + assert proxy_config.ProxyConfig(wss_proxy="wss://h:1").has_websocket_proxy() is True + assert proxy_config.ProxyConfig(ws_socks_proxy="socks5://h:1").has_websocket_proxy() is True + + +class TestHasProxy: + def test_true_for_rest_or_websocket(self): + rest_only = proxy_config.ProxyConfig(http_proxy="http://h:1") + ws_only = proxy_config.ProxyConfig(ws_proxy="ws://h:1") + assert rest_only.has_proxy() is True + assert ws_only.has_proxy() is True + + def test_false_when_unset(self): + assert proxy_config.ProxyConfig().has_proxy() is False + + +class TestGetRestProxyUrl: + def test_prioritizes_https_over_http(self): + config = proxy_config.ProxyConfig( + http_proxy="http://a:1", + https_proxy="https://b:2", + ) + assert config.get_rest_proxy_url() == "https://b:2" + + def test_falls_back_to_http(self): + config = proxy_config.ProxyConfig(http_proxy="http://a:1") + assert config.get_rest_proxy_url() == "http://a:1" + + def test_none_when_unset(self): + assert proxy_config.ProxyConfig().get_rest_proxy_url() is None + + +class TestGetWebsocketProxyUrl: + def test_prioritizes_wss_over_ws(self): + config = proxy_config.ProxyConfig( + ws_proxy="ws://a:1", + wss_proxy="wss://b:2", + ) + assert config.get_websocket_proxy_url() == "wss://b:2" + + def test_falls_back_to_ws(self): + config = proxy_config.ProxyConfig(ws_proxy="ws://a:1") + assert config.get_websocket_proxy_url() == "ws://a:1" + + def test_none_when_unset(self): + assert proxy_config.ProxyConfig().get_websocket_proxy_url() is None + + +class TestParseSocksProxyUrlForConnector: + def test_socks5_keeps_url_and_disables_rdns_flag(self): + url = "socks5://proxy.example:1080" + reverse_dns, selected_url = proxy_config.parse_socks_proxy_url_for_connector(url) + assert reverse_dns is False + assert selected_url == url + + def test_socks5h_normalizes_scheme_and_enables_rdns_flag(self): + url = "socks5h://proxy.example:1080" + reverse_dns, selected_url = proxy_config.parse_socks_proxy_url_for_connector(url) + assert reverse_dns is True + assert selected_url == "socks5://proxy.example:1080" + + +class TestSocksProxyFactory: + def test_raises_import_error_when_socks_unavailable(self): + config = proxy_config.ProxyConfig() + with mock.patch.object(proxy_config, "SOCKS_PROXY_AVAILABLE", False): + with pytest.raises(ImportError, match="aiohttp_socks is not available"): + config._socks_proxy_factory("socks5://h:1", "socks_proxy") + + def test_raises_when_url_missing(self): + config = proxy_config.ProxyConfig() + with mock.patch.object(proxy_config, "SOCKS_PROXY_AVAILABLE", True): + with pytest.raises(ValueError, match="socks_proxy proxy url is not set"): + config._socks_proxy_factory(None, "socks_proxy") + + def test_calls_from_url_with_socks5_passes_normalized_url_and_rdns_none(self): + if not proxy_config.SOCKS_PROXY_AVAILABLE: + pytest.skip("aiohttp_socks is not installed") + with mock.patch.object(proxy_config, "SOCKS_PROXY_AVAILABLE", True): + with mock.patch.object( + proxy_config.aiohttp_socks.ProxyConnector, + "from_url", + mock.Mock(return_value="connector"), + ) as from_url_mock: + config = proxy_config.ProxyConfig() + result = config._socks_proxy_factory("socks5://host:1080", "socks_proxy") + from_url_mock.assert_called_once_with("socks5://host:1080", rdns=None) + assert result == "connector" + + def test_calls_from_url_with_socks5h_passes_socks5_url_and_rdns_true(self): + if not proxy_config.SOCKS_PROXY_AVAILABLE: + pytest.skip("aiohttp_socks is not installed") + with mock.patch.object(proxy_config, "SOCKS_PROXY_AVAILABLE", True): + with mock.patch.object( + proxy_config.aiohttp_socks.ProxyConnector, + "from_url", + mock.Mock(return_value="connector-h"), + ) as from_url_mock: + config = proxy_config.ProxyConfig() + result = config._socks_proxy_factory("socks5h://host:1080", "socks_proxy") + from_url_mock.assert_called_once_with("socks5://host:1080", rdns=True) + assert result == "connector-h" + + def test_calls_from_url_integration(self): + if not proxy_config.SOCKS_PROXY_AVAILABLE: + pytest.skip("aiohttp_socks is not installed") + + async def build_connector_in_running_loop(): + config = proxy_config.ProxyConfig() + return config._socks_proxy_factory("socks5://host:1080", "socks_proxy") + + connector = asyncio.run(build_connector_in_running_loop()) + assert isinstance(connector, proxy_config.aiohttp_socks.ProxyConnector) + + +class TestGetRestSocksProxyConnector: + def test_uses_socks_proxy_field(self): + config = proxy_config.ProxyConfig(socks_proxy="socks5://r:1") + with mock.patch.object( + proxy_config.ProxyConfig, + "_socks_proxy_factory", + mock.Mock(return_value="connector-rest"), + ) as factory_mock: + assert config.get_rest_socks_proxy_connector() == "connector-rest" + factory_mock.assert_called_once_with("socks5://r:1", "socks_proxy") + + +class TestGetWebsocketProxyConnector: + def test_uses_wss_proxy_field(self): + config = proxy_config.ProxyConfig(wss_proxy="socks5://w:1") + with mock.patch.object( + proxy_config.ProxyConfig, + "_socks_proxy_factory", + mock.Mock(return_value="connector-ws"), + ) as factory_mock: + assert config.get_websocket_proxy_connector() == "connector-ws" + factory_mock.assert_called_once_with("socks5://w:1", "wss_proxy") + + +class TestGetAiohttpSessionProxyArgs: + def test_empty(self): + assert proxy_config.ProxyConfig().get_aiohttp_session_proxy_args() == {} + + def test_prefers_socks_over_http(self): + config = proxy_config.ProxyConfig( + socks_proxy="socks5://s:1", + http_proxy="http://h:1", + ) + fake_connector = object() + with mock.patch.object( + proxy_config.ProxyConfig, + "get_rest_socks_proxy_connector", + mock.Mock(return_value=fake_connector), + ): + session_args = config.get_aiohttp_session_proxy_args() + assert session_args == {"connector": fake_connector} + + def test_rest_http_only(self): + config = proxy_config.ProxyConfig(http_proxy="http://h:9") + assert config.get_aiohttp_session_proxy_args() == {"proxy": "http://h:9"} + + def test_rest_https_url(self): + config = proxy_config.ProxyConfig(https_proxy="https://h:9") + assert config.get_aiohttp_session_proxy_args() == {"proxy": "https://h:9"} + + def test_rest_via_callback_only(self): + callback = mock.Mock(return_value=None) + config = proxy_config.ProxyConfig(http_proxy_callback=callback) + assert config.get_aiohttp_session_proxy_args() == {"proxy": None} + + def test_creates_client_session_with_empty_proxy_kwargs(self): + def check_session(client_session, session_kwargs): + assert session_kwargs == {} + assert isinstance(client_session, aiohttp.ClientSession) + + _open_client_session_from_proxy_config(proxy_config.ProxyConfig(), check_session) + + def test_creates_client_session_with_http_proxy_kwargs(self): + http_proxy_url = "http://127.0.0.1:9" + + def check_session(client_session, session_kwargs): + assert session_kwargs == {"proxy": http_proxy_url} + assert isinstance(client_session, aiohttp.ClientSession) + + proxy_configuration = proxy_config.ProxyConfig(http_proxy=http_proxy_url) + _open_client_session_from_proxy_config(proxy_configuration, check_session) + + def test_creates_client_session_with_https_proxy_kwargs(self): + https_proxy_url = "https://127.0.0.1:9" + + def check_session(client_session, session_kwargs): + assert session_kwargs == {"proxy": https_proxy_url} + assert isinstance(client_session, aiohttp.ClientSession) + + proxy_configuration = proxy_config.ProxyConfig(https_proxy=https_proxy_url) + _open_client_session_from_proxy_config(proxy_configuration, check_session) + + def test_creates_client_session_with_callback_only_proxy_kwargs(self): + callback = mock.Mock(return_value=None) + + def check_session(client_session, session_kwargs): + assert session_kwargs == {"proxy": None} + assert isinstance(client_session, aiohttp.ClientSession) + + proxy_configuration = proxy_config.ProxyConfig(http_proxy_callback=callback) + _open_client_session_from_proxy_config(proxy_configuration, check_session) + + def test_creates_client_session_with_socks_proxy_kwargs(self): + if not proxy_config.SOCKS_PROXY_AVAILABLE: + pytest.skip("aiohttp_socks is not installed") + + socks_url = "socks5://127.0.0.1:1080" + + def check_session(client_session, session_kwargs): + socks_connector = session_kwargs["connector"] + assert isinstance(socks_connector, proxy_config.aiohttp_socks.ProxyConnector) + assert isinstance(client_session, aiohttp.ClientSession) + assert client_session.connector is socks_connector + + proxy_configuration = proxy_config.ProxyConfig(socks_proxy=socks_url) + _open_client_session_from_proxy_config(proxy_configuration, check_session) + + def test_creates_client_session_prefers_socks_connector_when_also_http_set(self): + if not proxy_config.SOCKS_PROXY_AVAILABLE: + pytest.skip("aiohttp_socks is not installed") + + def check_session(client_session, session_kwargs): + assert "connector" in session_kwargs + assert "proxy" not in session_kwargs + socks_connector = session_kwargs["connector"] + assert isinstance(socks_connector, proxy_config.aiohttp_socks.ProxyConnector) + assert client_session.connector is socks_connector + + proxy_configuration = proxy_config.ProxyConfig( + socks_proxy="socks5://127.0.0.1:1080", + http_proxy="http://127.0.0.1:9", + ) + _open_client_session_from_proxy_config(proxy_configuration, check_session) diff --git a/packages/commons/tests/test_singleton.py b/packages/commons/tests/test_singleton.py new file mode 100644 index 0000000000..c4085a99ce --- /dev/null +++ b/packages/commons/tests/test_singleton.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.singleton.singleton_class import Singleton + + +class SingletonTest(Singleton): + def __init__(self): + self.test_attr = "" + + +instance = SingletonTest().instance() + + +def test_create_instance(): + assert SingletonTest.instance() is instance + + +def test_instance_attribute(): + instance.test_attr = "test" + assert SingletonTest.instance().test_attr == "test" diff --git a/packages/commons/tests/test_str_util.py b/packages/commons/tests/test_str_util.py new file mode 100644 index 0000000000..603cd4812b --- /dev/null +++ b/packages/commons/tests/test_str_util.py @@ -0,0 +1,38 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.str_util as str_util + + +def test_camel_to_snake_empty(): + assert str_util.camel_to_snake("") == "" + + +def test_camel_to_snake_single_letter(): + assert str_util.camel_to_snake("A") == "a" + + +def test_camel_to_snake_trading_mode_style(): + assert str_util.camel_to_snake("GridTradingMode") == "grid_trading_mode" + assert str_util.camel_to_snake("IndexTradingMode") == "index_trading_mode" + assert str_util.camel_to_snake("AbstractTradingMode") == "abstract_trading_mode" + + +def test_camel_to_snake_already_lowercase(): + assert str_util.camel_to_snake("already_snake") == "already_snake" + + +def test_camel_to_snake_single_word_upper(): + assert str_util.camel_to_snake("Trading") == "trading" diff --git a/packages/commons/tests/test_time_frame_manager.py b/packages/commons/tests/test_time_frame_manager.py new file mode 100644 index 0000000000..b6ce8cabf1 --- /dev/null +++ b/packages/commons/tests/test_time_frame_manager.py @@ -0,0 +1,58 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.enums import TimeFrames +from octobot_commons.tests.test_config import load_test_config +from octobot_commons.time_frame_manager import get_config_time_frame, parse_time_frames, find_min_time_frame, \ + get_previous_time_frame, get_display_time_frame, sort_time_frames + + +def test_get_config_time_frame(): + assert get_config_time_frame(load_test_config()) == [TimeFrames("1h"), TimeFrames("4h"), TimeFrames("1d")] + + +def test_parse_time_frames(): + assert parse_time_frames(["3d", "5d", "1m", "6h"]) == [TimeFrames("3d"), TimeFrames("1m"), TimeFrames("6h")] + + +def test_sort_time_frames(): + assert sort_time_frames([TimeFrames("3d"), TimeFrames("1m"), TimeFrames("6h")]) == \ + [TimeFrames("1m"), TimeFrames("6h"), TimeFrames("3d")] + assert sort_time_frames([TimeFrames("1M"), TimeFrames("3d"), TimeFrames("12h"), TimeFrames("1h"), + TimeFrames("1m"), TimeFrames("6h")]) == \ + [TimeFrames("1m"), TimeFrames("1h"), TimeFrames("6h"), TimeFrames("12h"), TimeFrames("3d"), TimeFrames("1M")] + + +def test_find_min_time_frame(): + assert find_min_time_frame([TimeFrames.FOUR_HOURS, TimeFrames.ONE_DAY, TimeFrames.ONE_MONTH, + TimeFrames.FIFTEEN_MINUTES]) == TimeFrames.FIFTEEN_MINUTES + assert find_min_time_frame([TimeFrames.ONE_MONTH, TimeFrames.ONE_WEEK]) == TimeFrames.ONE_WEEK + assert find_min_time_frame([TimeFrames.ONE_MINUTE]) == TimeFrames.ONE_MINUTE + + +def test_get_previous_time_frame(): + assert get_previous_time_frame(get_config_time_frame(load_test_config()), + TimeFrames.ONE_DAY, TimeFrames.ONE_DAY) == TimeFrames.FOUR_HOURS + assert get_previous_time_frame(get_config_time_frame(load_test_config()), + TimeFrames.ONE_MINUTE, TimeFrames.ONE_MINUTE) == TimeFrames.ONE_MINUTE + assert get_previous_time_frame(get_config_time_frame(load_test_config()), + TimeFrames.ONE_HOUR, TimeFrames.ONE_HOUR) == TimeFrames.ONE_HOUR + assert get_previous_time_frame(get_config_time_frame(load_test_config()), + TimeFrames.ONE_MONTH, TimeFrames.ONE_MONTH) == TimeFrames.ONE_DAY + + +def test_get_display_time_frame(): + assert get_display_time_frame(load_test_config(), TimeFrames.ONE_MONTH) == TimeFrames.ONE_DAY + assert get_display_time_frame(load_test_config(), TimeFrames.FOUR_HOURS) == TimeFrames.FOUR_HOURS diff --git a/packages/commons/tests/test_timestamp_util.py b/packages/commons/tests/test_timestamp_util.py new file mode 100644 index 0000000000..703c69a869 --- /dev/null +++ b/packages/commons/tests/test_timestamp_util.py @@ -0,0 +1,68 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time +import mock +from datetime import timezone, timedelta + +from octobot_commons.timestamp_util import is_valid_timestamp, get_now_time, datetime_to_timestamp, \ + convert_timestamp_to_datetime + + +def test_is_valid_timestamp(): + assert not is_valid_timestamp(get_now_time()) + assert is_valid_timestamp(time.time()) + + +def test_datetime_to_timestamp(): + date_str = convert_timestamp_to_datetime(1737331200, time_format="%d/%m/%y %H:%M", local_timezone=True) + assert datetime_to_timestamp(date_str, "%d/%m/%y %H:%M") == 1737331200 + + +def test_convert_timestamp_to_datetime_default(): + # 1 Jan 2020 00:00 UTC + ts = 1577836800 + assert convert_timestamp_to_datetime(ts) == "01/01/20 00:00" + + +def test_convert_timestamp_to_datetime_custom_format(): + ts = 1577836800 + assert convert_timestamp_to_datetime(ts, time_format="%Y-%m-%d") == "2020-01-01" + + +def test_convert_timestamp_to_datetime_local_timezone(monkeypatch): + # Patch LOCAL_TIMEZONE to UTC+2 for test + dummy_tz = timezone(timedelta(hours=2)) + with mock.patch("octobot_commons.timestamp_util.LOCAL_TIMEZONE", dummy_tz): + ts = 1577836800 # 1 Jan 2020 00:00 UTC + # Should be 02:00 in UTC+2 + assert convert_timestamp_to_datetime(ts, local_timezone=True) == "01/01/20 02:00" + + +def test_convert_timestamp_to_datetime_epoch(): + assert convert_timestamp_to_datetime(0) == "01/01/70 00:00" + + +def test_convert_timestamp_to_datetime_far_future(): + ts = 32503680000 # year 3000 + assert convert_timestamp_to_datetime(ts, time_format="%Y") == "3000" + + +def test_convert_timestamp_to_datetime_negative(): + # Negative timestamp: before epoch + ts = -1 + result = convert_timestamp_to_datetime(ts) + # Should not raise, but result is system-dependent + assert isinstance(result, str) diff --git a/packages/commons/tests/thread_util.py b/packages/commons/tests/thread_util.py new file mode 100644 index 0000000000..0567d71791 --- /dev/null +++ b/packages/commons/tests/thread_util.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import concurrent.futures as futures +import time + +import octobot_commons.thread_util as thread_util + + +def test_method(): + time.sleep(100000000) + + +def test_stop_thread_pool_executor_non_gracefully(): + executor = futures.ThreadPoolExecutor(max_workers=2) + for _ in range(2): + executor.submit(test_method) + thread_util.stop_thread_pool_executor_non_gracefully(executor) diff --git a/packages/commons/tests/tree/__init__.py b/packages/commons/tests/tree/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/commons/tests/tree/test_base_tree.py b/packages/commons/tests/tree/test_base_tree.py new file mode 100644 index 0000000000..6fa2d53065 --- /dev/null +++ b/packages/commons/tests/tree/test_base_tree.py @@ -0,0 +1,148 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +from octobot_commons.tree import BaseTree, NodeExistsError + + +def test_base_tree_init(): + assert BaseTree() + + +def test_base_tree_get_new_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + assert base_tree.root.children == {"test": created_node} + + +def test_base_tree_get_existing_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + get_node_result = base_tree.get_or_create_node(["test"]) + assert created_node is get_node_result + + +def test_base_tree_get_not_existing_node(): + base_tree = BaseTree() + with pytest.raises(NodeExistsError): + assert base_tree.get_node(["test"]) is None + + +def test_base_tree_delete_existing_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + delete_node_result = base_tree.delete_node(["test"]) + assert created_node is delete_node_result + with pytest.raises(NodeExistsError): + base_tree.get_node(["test"]) + + +def test_base_tree_delete_not_existing_node(): + base_tree = BaseTree() + with pytest.raises(NodeExistsError): + base_tree.delete_node(["test"]) + + +def test_base_tree_get_new_relative_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + relative_created_node = base_tree.get_or_create_node(["test-relative"], starting_node=created_node) + get_node_result = base_tree.get_or_create_node(["test", "test-relative"]) + assert relative_created_node is get_node_result + + +def test_base_tree_get_relative_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + relative_created_node = base_tree.get_or_create_node(["test", "test-relative"]) + get_node_result = base_tree.get_or_create_node(["test-relative"], starting_node=created_node) + assert relative_created_node is get_node_result + + +def test_base_tree_delete_relative_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + relative_created_node = base_tree.get_or_create_node(["test", "test-relative"]) + delete_node_result = base_tree.delete_node(["test-relative"], starting_node=created_node) + assert relative_created_node is delete_node_result + assert base_tree.get_node(["test"]) is created_node + with pytest.raises(NodeExistsError): + base_tree.get_node(["test", "test-relative"]) + + +def test_base_tree_set_node(): + base_tree = BaseTree() + created_node = base_tree.get_or_create_node(["test"]) + base_tree.set_node(1, None, created_node) + assert created_node.node_value == 1 + assert created_node.node_type is None + base_tree.set_node(5, None, created_node, timestamp=10) + assert created_node.node_value == 5 + assert created_node.node_type is None + assert created_node.node_value_time == 10 + + +def test_base_tree_set_node_at_path(): + base_tree = BaseTree() + base_tree.set_node_at_path("test-string", "test-type", ["test", "test2", "test3"]) + assert base_tree.get_or_create_node(["test"]) + assert base_tree.get_or_create_node(["test", "test2"]) + assert base_tree.get_or_create_node(["test", "test2", "test3"]) + assert base_tree.get_or_create_node(["test"]).children + assert base_tree.get_or_create_node(["test", "test2"]).children + assert not base_tree.get_or_create_node(["test", "test2", "test3"]).children + assert base_tree.get_or_create_node(["test", "test2", "test3"]).node_value == "test-string" + assert base_tree.get_or_create_node(["test", "test2", "test3"]).node_type == "test-type" + + +def test_get_children_keys(): + base_tree = BaseTree() + base_tree.set_node_at_path("test-string", "test-type", ["test", "test2", "test3"]) + base_tree.set_node_at_path("test-string_2", None, ["test", "test2", "test3_2"]) + base_tree.set_node_at_path("test-string_3", None, ["test", "test2"]) + base_tree.set_node_at_path("test-string_4", None, ["test", "test3"]) + assert base_tree.get_children_keys([]) == ["test"] + assert base_tree.get_children_keys(["test"]) == ["test2", "test3"] + assert base_tree.get_children_keys(["test", "test2"]) == ["test3", "test3_2"] + with pytest.raises(NodeExistsError): + assert base_tree.get_children_keys(["test", "testXXXX"]) == ["test3"] + + +def test_get_nested_children_with_path(): + base_tree = BaseTree() + base_tree.set_node_at_path("test-string", "test-type", ["test", "test2", "test3"]) + base_tree.set_node_at_path("test-string_2", None, ["test", "test2", "test3_2"]) + base_tree.set_node_at_path("test-string_3", None, ["test", "test2"]) + base_tree.set_node_at_path("test-string_4", None, ["test", "test3"]) + assert [(n.node_value, p) for n, p in base_tree.get_nested_children_with_path()] == [ + ("test-string", ["test", "test2", "test3"]), + ("test-string_2", ["test", "test2", "test3_2"]), + ("test-string_4", ["test", "test3"]) + ] + assert [(n.node_value, p) for n, p in base_tree.get_nested_children_with_path(select_leaves_only=False)] == [ + (None, []), + (None, ['test']), + ("test-string_3", ["test", "test2"]), + ("test-string", ["test", "test2", "test3"]), + ("test-string_2", ["test", "test2", "test3_2"]), + ("test-string_4", ["test", "test3"]) + ] + assert [(n.node_value, p) for n, p in base_tree.get_nested_children_with_path(path=["test", "test2"], + select_leaves_only=False)] == [ + ("test-string_3", ["test", "test2"]), + ("test-string", ["test", "test2", "test3"]), + ("test-string_2", ["test", "test2", "test3_2"]) + ] diff --git a/packages/commons/tests/tree/test_event_tree.py b/packages/commons/tests/tree/test_event_tree.py new file mode 100644 index 0000000000..c88e00ebef --- /dev/null +++ b/packages/commons/tests/tree/test_event_tree.py @@ -0,0 +1,161 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import pytest + +import octobot_commons.tree as tree +import octobot_commons.asyncio_tools as asyncio_tools + + +@pytest.fixture +def event_tree(): + return tree.EventTree() + + +@pytest.fixture +def event_tree_node(): + return tree.EventTreeNode() + + +def test_event_tree_create_node_at_path(event_tree): + assert event_tree.root.children == {} + event_tree.create_node_at_path(["1"], True) + assert isinstance(event_tree.get_node(["1"]), tree.EventTreeNode) + assert event_tree.get_node(["1"]).is_triggered() + event_tree.create_node_at_path(["2", "abtc"], False) + assert not event_tree.get_node(["2", "abtc"]).is_triggered() + assert isinstance(event_tree.get_node(["1"]), tree.EventTreeNode) + assert isinstance(event_tree.get_node(["2", "abtc"]), tree.EventTreeNode) + + +def test_event_tree_node_trigger(event_tree_node): + assert not event_tree_node.is_triggered() + event_tree_node.trigger() + assert event_tree_node.is_triggered() + + +def test_event_tree_node_bind_parent(event_tree_node): + parent_node = tree.EventTreeNode() + event_tree_node.bind_parent(parent_node) + assert event_tree_node.get_parent() is parent_node + parent_node.children["d"] = event_tree_node + assert not event_tree_node.is_triggered() + assert not parent_node.is_triggered() + event_tree_node.trigger() + assert event_tree_node.is_triggered() + assert parent_node.is_triggered() + + +@pytest.mark.asyncio +async def test_event_tree_node_wait(event_tree_node): + waiter = asyncio.create_task(asyncio.wait_for(event_tree_node.wait(), 0.1)) + for _ in range(10): + # let async loop end task if necessary + await asyncio_tools.wait_asyncio_next_cycle() + assert not event_tree_node.is_triggered() + assert not waiter.done() + event_tree_node.trigger() + assert not waiter.done() + # need 2 cycles to both end the waiter and end the wait_for + for _ in range(2): + await asyncio_tools.wait_asyncio_next_cycle() + assert waiter.done() + + +def test_event_tree_node_set_child(event_tree_node): + assert event_tree_node.children == {} + node = tree.EventTreeNode() + event_tree_node.set_child("hi", node) + assert event_tree_node.children == { + "hi": node + } + + +def test_event_tree_node_pop_child(event_tree_node): + assert event_tree_node.children == {} + node = tree.EventTreeNode() + event_tree_node.set_child("hi", node) + assert event_tree_node.pop_child("hi", None) is node + assert event_tree_node.children == {} + assert event_tree_node.pop_child("hi", None) is None + assert event_tree_node.children == {} + + +def test_event_tree_node_on_child_change(event_tree_node): + assert not event_tree_node.is_triggered() + event_tree_node.on_child_change() + # no child, need manual trigger + assert not event_tree_node.is_triggered() + event_tree_node.trigger() + assert event_tree_node.is_triggered() + event_tree_node.on_child_change() + # still triggered + assert event_tree_node.is_triggered() + + parent_node = tree.EventTreeNode() + parent_node.children["a"] = event_tree_node + assert not parent_node.is_triggered() + event_tree_node.bind_parent(parent_node) + assert parent_node.is_triggered() + + event_tree_node.clear() + assert not event_tree_node.is_triggered() + assert not parent_node.is_triggered() + + other_child = tree.EventTreeNode() + parent_node.children["b"] = other_child + other_child.bind_parent(parent_node) + assert not parent_node.is_triggered() + + event_tree_node.trigger() + assert not other_child.is_triggered() + assert not parent_node.is_triggered() + + other_child.trigger() + assert event_tree_node.is_triggered() + assert parent_node.is_triggered() + + other_child.clear() + assert event_tree_node.is_triggered() + assert not parent_node.is_triggered() + + +def test_event_tree_node_get_path_to_root(event_tree_node): + assert event_tree_node.children == {} + assert event_tree_node.get_path_to_root() == [] + parent = tree.EventTreeNode() + event_tree_node.bind_parent(parent) + parent.set_child("hi", event_tree_node) + assert event_tree_node.get_path_to_root() == ["hi"] + parent_2 = tree.EventTreeNode() + parent.bind_parent(parent_2) + parent_2.set_child("hello", parent) + assert event_tree_node.get_path_to_root() == ["hello", "hi"] + assert parent.get_path_to_root() == ["hello"] + assert parent_2.get_path_to_root() == [] + other_child = tree.EventTreeNode() + other_child.bind_parent(parent) + parent.set_child("ho", other_child) + assert event_tree_node.get_path_to_root() == ["hello", "hi"] + assert other_child.get_path_to_root() == ["hello", "ho"] + + +def test_event_tree_node_get_child_key(event_tree_node): + with pytest.raises(KeyError): + event_tree_node.get_child_key("dd") + other = tree.EventTreeNode() + event_tree_node.children["dd"] = other + assert event_tree_node.get_child_key(other) == "dd" diff --git a/packages/copy/BUILD b/packages/copy/BUILD new file mode 100644 index 0000000000..1c6d03b2ea --- /dev/null +++ b/packages/copy/BUILD @@ -0,0 +1,17 @@ +python_sources( + name="octobot_copy", + sources=["octobot_copy/**/*.py"], + dependencies=[ + "packages/commons:octobot_commons", + "packages/trading:octobot_trading", + "//:tentacles", + ], +) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_copy", + ], +) \ No newline at end of file diff --git a/packages/copy/README.md b/packages/copy/README.md new file mode 100644 index 0000000000..2ca4d0472a --- /dev/null +++ b/packages/copy/README.md @@ -0,0 +1 @@ +# OctoBot copy \ No newline at end of file diff --git a/packages/copy/octobot_copy/__init__.py b/packages/copy/octobot_copy/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/copy/octobot_copy/constants.py b/packages/copy/octobot_copy/constants.py new file mode 100644 index 0000000000..817780d21e --- /dev/null +++ b/packages/copy/octobot_copy/constants.py @@ -0,0 +1,50 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal + +import octobot_commons.constants + + +# Rebalance planner thresholds +ALLOWED_1_TO_1_SWAP_COUNTS = 1 +MIN_RATIO_TO_SELL = decimal.Decimal("0.0001") # 1/10000 +QUOTE_ASSET_TO_TARGETED_SWAP_RATIO_THRESHOLD = decimal.Decimal("0.1") # 10% + +# Index / rebalancing trading config keys (shared by planner, index trading mode, profiles). +CONFIG_INDEX_CONTENT = "index_content" +CONFIG_REBALANCE_TRIGGER_MIN_PERCENT = "rebalance_trigger_min_percent" +CONFIG_REBALANCE_TRIGGER_PROFILES = "rebalance_trigger_profiles" +CONFIG_SELECTED_REBALANCE_TRIGGER_PROFILE = "selected_rebalance_trigger_profile" +CONFIG_REBALANCE_TRIGGER_PROFILE_NAME = "name" +CONFIG_REBALANCE_TRIGGER_PROFILE_MIN_PERCENT = "min_percent" + +# Exchange / order lifecycle (seconds) +FILL_ORDER_TIMEOUT = 60 + +# Mirrored orphan grace: max |simulated_copier_pair_share − ref_pair_share| to allow deferral +DEFAULT_MIRRORED_ORPHAN_GRACE_PAIR_RATIO_MAX_DELTA = decimal.Decimal("0.02") # 2% +DEFAULT_MIRRORED_ORPHAN_ORDERS_GRACE_ABORT_THRESHOLD = 2 +DEFAULT_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD = 10 + +# Account keys +PORTFOLIO_ASSET_ALLOCATION_RATIO = "allocation_ratio" + +# Account copy settings +DEFAULT_COPY_WAITING_TIME = octobot_commons.constants.HOURS_TO_SECONDS * 4 # wake up every 4 hours by default + +# Order tags: reference mirror orders vs rebalance limit orders (orphan cancellation scope) +MIRRORED_ORDER_TAG = "mirrored_order" +REBALANCER_ORDER_TAG = "rebalancer_order" diff --git a/packages/copy/octobot_copy/copiers/__init__.py b/packages/copy/octobot_copy/copiers/__init__.py new file mode 100644 index 0000000000..2485875b86 --- /dev/null +++ b/packages/copy/octobot_copy/copiers/__init__.py @@ -0,0 +1,13 @@ +from octobot_copy.copiers.account_copier import AccountCopier +from octobot_copy.copiers.spot_account_copier import SpotAccountCopier +from octobot_copy.copiers.futures_account_copier import FuturesAccountCopier +from octobot_copy.copiers.option_account_copier import OptionAccountCopier +from octobot_copy.copiers.account_copier_factory import create_account_copier + +__all__ = [ + "AccountCopier", + "SpotAccountCopier", + "FuturesAccountCopier", + "OptionAccountCopier", + "create_account_copier", +] diff --git a/packages/copy/octobot_copy/copiers/account_copier.py b/packages/copy/octobot_copy/copiers/account_copier.py new file mode 100644 index 0000000000..77ab170f0d --- /dev/null +++ b/packages/copy/octobot_copy/copiers/account_copier.py @@ -0,0 +1,233 @@ +import time +import typing + +import octobot_commons.logging as logging +import octobot_trading.constants as trading_constants +import octobot_trading.errors as trading_errors +import octobot_trading.personal_data as trading_personal_data + +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_copy.errors as copy_errors +import octobot_copy.exchange as copy_exchange +import octobot_copy.orders_mirroring.orders_synchronizer as orders_synchronizer_module +import octobot_copy.rebalancing as copy_rebalancing + + +class AccountCopier: + """ + Copies a reference spot-style account allocation onto the copier exchange by planning with + BaseRebalanceActionsPlanner and executing with an AbstractRebalancer. + + Target weights are derived from reference_account (quantity-proportional). Holdings ratios + and order execution use the live portfolio behind exchange_interface. Callers must ensure + traded pairs on the copier exchange cover the assets to trade. + copier_account is reserved for future snapshot/offline use and is not used by the rebalance pipeline. + copy_settings controls reference_market, rebalance thresholds, and synchronization. + Reference open orders in reference_account.orders are synched onto the copier after each successful run (spot). + """ + + def __init__( + self, + reference_account: copy_entities.Account, + exchange_interface: copy_exchange.ExchangeInterface, + copy_settings: copy_entities.AccountCopySettings, + ) -> None: + self._reference_account: copy_entities.Account = reference_account + self._copier_exchange_interface: copy_exchange.ExchangeInterface = exchange_interface + self._copy_settings: copy_entities.AccountCopySettings = copy_settings + self._orders_synchronizer: orders_synchronizer_module.OrdersSynchronizer = ( + orders_synchronizer_module.OrdersSynchronizer( + reference_account, + exchange_interface, + copy_settings, + ) + ) + + async def copy_account(self) -> copy_entities.AccountCopyResult: + await self._resync_if_mirrored_open_order_grace_period_elapsed() + rebalancer, should_rebalance, details = await self._prepare_rebalance_plan() + if self._orders_synchronizer.is_mirrored_orphan_grace_invalid_no_compliant_snapshot(): + self._get_logger().info( + "Forcing rebalance: mirrored orphan grace has no compliant reference snapshot " + f"on [{self._copier_exchange_interface.exchange_name}]" + ) + should_rebalance = True + if self._orders_synchronizer.is_mirrored_orphan_grace_aborted_for_missed_historical_signals(): + self._get_logger().info( + "Forcing rebalance: mirrored orphan grace aborted for missed historical signals " + f"on [{self._copier_exchange_interface.exchange_name}]" + ) + should_rebalance = True + rebalance_orders: list = [] + try: + if should_rebalance: + if self._orders_synchronizer.is_mirrored_orphan_grace_blocking_rebalance(): + self._get_logger().info( + "Skipping rebalance: mirrored open-order grace period is active " + f"on [{self._copier_exchange_interface.exchange_name}]" + ) + else: + self._get_logger().info( + f"Executing rebalance on [{self._copier_exchange_interface.exchange_name}]" + ) + await self._orders_synchronizer.cancel_orders_pending_synchronization(None) + rebalance_orders = await self._run_rebalance(rebalancer, details) + else: + self._get_logger().info("No rebalance needed") + if rebalance_orders: + await self._copier_exchange_interface.portfolio.refresh_portfolio() + synched_orders = await self._synchronize_reference_open_orders() + all_orders: list = rebalance_orders + synched_orders + return copy_entities.AccountCopyResult(created_orders=all_orders) + except (trading_errors.MissingMinimalExchangeTradeVolume, copy_errors.RebalanceAborted) as err: + self._get_logger().exception( + err, + True, + f"Aborted rebalance on {self._copier_exchange_interface.exchange_name}: {err} ({err.__class__.__name__})", + ) + return copy_entities.AccountCopyResult(created_orders=[]) + finally: + self._get_logger().info("Portfolio rebalance process complete") + + async def _resync_if_mirrored_open_order_grace_period_elapsed(self) -> None: + copy_settings = self._copy_settings + grace_seconds = copy_settings.mirrored_orphan_cancel_grace_seconds + grace_started_at = self._orders_synchronizer.get_mirrored_orphan_grace_started_at() + if ( + grace_seconds > 0 + and grace_started_at is not None + and grace_started_at > 0 + and (time.time() - grace_started_at) >= grace_seconds + ): + self._get_logger().info( + "Mirrored open-order grace period elapsed before this run; " + f"aborting grace and resyncing (cancel orphans, refresh portfolio) on " + f"[{self._copier_exchange_interface.exchange_name}]" + ) + self._orders_synchronizer.abort_mirrored_orphan_grace() + await self._orders_synchronizer.cancel_orders_pending_synchronization(None) + await self._copier_exchange_interface.portfolio.refresh_portfolio() + + async def _synchronize_reference_open_orders(self) -> list[trading_personal_data.Order]: + return await self._orders_synchronizer.synchronize() + + def get_rebalancer_class(self) -> type[copy_rebalancing.AbstractRebalancer]: + raise NotImplementedError("get_rebalancer_class is not implemented") + + async def _prepare_rebalance_plan( + self, + ) -> tuple[copy_rebalancing.AbstractRebalancer, bool, dict]: + rebalancing_client = self._create_rebalancing_client() + planner = self._create_rebalance_actions_planner(rebalancing_client) + self._sync_planner(planner) + planner.update_distribution(adapt_to_holdings=False, force_latest=False) + rebalancer = self._create_rebalancer(planner) + for coin in planner.targeted_coins: + await rebalancer.prepare_coin_rebalancing(coin) + should_rebalance, details = planner.get_rebalance_details() + return rebalancer, should_rebalance, details + + async def _run_rebalance( + self, + rebalancer: copy_rebalancing.AbstractRebalancer, + details: dict, + ) -> list[trading_personal_data.Order]: + orders: list = [] + efficient_orders = await rebalancer.try_efficient_spot_rebalance(details, None) + if efficient_orders is not None: + self._get_logger().info("Efficient spot rebalance completed (skipped sell/split pipeline)") + if efficient_orders and not self._copier_exchange_interface.orders.automatically_synchronize_orders(): + await self._copier_exchange_interface.portfolio.refresh_portfolio() + return efficient_orders + self._get_logger().info("Step 1/3: ensuring enough funds are available for rebalance") + await rebalancer.ensure_enough_funds_to_buy_after_selling() + is_simple_buy_without_selling = rebalancer.can_simply_buy_coins_without_selling(details) + reference_market = self._copier_exchange_interface.portfolio.reference_market + if is_simple_buy_without_selling: + self._get_logger().info(f"Step 2/3: skipped: no coin to sell for {reference_market}") + else: + self._get_logger().info(f"Step 2/3: selling coins to free {reference_market}") + if sell_orders := await rebalancer.sell_targeted_coins_for_reference_market(details, None): + if not self._copier_exchange_interface.orders.automatically_synchronize_orders(): + await self._copier_exchange_interface.portfolio.refresh_portfolio() + orders += sell_orders + self._get_logger().info(f"Step 3/3: buying coins using {reference_market}") + if buy_orders := await rebalancer.split_reference_market_into_targeted_coins( + details, + is_simple_buy_without_selling, + None, + ): + if not self._copier_exchange_interface.orders.automatically_synchronize_orders(): + await self._copier_exchange_interface.portfolio.refresh_portfolio() + orders += buy_orders + return orders + + def _get_synthetic_config(self) -> dict: + return { + copy_constants.CONFIG_INDEX_CONTENT: self._reference_account.create_assets_distribution(), + copy_constants.CONFIG_REBALANCE_TRIGGER_MIN_PERCENT: float( + self._copy_settings.rebalance_trigger_min_ratio * trading_constants.ONE_HUNDRED + ), + } + + def _get_ideal_distribution(self, config: typing.Optional[dict]) -> typing.Optional[list]: + if not config: + return None + return config.get(copy_constants.CONFIG_INDEX_CONTENT) + + def _create_rebalancing_client(self) -> copy_rebalancing.RebalancingClientInterface: + return copy_rebalancing.RebalancingClientInterface( + client_name=self.__class__.__name__, + min_order_size_margin=self._copy_settings.min_order_size_margin, + rebalance_trigger_min_ratio=self._copy_settings.rebalance_trigger_min_ratio, + quote_asset_rebalance_ratio_threshold=self._copy_settings.quote_asset_rebalance_ratio_threshold, + reference_market_ratio=self._copy_settings.reference_market_ratio, + sell_untargeted_traded_coins=self._copy_settings.sell_untargeted_traded_coins, + synchronization_policy=self._copy_settings.synchronization_policy, + allow_skip_asset=self._copy_settings.allow_skip_asset, + can_include_assets_in_open_orders_in_holdings_ratio=( + self._copy_settings.can_include_assets_in_open_orders_in_holdings_ratio + ), + raise_all_order_errors=True, + get_config=self._get_synthetic_config, + get_previous_config=lambda: None, # not implemented for now + get_historical_configs=lambda _ft, _tt: [], # not implemented for now + get_ideal_distribution=self._get_ideal_distribution, + ) + + def _create_rebalance_actions_planner( + self, + rebalancing_client: copy_rebalancing.RebalancingClientInterface, + ) -> copy_rebalancing.BaseRebalanceActionsPlanner: + return copy_rebalancing.BaseRebalanceActionsPlanner( + exchange_interface=self._copier_exchange_interface, + client=rebalancing_client, + ) + + def _sync_planner(self, planner: copy_rebalancing.BaseRebalanceActionsPlanner) -> None: + planner.update( + min_order_size_margin=self._copy_settings.min_order_size_margin, + synchronization_policy=self._copy_settings.synchronization_policy, + rebalance_trigger_min_ratio=self._copy_settings.rebalance_trigger_min_ratio, + quote_asset_rebalance_ratio_threshold=self._copy_settings.quote_asset_rebalance_ratio_threshold, + reference_market_ratio=self._copy_settings.reference_market_ratio, + sell_untargeted_traded_coins=self._copy_settings.sell_untargeted_traded_coins, + allow_skip_asset=self._copy_settings.allow_skip_asset, + can_include_assets_in_open_orders_in_holdings_ratio=( + self._copy_settings.can_include_assets_in_open_orders_in_holdings_ratio + ), + ) + + def _create_rebalancer( + self, + planner: copy_rebalancing.BaseRebalanceActionsPlanner, + ) -> copy_rebalancing.AbstractRebalancer: + return self.get_rebalancer_class()( + self._copier_exchange_interface, + planner, + {}, + ) + + def _get_logger(self) -> logging.BotLogger: + return logging.get_logger(self.__class__.__name__) diff --git a/packages/copy/octobot_copy/copiers/account_copier_factory.py b/packages/copy/octobot_copy/copiers/account_copier_factory.py new file mode 100644 index 0000000000..73acbe34e5 --- /dev/null +++ b/packages/copy/octobot_copy/copiers/account_copier_factory.py @@ -0,0 +1,44 @@ +import typing + +import octobot_copy.copiers.account_copier as account_copier +import octobot_copy.copiers.futures_account_copier as futures_account_copier +import octobot_copy.copiers.option_account_copier as option_account_copier +import octobot_copy.copiers.spot_account_copier as spot_account_copier +import octobot_copy.entities as copy_entities +import octobot_copy.exchange as copy_exchange + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges + import octobot_trading.modes + + +def create_account_copier( + reference_account: copy_entities.Account, + copy_settings: copy_entities.AccountCopySettings, + copier_exchange_manager: "octobot_trading.exchanges.ExchangeManager", + copier_trading_mode: typing.Optional["octobot_trading.modes.AbstractTradingMode"] = None, +) -> account_copier.AccountCopier: + """ + Build an ExchangeInterface from copier_exchange_manager and return the AccountCopier implementation + suited to that copier_exchange_manager (option, future, or spot) . + """ + copier_exchange_interface = copy_exchange.ExchangeInterface( + copier_exchange_manager, copier_trading_mode + ) + if copier_exchange_manager.is_option: + return option_account_copier.OptionAccountCopier( + reference_account, + copier_exchange_interface, + copy_settings, + ) + if copier_exchange_manager.is_future: + return futures_account_copier.FuturesAccountCopier( + reference_account, + copier_exchange_interface, + copy_settings, + ) + return spot_account_copier.SpotAccountCopier( + reference_account, + copier_exchange_interface, + copy_settings, + ) diff --git a/packages/copy/octobot_copy/copiers/futures_account_copier.py b/packages/copy/octobot_copy/copiers/futures_account_copier.py new file mode 100644 index 0000000000..502a14a8e2 --- /dev/null +++ b/packages/copy/octobot_copy/copiers/futures_account_copier.py @@ -0,0 +1,14 @@ +import octobot_trading.personal_data as trading_personal_data + +import octobot_copy.copiers.spot_account_copier as spot_account_copier +import octobot_copy.rebalancing.rebalancer.futures_rebalancer as futures_rebalancer + + +class FuturesAccountCopier(spot_account_copier.SpotAccountCopier): + def get_rebalancer_class(self) -> type[futures_rebalancer.FuturesRebalancer]: + return futures_rebalancer.FuturesRebalancer + + async def _synchronize_reference_open_orders(self) -> list[trading_personal_data.Order]: + raise NotImplementedError( + "Reference open-order replication on futures is not implemented yet." + ) diff --git a/packages/copy/octobot_copy/copiers/option_account_copier.py b/packages/copy/octobot_copy/copiers/option_account_copier.py new file mode 100644 index 0000000000..917125c595 --- /dev/null +++ b/packages/copy/octobot_copy/copiers/option_account_copier.py @@ -0,0 +1,7 @@ +from octobot_copy.copiers.futures_account_copier import FuturesAccountCopier +import octobot_copy.rebalancing.rebalancer.option_rebalancer as option_rebalancer + + +class OptionAccountCopier(FuturesAccountCopier): + def get_rebalancer_class(self) -> type[option_rebalancer.OptionRebalancer]: + return option_rebalancer.OptionRebalancer diff --git a/packages/copy/octobot_copy/copiers/spot_account_copier.py b/packages/copy/octobot_copy/copiers/spot_account_copier.py new file mode 100644 index 0000000000..6b1acef582 --- /dev/null +++ b/packages/copy/octobot_copy/copiers/spot_account_copier.py @@ -0,0 +1,9 @@ +from octobot_copy.copiers.account_copier import AccountCopier +import octobot_copy.rebalancing.rebalancer.spot_rebalancer as spot_rebalancer + + +class SpotAccountCopier(AccountCopier): + """Spot account copy: executes rebalance via SpotRebalancer (no contract prep per coin).""" + + def get_rebalancer_class(self) -> type[spot_rebalancer.SpotRebalancer]: + return spot_rebalancer.SpotRebalancer diff --git a/packages/copy/octobot_copy/entities/__init__.py b/packages/copy/octobot_copy/entities/__init__.py new file mode 100644 index 0000000000..36af00ac19 --- /dev/null +++ b/packages/copy/octobot_copy/entities/__init__.py @@ -0,0 +1,13 @@ +from octobot_copy.entities.account import Account +from octobot_copy.entities.account_copy_settings import ( + AccountCopySettings, + parse_account_copy_settings, +) +from octobot_copy.entities.account_copy_result import AccountCopyResult + +__all__ = [ + "Account", + "AccountCopySettings", + "parse_account_copy_settings", + "AccountCopyResult", +] \ No newline at end of file diff --git a/packages/copy/octobot_copy/entities/account.py b/packages/copy/octobot_copy/entities/account.py new file mode 100644 index 0000000000..deb413981b --- /dev/null +++ b/packages/copy/octobot_copy/entities/account.py @@ -0,0 +1,72 @@ +import dataclasses +import typing +import decimal + +import octobot_commons.constants as common_constants +import octobot_commons.dataclasses as commons_dataclasses +import octobot_trading.constants as trading_constants + +import octobot_copy.enums as copy_enums +import octobot_copy.rebalancing.planner.distributions as planner_distributions +import octobot_copy.constants as copy_constants + + +@dataclasses.dataclass +class Account(commons_dataclasses.MinimizableDataclass): + updated_at: float = dataclasses.field(default=0, repr=True) + # account portfolio: dict of assets with allocation_ratio, available and total amounts + # the allocation_ratio key is used to compute the distribution allocation + content: dict[str, dict[str, decimal.Decimal]] = dataclasses.field(default_factory=dict, repr=True) + # account enriched orders formatted as trading_storage.orders_storage._format_order + orders: list[dict[str, typing.Any]] = dataclasses.field(default_factory=list, repr=True) + # account positions, dict keys: trading_enums.ExchangeConstantsPositionColumns + positions: list[dict[str, typing.Any]] = dataclasses.field(default_factory=list, repr=True) + # list of historical snapshots of the account, sorted by updated_at (most recent first) + historical_snapshots: list["Account"] = dataclasses.field(default_factory=list, repr=True) + + def __post_init__(self): + self.content = { + asset: { + key: decimal.Decimal(str(value)) for key, value in holdings.items() + } + for asset, holdings in self.content.items() + } + if self.historical_snapshots: + if isinstance(self.historical_snapshots[0], dict): + snapshots = [ + Account.from_dict(snapshot) for snapshot in self.historical_snapshots + ] + else: + snapshots = self.historical_snapshots + self.historical_snapshots = sorted( + snapshots, key=lambda x: x.updated_at, reverse=True + ) + + def create_assets_distribution(self) -> list[dict[str, typing.Any]]: + amounts: list[tuple[str, decimal.Decimal]] = [] + for currency, holdings in self.content.items(): + allocation_ratio = holdings[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] + if allocation_ratio > trading_constants.ZERO: + amounts.append((currency, allocation_ratio)) + if not amounts: + return [] + total = sum((allocation_ratio for _, allocation_ratio in amounts), trading_constants.ZERO) + if total <= trading_constants.ZERO: + return [] + distribution: list[dict[str, typing.Any]] = [] + for currency, allocation_ratio in amounts: + percentage = float( + round( + allocation_ratio / total * trading_constants.ONE_HUNDRED, + planner_distributions.MAX_DISTRIBUTION_AFTER_COMMA_DIGITS, + ) + ) + if percentage: + distribution.append( + { + copy_enums.DistributionKeys.NAME.value: currency, + copy_enums.DistributionKeys.VALUE.value: percentage, + copy_enums.DistributionKeys.PRICE.value: None, + } + ) + return distribution diff --git a/packages/copy/octobot_copy/entities/account_copy_result.py b/packages/copy/octobot_copy/entities/account_copy_result.py new file mode 100644 index 0000000000..7c014e276d --- /dev/null +++ b/packages/copy/octobot_copy/entities/account_copy_result.py @@ -0,0 +1,12 @@ +import dataclasses +import typing + +import octobot_commons.dataclasses as commons_dataclasses + + +@dataclasses.dataclass +class AccountCopyResult(commons_dataclasses.MinimizableDataclass): + """Outcome of AccountCopier.copy_account (rebalance + mirrored order sync).""" + + # Placed copier orders (octobot_trading.personal_data.Order instances) + created_orders: list = dataclasses.field(default_factory=list) diff --git a/packages/copy/octobot_copy/entities/account_copy_settings.py b/packages/copy/octobot_copy/entities/account_copy_settings.py new file mode 100644 index 0000000000..175cb5fb2a --- /dev/null +++ b/packages/copy/octobot_copy/entities/account_copy_settings.py @@ -0,0 +1,73 @@ +import dataclasses +import decimal +import json +import typing + +import octobot_commons.dataclasses as commons_dataclasses +import octobot_commons.errors as commons_errors +import octobot_copy.constants as copy_constants +import octobot_copy.enums as copy_enums + + +@dataclasses.dataclass +class AccountCopySettings(commons_dataclasses.MinimizableDataclass): + synchronization_policy: copy_enums.SynchronizationPolicy = ( + copy_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_AS_SOON_AS_POSSIBLE + ) + # Minimum ratio of the portfolio that must be rebalanced to trigger a rebalance + rebalance_trigger_min_ratio: decimal.Decimal = decimal.Decimal("0.05") # 5% + # Minimum ratio of quote(ref market) in portfolio to trigger a rebalance + quote_asset_rebalance_ratio_threshold: decimal.Decimal = decimal.Decimal("0.1") # 10% + # Percentage of the portfolio to trade (distributed among targeted coins). + reference_market_ratio: decimal.Decimal = decimal.Decimal("1") # 100% + # When True, coins in portfolio that are not in targeted coins will be sold to free up funds for the rebalance. + sell_untargeted_traded_coins: bool = True + # Min order size safety factor: ideal amount must be at least this multiple of the exchange min cost + min_order_size_margin: decimal.Decimal = decimal.Decimal("2") + # Allow skipping assets that don't meet minimum order size requirements instead of aborting portfolio rebalancing + allow_skip_asset: bool = False + # When True, planner holdings ratios include value tied up in open orders + can_include_assets_in_open_orders_in_holdings_ratio: bool = False + # Defer cancelling mirrored copier orders when reference open orders disappeared (wall time.time) + mirrored_orphan_cancel_grace_seconds: float = float(copy_constants.FILL_ORDER_TIMEOUT) + mirrored_orphan_grace_abort_threshold: int = copy_constants.DEFAULT_MIRRORED_ORPHAN_ORDERS_GRACE_ABORT_THRESHOLD + missed_signals_grace_abort_threshold: int = copy_constants.DEFAULT_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD + mirrored_orphan_grace_pair_ratio_max_delta: decimal.Decimal = ( + copy_constants.DEFAULT_MIRRORED_ORPHAN_GRACE_PAIR_RATIO_MAX_DELTA + ) + + def __post_init__(self): + if self.synchronization_policy: + self.synchronization_policy = copy_enums.SynchronizationPolicy(self.synchronization_policy) + if self.rebalance_trigger_min_ratio: + self.rebalance_trigger_min_ratio = decimal.Decimal(str(self.rebalance_trigger_min_ratio)) + if self.quote_asset_rebalance_ratio_threshold: + self.quote_asset_rebalance_ratio_threshold = decimal.Decimal(str(self.quote_asset_rebalance_ratio_threshold)) + if self.reference_market_ratio: + self.reference_market_ratio = decimal.Decimal(str(self.reference_market_ratio)) + if self.min_order_size_margin: + self.min_order_size_margin = decimal.Decimal(str(self.min_order_size_margin)) + if self.mirrored_orphan_grace_pair_ratio_max_delta: + self.mirrored_orphan_grace_pair_ratio_max_delta = decimal.Decimal( + str(self.mirrored_orphan_grace_pair_ratio_max_delta) + ) + + +def parse_account_copy_settings(raw: typing.Any) -> "AccountCopySettings": + """ + Parse ``account_copy_settings`` from a DSL parameter (JSON string, dict, or None for defaults). + """ + if raw is None: + return AccountCopySettings() + if isinstance(raw, dict): + return AccountCopySettings.from_dict(raw) + if not isinstance(raw, str): + raise commons_errors.InvalidParameterFormatError( + f"account_copy_settings must be a JSON string, got {type(raw).__name__}" + ) + try: + return AccountCopySettings.from_dict(json.loads(raw)) + except json.JSONDecodeError as err: + raise commons_errors.InvalidParameterFormatError( + f"Invalid account_copy_settings JSON: {err}" + ) from err diff --git a/packages/copy/octobot_copy/enums.py b/packages/copy/octobot_copy/enums.py new file mode 100644 index 0000000000..08ddf71539 --- /dev/null +++ b/packages/copy/octobot_copy/enums.py @@ -0,0 +1,38 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + + +class RebalanceDetails(enum.Enum): + SELL_SOME = "SELL_SOME" + BUY_MORE = "BUY_MORE" + REMOVE = "REMOVE" + ADD = "ADD" + SWAP = "SWAP" + FORCED_REBALANCE = "FORCED_REBALANCE" + + +class SynchronizationPolicy(enum.Enum): + SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE = "sell_removed_index_coins_on_ratio_rebalance" + SELL_REMOVED_INDEX_COINS_AS_SOON_AS_POSSIBLE = "sell_removed_index_coins_as_soon_as_possible" + SELL_REMOVED_DYNAMIC_INDEX_COINS_AS_SOON_AS_POSSIBLE = "sell_removed_dynamic_index_coins_as_soon_as_possible" + + +class DistributionKeys(enum.StrEnum): + NAME = "name" + VALUE = "value" + PRICE = "price" + diff --git a/packages/copy/octobot_copy/errors.py b/packages/copy/octobot_copy/errors.py new file mode 100644 index 0000000000..d0b72ce0fd --- /dev/null +++ b/packages/copy/octobot_copy/errors.py @@ -0,0 +1,16 @@ +class OctobotCopyError(Exception): + """ + Parent class for all octobot copy errors + """ + + +class RebalanceError(OctobotCopyError): + """ + Parent class for all rebalance errors + """ + + +class RebalanceAborted(RebalanceError): + """ + Raised when a rebalance is aborted + """ diff --git a/packages/copy/octobot_copy/exchange/__init__.py b/packages/copy/octobot_copy/exchange/__init__.py new file mode 100644 index 0000000000..5ee8844c9d --- /dev/null +++ b/packages/copy/octobot_copy/exchange/__init__.py @@ -0,0 +1,5 @@ +from octobot_copy.exchange.exchange_interface import ExchangeInterface + +__all__ = [ + "ExchangeInterface", +] diff --git a/packages/copy/octobot_copy/exchange/exchange_interface.py b/packages/copy/octobot_copy/exchange/exchange_interface.py new file mode 100644 index 0000000000..081282ddf9 --- /dev/null +++ b/packages/copy/octobot_copy/exchange/exchange_interface.py @@ -0,0 +1,37 @@ +import typing + +import octobot_trading.modes as trading_modes + +import octobot_copy.exchange.market as exchange_market +import octobot_copy.exchange.orders as exchange_orders +import octobot_copy.exchange.portfolio as exchange_portfolio +import octobot_copy.exchange.positions as exchange_positions + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges + + +class ExchangeInterface: + def __init__( + self, + exchange_manager: "octobot_trading.exchanges.ExchangeManager", + trading_mode: typing.Optional["trading_modes.AbstractTradingMode"] = None, + ): + self._exchange_manager: "octobot_trading.exchanges.ExchangeManager" = exchange_manager + self.market: exchange_market.MarketInterface = exchange_market.MarketInterface(exchange_manager) + self.portfolio: exchange_portfolio.PortfolioInterface = exchange_portfolio.PortfolioInterface( + exchange_manager + ) + self.orders: exchange_orders.OrdersInterface = exchange_orders.OrdersInterface( + exchange_manager, trading_mode + ) + self.positions: exchange_positions.PositionsInterface = exchange_positions.PositionsInterface( + exchange_manager, self.orders, self.market + ) + + @property + def exchange_name(self) -> str: + return self._exchange_manager.exchange_name + + def get_time(self) -> float: + return self._exchange_manager.exchange.get_exchange_current_time() diff --git a/packages/copy/octobot_copy/exchange/market.py b/packages/copy/octobot_copy/exchange/market.py new file mode 100644 index 0000000000..e3847ce33c --- /dev/null +++ b/packages/copy/octobot_copy/exchange/market.py @@ -0,0 +1,50 @@ +import decimal +import typing + +import octobot_commons.logging as commons_logging +import octobot_commons.symbols +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.errors as trading_errors +import octobot_trading.personal_data as trading_personal_data + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges + + +class MarketInterface: + def __init__(self, exchange_manager: "octobot_trading.exchanges.ExchangeManager"): + self._exchange_manager: "octobot_trading.exchanges.ExchangeManager" = exchange_manager + + def get_traded_symbols(self) -> typing.Iterable[octobot_commons.symbols.Symbol]: + return self._exchange_manager.exchange_config.traded_symbols + + def is_symbol_tradable(self, symbol: str) -> bool: + return symbol in self._exchange_manager.exchange_symbols_data.exchange_symbol_data + + async def get_up_to_date_price(self, symbol: str) -> decimal.Decimal: + return await trading_personal_data.get_up_to_date_price( + self._exchange_manager, + symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT, + ) + + def get_potentially_outdated_price(self, symbol: str) -> (decimal.Decimal, bool): + return trading_personal_data.get_potentially_outdated_price( + self._exchange_manager, + symbol, + ) + + async def ensure_contract_loaded(self, symbol: str) -> None: + try: + await self._exchange_manager.exchange.get_pair_contract_async(symbol) + except trading_errors.ContractExistsError: + commons_logging.get_logger(self.__class__.__name__).info( + f"Contract for {symbol} has been loaded." + ) + + def is_market_open_for_order_type(self, symbol: str, order_type: trading_enums.TraderOrderType) -> bool: + return self._exchange_manager.exchange.is_market_open_for_order_type(symbol, order_type) + + def get_market_status(self, symbol: str, *, with_fixer: bool = False): + return self._exchange_manager.exchange.get_market_status(symbol, with_fixer=with_fixer) diff --git a/packages/copy/octobot_copy/exchange/orders.py b/packages/copy/octobot_copy/exchange/orders.py new file mode 100644 index 0000000000..021969b585 --- /dev/null +++ b/packages/copy/octobot_copy/exchange/orders.py @@ -0,0 +1,322 @@ +import asyncio +import decimal +import typing + +import octobot_commons.constants as commons_constants +import octobot_commons.logging as commons_logging +import octobot_commons.signals as commons_signals +import octobot_trading.api as trading_api +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.errors as trading_errors +import octobot_trading.modes as trading_modes +import octobot_trading.modes.modes_util as modes_util +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.personal_data.orders.order_util as order_util +import octobot_trading.signals.signal_creation as signal_creation + +import octobot_copy.constants as copy_constants + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges + + +class OrdersInterface: + def __init__( + self, + exchange_manager: "octobot_trading.exchanges.ExchangeManager", + trading_mode: typing.Optional["trading_modes.AbstractTradingMode"], + ): + self._exchange_manager: "octobot_trading.exchanges.ExchangeManager" = exchange_manager + self._trading_mode: typing.Optional["trading_modes.AbstractTradingMode"] = trading_mode + + async def create_order( + self, + order_type: trading_enums.TraderOrderType, + symbol: str, + current_price: decimal.Decimal, + quantity: decimal.Decimal, + price: decimal.Decimal, + *, + reduce_only: typing.Optional[bool] = None, + close_position: bool = False, + params: typing.Optional[dict] = None, + wait_for_creation=True, + creation_timeout=trading_constants.INDIVIDUAL_ORDER_SYNC_TIMEOUT, + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + tag: typing.Optional[str] = None, + order_id: typing.Optional[str] = None, + raise_all_creation_error: bool = False, + ): + order = trading_personal_data.create_order_instance( + trader=self._exchange_manager.trader, + order_type=order_type, + symbol=symbol, + current_price=current_price, + quantity=quantity, + price=price, + reduce_only=reduce_only, + close_position=close_position, + tag=tag, + order_id=order_id, + ) + if self._trading_mode is not None: + return await self._trading_mode.create_order( + order, + loaded=False, + params=params, + wait_for_creation=wait_for_creation, + creation_timeout=creation_timeout, + raise_all_creation_error=raise_all_creation_error, + dependencies=dependencies, + ) + return await self._exchange_manager.trader.create_order( + order, + loaded=False, + params=params, + wait_for_creation=wait_for_creation, + creation_timeout=creation_timeout, + raise_all_creation_error=raise_all_creation_error, + force_if_disabled=False # type: ignore + ) + + def adapt_order_quantity_and_target_price_for_order_creation( + self, + order_type: trading_enums.TraderOrderType, + symbol: str, + quantity: decimal.Decimal, + order_target_price: decimal.Decimal, + *, + adapt_price_for_limit_orders: bool = False, + ) -> tuple[decimal.Decimal, decimal.Decimal]: + side = ( + trading_enums.TradeOrderSide.BUY + if order_type + in ( + trading_enums.TraderOrderType.BUY_MARKET, + trading_enums.TraderOrderType.BUY_LIMIT, + ) + else trading_enums.TradeOrderSide.SELL + ) + adapted_target_price = order_target_price + adapted_quantity = trading_personal_data.decimal_adapt_order_quantity_because_fees( + self._exchange_manager, + symbol, + order_type, + quantity, + order_target_price, + side, + ) + if trading_personal_data.get_trade_order_type(order_type) is trading_enums.TradeOrderType.MARKET: + return adapted_target_price, adapted_quantity + if adapt_price_for_limit_orders: + adapted_target_price, adapted_quantity = ( + trading_modes.get_instantly_filled_limit_order_adapted_price_and_quantity( + adapted_target_price, adapted_quantity, order_type + ) + ) + return adapted_target_price, adapted_quantity + + async def create_orders( + self, + order_type: trading_enums.TraderOrderType, + symbol: str, + current_price: decimal.Decimal, + quantity: decimal.Decimal, + order_target_price: decimal.Decimal, + symbol_market, + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + *, + reduce_only: typing.Optional[bool] = None, + close_position: bool = False, + skip_none_create_results: bool = False, + tag: typing.Optional[str] = None, + order_id: typing.Optional[str] = None, + raise_all_creation_error: bool = False, + ) -> tuple[list, bool]: + created_orders: list = [] + orders_should_have_been_created = False + chunk_index = 0 + for order_quantity, order_price in trading_personal_data.decimal_check_and_adapt_order_details_if_necessary( + quantity, + order_target_price, + symbol_market, + ): + orders_should_have_been_created = True + chunk_order_id = order_id if chunk_index == 0 else None + chunk_tag = tag if chunk_index == 0 else None + chunk_index += 1 + created_order = await self.create_order( + order_type, + symbol, + current_price, + order_quantity, + order_price, + reduce_only=reduce_only, + close_position=close_position, + dependencies=dependencies, + tag=chunk_tag, + order_id=chunk_order_id, + raise_all_creation_error=raise_all_creation_error, + ) + if skip_none_create_results: + if created_order is not None: + created_orders.append(created_order) + else: + created_orders.append(created_order) + return created_orders, orders_should_have_been_created + + def automatically_synchronize_orders(self) -> bool: + return self._exchange_manager.exchange_personal_data.orders_manager.enable_order_auto_synchronization + + async def wait_for_orders_to_fill(self, orders: list) -> None: + return await trading_personal_data.wait_for_orders_to_fill_considering_order_auto_synchronization( + self._exchange_manager, orders, copy_constants.FILL_ORDER_TIMEOUT, True, temp_refresh_portfolio_on_static_wait=False + ) + + def get_open_orders(self, symbol: typing.Optional[str] = None, active: typing.Optional[bool] = None) -> list: + return trading_api.get_open_orders(self._exchange_manager, symbol=symbol, active=active) + + def get_pending_open_quantity(self, symbol: str) -> decimal.Decimal: + pending_quantity = decimal.Decimal(0) + for order in self.get_open_orders(symbol=symbol): + remaining_quantity = order.origin_quantity - order.filled_quantity + if remaining_quantity <= decimal.Decimal(0): + continue + if order.side is trading_enums.TradeOrderSide.BUY: + pending_quantity += remaining_quantity + elif order.side is trading_enums.TradeOrderSide.SELL: + pending_quantity -= remaining_quantity + return pending_quantity + + async def get_pre_order_data( + self, + symbol: str, + timeout: typing.Optional[int] = trading_constants.ORDER_DATA_FETCHING_TIMEOUT, + portfolio_type=commons_constants.PORTFOLIO_AVAILABLE, + target_price=None, + ): + return await trading_personal_data.get_pre_order_data( + self._exchange_manager, + symbol=symbol, + timeout=timeout, + portfolio_type=portfolio_type, + target_price=target_price, + ) + + def get_futures_max_order_size( + self, + symbol: str, + side: trading_enums.TradeOrderSide, + current_price: decimal.Decimal, + reduce_only: bool, + current_symbol_holding: decimal.Decimal, + market_quantity: decimal.Decimal, + ) -> tuple[decimal.Decimal, bool]: + return order_util.get_futures_max_order_size( + self._exchange_manager, + symbol, + side, + current_price, + reduce_only, + current_symbol_holding, + market_quantity, + ) + + async def convert_assets_to_target_asset( + self, + sellable_assets: list, + target_asset: str, + tickers: dict, + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + raise_all_order_errors: bool = False, + ) -> list: + return await modes_util.convert_assets_to_target_asset( + sellable_assets, + target_asset, + tickers, + dependencies=dependencies, + raise_all_order_errors=raise_all_order_errors, + trading_mode=self._trading_mode, + exchange_manager=self._exchange_manager, + ) + + async def cancel_order( + self, + order, + ignored_order: object = None, + wait_for_cancelling: bool = True, + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + ) -> tuple[bool, commons_signals.SignalDependencies]: + if self._trading_mode is not None: + return await self._trading_mode.cancel_order( + order, + ignored_order=ignored_order, + wait_for_cancelling=wait_for_cancelling, + dependencies=dependencies, + ) + return await signal_creation.cancel_order( + self._exchange_manager, + False, + order, + ignored_order=ignored_order, + wait_for_cancelling=wait_for_cancelling, + dependencies=dependencies, + ) + + async def cancel_symbol_open_orders( + self, + symbol: str, + dependencies: typing.Optional[commons_signals.SignalDependencies], + allowed_sides: typing.Optional[set[trading_enums.TradeOrderSide]] = None, + ) -> typing.Optional[commons_signals.SignalDependencies]: + cancelled_dependencies = commons_signals.SignalDependencies() + for order in self.get_open_orders(symbol=symbol): + if isinstance(order, trading_personal_data.MarketOrder): + continue + if allowed_sides and order.side not in allowed_sides: + continue + try: + is_cancelled, dependency = await self.cancel_order(order) + if is_cancelled and dependency is not None: + cancelled_dependencies.extend(dependency) + except trading_errors.UnexpectedExchangeSideOrderStateError as err: + self._get_logger().warning( + f"Skipped order cancel: {err}, order: {order}" + ) + if dependencies is not None: + dependencies.extend(cancelled_dependencies) + return cancelled_dependencies or None + + def adapt_order_quantity_because_fees( + self, + symbol: str, + order_type: trading_enums.TraderOrderType, + quantity: decimal.Decimal, + price: decimal.Decimal, + side: trading_enums.TradeOrderSide, + ) -> decimal.Decimal: + return trading_personal_data.decimal_adapt_order_quantity_because_fees( + self._exchange_manager, symbol, order_type, quantity, price, side + ) + + def get_minimal_order_cost(self, symbol: str, default_price: typing.Optional[float] = None) -> float: + symbol_market = self._exchange_manager.exchange.get_market_status(symbol, with_fixer=False) + return trading_personal_data.get_minimal_order_cost(symbol_market, default_price=default_price) + + def check_and_adapt_order_details_if_necessary( + self, + symbol: str, + quantity: decimal.Decimal, + price: decimal.Decimal, + ) -> tuple[list[tuple[decimal.Decimal, decimal.Decimal]], dict]: + symbol_market = self._exchange_manager.exchange.get_market_status(symbol, with_fixer=False) + adapted_order_details = trading_personal_data.decimal_check_and_adapt_order_details_if_necessary( + quantity, + price, + symbol_market, + ) + return adapted_order_details, symbol_market + + def _get_logger(self) -> commons_logging.BotLogger: + return commons_logging.get_logger(self.__class__.__name__) diff --git a/packages/copy/octobot_copy/exchange/portfolio.py b/packages/copy/octobot_copy/exchange/portfolio.py new file mode 100644 index 0000000000..9e75b68f94 --- /dev/null +++ b/packages/copy/octobot_copy/exchange/portfolio.py @@ -0,0 +1,63 @@ +import decimal +import typing + +import octobot_commons.logging as commons_logging +import octobot_trading.personal_data as personal_data +import octobot_trading.constants as trading_constants + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges + + +class PortfolioInterface: + def __init__(self, exchange_manager: "octobot_trading.exchanges.ExchangeManager"): + self._exchange_manager: "octobot_trading.exchanges.ExchangeManager" = exchange_manager + + @property + def reference_market(self) -> str: + return self._exchange_manager.exchange_personal_data.portfolio_manager.reference_market + + def get_holdings_ratio( + self, + coin: str, + traded_symbols_only: bool = False, + include_assets_in_open_orders: bool = False, + coins_whitelist: typing.Optional[list] = None, + ) -> decimal.Decimal: + ratio = self._exchange_manager.exchange_personal_data.portfolio_manager.portfolio_value_holder.get_holdings_ratio( + coin, + traded_symbols_only=traded_symbols_only, + include_assets_in_open_orders=include_assets_in_open_orders, + coins_whitelist=coins_whitelist, + ) + return ratio if ratio is not None else trading_constants.ZERO + + def get_traded_assets_holdings_value( + self, + unit: str, + coins_whitelist: typing.Optional[typing.Iterable] = None, + ) -> decimal.Decimal: + portfolio_manager = self._exchange_manager.exchange_personal_data.portfolio_manager + return portfolio_manager.portfolio_value_holder.get_traded_assets_holdings_value( + unit, coins_whitelist + ) + + def get_free_reference_market_holding(self, reference_market: str) -> decimal.Decimal: + portfolio_manager = self._exchange_manager.exchange_personal_data.portfolio_manager + return portfolio_manager.portfolio.get_currency_portfolio(reference_market).available + + def get_currency_portfolio_total(self, currency: str) -> decimal.Decimal: + portfolio = self._exchange_manager.exchange_personal_data.portfolio_manager.portfolio + return portfolio.get_currency_portfolio(currency).total + + def get_currency_portfolio_available(self, currency: str) -> decimal.Decimal: + portfolio = self._exchange_manager.exchange_personal_data.portfolio_manager.portfolio + return portfolio.get_currency_portfolio(currency).available + + async def refresh_portfolio(self) -> bool: + return await self._exchange_manager.exchange_personal_data.portfolio_manager.refresh_real_trader_portfolio( + force_manual_refresh=True + ) + + def _get_logger(self) -> commons_logging.BotLogger: + return commons_logging.get_logger(self.__class__.__name__) diff --git a/packages/copy/octobot_copy/exchange/positions.py b/packages/copy/octobot_copy/exchange/positions.py new file mode 100644 index 0000000000..3cf0c2e426 --- /dev/null +++ b/packages/copy/octobot_copy/exchange/positions.py @@ -0,0 +1,132 @@ +import decimal +import typing + +import octobot_commons.signals as commons_signals +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.modes as trading_modes +import octobot_trading.personal_data as trading_personal_data + +import octobot_copy.exchange.market as exchange_market +import octobot_copy.exchange.orders as exchange_orders + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges + + +class PositionsInterface: + def __init__( + self, + exchange_manager: "octobot_trading.exchanges.ExchangeManager", + orders: "exchange_orders.OrdersInterface", + market: "exchange_market.MarketInterface", + ): + self._exchange_manager: "octobot_trading.exchanges.ExchangeManager" = exchange_manager + self._orders: exchange_orders.OrdersInterface = orders + self._market: exchange_market.MarketInterface = market + + def get_symbol_position( + self, + symbol: str, + side: trading_enums.PositionSide = trading_enums.PositionSide.BOTH, + ): + return self._exchange_manager.exchange_personal_data.positions_manager.get_symbol_position( + symbol, side + ) + + async def refresh_real_trader_position(self, position, *, force_job_execution: bool = True) -> None: + await self._exchange_manager.exchange_personal_data.positions_manager.refresh_real_trader_position( + position, force_job_execution=force_job_execution + ) + + async def close_symbol_position( + self, + symbol: str, + dependencies: typing.Optional[commons_signals.SignalDependencies], + current_price: decimal.Decimal, + symbol_market, + desired_futures_position_size: typing.Optional[decimal.Decimal] = None, + ) -> list: + position = self.get_symbol_position(symbol, trading_enums.PositionSide.BOTH) + if position.is_idle(): + # Force a refresh from the exchange before concluding there is nothing to sell. + await self.refresh_real_trader_position(position, force_job_execution=True) + position = self.get_symbol_position(symbol, trading_enums.PositionSide.BOTH) + if position.is_idle(): + await self._orders.cancel_symbol_open_orders(symbol, dependencies=dependencies) + return [] + + # Cancel open close-side orders BEFORE computing effective position size so that a stuck + # IOC→GTC order from a previous cycle does not subtract from pending_open_quantity and wrongly suppress the fresh close order. + close_side = ( + trading_enums.TradeOrderSide.BUY if position.is_short() + else trading_enums.TradeOrderSide.SELL + ) + await self._orders.cancel_symbol_open_orders(symbol, dependencies, allowed_sides={close_side}) + pending_open_quantity = self._orders.get_pending_open_quantity(symbol) + position_size = decimal.Decimal(str(position.size)) + if position.is_short(): + effective_position_size = -abs(position_size) + pending_open_quantity + else: + effective_position_size = abs(position_size) + pending_open_quantity + + if effective_position_size == trading_constants.ZERO: + return [] + + if effective_position_size > trading_constants.ZERO: + side = trading_enums.TradeOrderSide.SELL + else: + side = trading_enums.TradeOrderSide.BUY + + quantity_to_close = abs(effective_position_size) + if desired_futures_position_size is not None and effective_position_size > trading_constants.ZERO: + quantity_to_close = max( + trading_constants.ZERO, + effective_position_size - desired_futures_position_size, + ) + if quantity_to_close <= trading_constants.ZERO: + return [] + + ideal_order_type = ( + trading_enums.TraderOrderType.SELL_MARKET + if side is trading_enums.TradeOrderSide.SELL + else trading_enums.TraderOrderType.BUY_MARKET + ) + order_type = ( + ideal_order_type + if self._market.is_market_open_for_order_type(symbol, ideal_order_type) + else ( + trading_enums.TraderOrderType.SELL_LIMIT + if side is trading_enums.TradeOrderSide.SELL + else trading_enums.TraderOrderType.BUY_LIMIT + ) + ) + + quantity = self._orders.adapt_order_quantity_because_fees( + symbol, order_type, quantity_to_close, current_price, side + ) + if trading_personal_data.get_trade_order_type(order_type) is not trading_enums.TradeOrderType.MARKET: + current_price, quantity = trading_modes.get_instantly_filled_limit_order_adapted_price_and_quantity( + current_price, quantity, order_type + ) + + created_orders = [] + for order_quantity, order_price in trading_personal_data.decimal_check_and_adapt_order_details_if_necessary( + quantity, + current_price, + symbol_market, + ): + created_order = await self._orders.create_order( + order_type, + symbol, + order_price, + order_quantity, + order_price, + reduce_only=True, + close_position=True, + dependencies=dependencies, + ) + if created_order is not None: + created_orders.append(created_order) + + return created_orders diff --git a/packages/copy/octobot_copy/orders_mirroring/__init__.py b/packages/copy/octobot_copy/orders_mirroring/__init__.py new file mode 100644 index 0000000000..ea35e97b6d --- /dev/null +++ b/packages/copy/octobot_copy/orders_mirroring/__init__.py @@ -0,0 +1 @@ +# orders_mirroring: reference order sync for account copy diff --git a/packages/copy/octobot_copy/orders_mirroring/orders_synchronizer.py b/packages/copy/octobot_copy/orders_mirroring/orders_synchronizer.py new file mode 100644 index 0000000000..fef6075c0d --- /dev/null +++ b/packages/copy/octobot_copy/orders_mirroring/orders_synchronizer.py @@ -0,0 +1,950 @@ +import decimal +import time +import typing + +import octobot_commons.constants as commons_constants +import octobot_commons.logging as logging +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_trading.constants as trading_constants +import octobot_trading.errors as trading_errors +import octobot_trading.enums as trading_enums +import octobot_trading.personal_data as trading_personal_data + +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_copy.exchange as copy_exchange + + +class OrdersSynchronizer: + """Synches reference account open orders onto the copier exchange (spot mirror rows).""" + + def __init__( + self, + reference_account: copy_entities.Account, + exchange_interface: copy_exchange.ExchangeInterface, + copy_settings: copy_entities.AccountCopySettings, + ) -> None: + self._reference_account = reference_account + self._exchange_interface = exchange_interface + self._copy_settings = copy_settings + self._force_immediate_orphan_cancel_next: bool = False + + def _get_replicable_reference_orders_from( + self, + reference_account: copy_entities.Account, + ) -> list[dict[str, typing.Any]]: + replicable: list[dict[str, typing.Any]] = [] + for order in reference_account.orders: + if trading_constants.STORAGE_ORIGIN_VALUE not in order: + continue + origin = order[trading_constants.STORAGE_ORIGIN_VALUE] + if ( + origin.get(trading_enums.ExchangeConstantsOrderColumns.STATUS.value) + != trading_enums.OrderStatus.OPEN.value + ): + continue + if origin.get(trading_enums.ExchangeConstantsOrderColumns.SELF_MANAGED.value, False): + continue + if origin.get(trading_enums.ExchangeConstantsOrderColumns.IS_ACTIVE.value, True) is False: + continue + _, trader_order_type = trading_personal_data.parse_order_type(origin) + if trader_order_type in ( + trading_enums.TraderOrderType.BUY_MARKET, + trading_enums.TraderOrderType.SELL_MARKET, + ): + # ignore market orders: they can't be replicated + continue + replicable.append(order) + return replicable + + def _get_replicable_reference_orders(self) -> list[dict[str, typing.Any]]: + return self._get_replicable_reference_orders_from(self._reference_account) + + def _active_reference_order_ids(self, replicable: list[dict[str, typing.Any]]) -> set: + return { + str( + order[trading_constants.STORAGE_ORIGIN_VALUE][ + trading_enums.ExchangeConstantsOrderColumns.ID.value + ] + ) + for order in replicable + } + + async def cancel_orders_pending_synchronization( + self, + replicable_orders: typing.Optional[list[dict[str, typing.Any]]], + ) -> int: + """ + Cancel mirrored copier open orders that no longer match a replicable reference open order + """ + replicable = replicable_orders or self._get_replicable_reference_orders() + to_keep_ids = self._active_reference_order_ids(replicable) + return await self._cancel_mirrored_orphan_orders(to_keep_ids, replicable) + + def abort_mirrored_orphan_grace(self) -> None: + self._force_immediate_orphan_cancel_next = True + + def is_mirrored_orphan_grace_invalid_no_compliant_snapshot(self) -> bool: + """ + True when ``historical_snapshots`` is non-empty but no stored snapshot aligns with the copier + under grace checks, so grace cannot be anchored to history. Empty history is **not** invalid: + callers without prior reference states use the live account only (see + ``get_mirrored_orphan_grace_started_at``). + """ + if not self._reference_account.historical_snapshots: + return False + for snapshot in self._reference_account.historical_snapshots: + if self._reference_state_complies_with_copier_for_grace(snapshot): + return False + return True + + def get_mirrored_orphan_grace_started_at(self) -> typing.Optional[float]: + """ + Wall-clock start of the mirrored-orphan grace window derived from reference + historical_snapshots and updated_at. With no history, uses ``reference_account.updated_at``. + None when non-empty history has no compliant snapshot (invalid) or not applicable. + """ + if not self._reference_account.historical_snapshots: + return self._reference_account.updated_at + if self.is_mirrored_orphan_grace_invalid_no_compliant_snapshot(): + return None + for index, snapshot in enumerate(self._reference_account.historical_snapshots): + if self._reference_state_complies_with_copier_for_grace(snapshot): + if index > 0: + return self._reference_account.historical_snapshots[index - 1].updated_at + return self._reference_account.updated_at + return None + + def _reference_state_complies_with_copier_for_grace( + self, + reference_state: copy_entities.Account, + ) -> bool: + replicable = self._get_replicable_reference_orders_from(reference_state) + active_reference_ids = self._active_reference_order_ids(replicable) + orphan_orders = self._mirrored_orphan_open_orders(active_reference_ids) + orphan_count = len(orphan_orders) + late_fill_orders = self._late_reference_fill_candidate_orders( + replicable, + orphan_orders, + reference_state, + ) + late_reference_fill_count = len(late_fill_orders) + grace_total = orphan_count + late_reference_fill_count + settings = self._copy_settings + threshold = settings.mirrored_orphan_grace_abort_threshold + if grace_total == 0: + return True + if grace_total >= threshold: + return False + if orphan_count > 0 and not self._mirrored_orphan_batch_eligible_for_grace( + orphan_orders, + reference_state, + ): + return False + return True + + def is_mirrored_orphan_grace_aborted_for_missed_historical_signals(self) -> bool: + """ + True when ``historical_snapshots`` is non-empty, at least one snapshot complies with + ``_reference_state_complies_with_copier_for_grace``, and the first compliant snapshot is at + index ``>= missed_signals_grace_abort_threshold`` (newest-first order). Empty history and + the no-compliant-snapshot invalid case are handled elsewhere. + """ + snapshots = self._reference_account.historical_snapshots + if not snapshots: + return False + threshold = self._copy_settings.missed_signals_grace_abort_threshold + for index, snapshot in enumerate(snapshots): + if self._reference_state_complies_with_copier_for_grace(snapshot): + return index >= threshold + return False + + def is_mirrored_orphan_grace_blocking_rebalance(self) -> bool: + replicable = self._get_replicable_reference_orders() + active_reference_ids = self._active_reference_order_ids(replicable) + return self._is_grace_blocking_rebalance(active_reference_ids, replicable) + + def _mirrored_orphan_open_orders(self, active_reference_ids: set) -> list[trading_personal_data.Order]: + return [ + order + for order in self._exchange_interface.orders.get_open_orders() + if order.tag == copy_constants.MIRRORED_ORDER_TAG + and str(order.order_id) not in active_reference_ids + and order.order_type + not in ( + # market orders can't be orphaned or cancelled: they are always filled + trading_enums.TraderOrderType.BUY_MARKET, + trading_enums.TraderOrderType.SELL_MARKET, + ) + ] + + def _reference_pair_leg_share( + self, + symbol: str, + reference_state: typing.Optional[copy_entities.Account] = None, + ) -> typing.Optional[decimal.Decimal]: + reference_account = reference_state or self._reference_account + parsed = symbol_util.parse_symbol(symbol) + base_currency = parsed.base + quote_currency = parsed.quote + if not base_currency or not quote_currency: + return None + reference_content = reference_account.content + if base_currency not in reference_content: + return trading_constants.ZERO + if quote_currency not in reference_content: + return trading_constants.ONE + base_holdings = reference_content[base_currency] or {} + quote_holdings = reference_content[quote_currency] or {} + allocation_base = base_holdings.get(copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO, trading_constants.ZERO) + allocation_quote = quote_holdings.get(copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO, trading_constants.ZERO) + pair_denom = allocation_base + allocation_quote + if pair_denom <= trading_constants.ZERO: + return None + return allocation_base / pair_denom + + def _copier_asset_value_in_reference_market( + self, + asset: str, + amount: decimal.Decimal, + ) -> typing.Optional[decimal.Decimal]: + reference_market = self._exchange_interface.portfolio.reference_market + if amount < trading_constants.ZERO: + return None + if asset == reference_market: + return amount + pair_symbol = symbol_util.merge_currencies(asset, reference_market) + market_price, _ = self._exchange_interface.market.get_potentially_outdated_price(pair_symbol) + if market_price <= trading_constants.ZERO: + return None + return amount * market_price + + def _orphan_order_execution_price(self, order: trading_personal_data.Order) -> typing.Optional[decimal.Decimal]: + if order.origin_price > trading_constants.ZERO: + return order.origin_price + market_price, _ = self._exchange_interface.market.get_potentially_outdated_price(order.symbol) + if market_price <= trading_constants.ZERO: + return None + return market_price + + def _reference_order_execution_price_from_origin(self, origin: dict) -> typing.Optional[decimal.Decimal]: + symbol = origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] + price_val = origin[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] + if price_val not in (None, ""): + parsed_price = decimal.Decimal(str(price_val)) + if parsed_price > trading_constants.ZERO: + return parsed_price + market_price, _ = self._exchange_interface.market.get_potentially_outdated_price(symbol) + if market_price <= trading_constants.ZERO: + return None + return market_price + + def _pair_leg_share_value_weighted( + self, + base_currency: str, + quote_currency: str, + base_amount: decimal.Decimal, + quote_amount: decimal.Decimal, + ) -> typing.Optional[decimal.Decimal]: + if base_amount < trading_constants.ZERO or quote_amount < trading_constants.ZERO: + return None + value_base = self._copier_asset_value_in_reference_market(base_currency, base_amount) + value_quote = self._copier_asset_value_in_reference_market(quote_currency, quote_amount) + if value_base is None or value_quote is None: + return None + value_total = value_base + value_quote + if value_total <= trading_constants.ZERO: + return None + return value_base / value_total + + def _copier_pair_base_quote_totals_for_symbol( + self, + symbol: str, + ) -> typing.Optional[tuple[str, str, decimal.Decimal, decimal.Decimal]]: + parsed = symbol_util.parse_symbol(symbol) + base_currency = parsed.base + quote_currency = parsed.quote + if not base_currency or not quote_currency: + return None + base_total = self._exchange_interface.portfolio.get_currency_portfolio_total(base_currency) + quote_total = self._exchange_interface.portfolio.get_currency_portfolio_total(quote_currency) + return (base_currency, quote_currency, base_total, quote_total) + + def _copier_pair_leg_share(self, symbol: str) -> typing.Optional[decimal.Decimal]: + pair_totals = self._copier_pair_base_quote_totals_for_symbol(symbol) + if pair_totals is None: + return None + base_currency, quote_currency, base_total, quote_total = pair_totals + return self._pair_leg_share_value_weighted(base_currency, quote_currency, base_total, quote_total) + + def _simulated_copier_pair_leg_share_after_orphan_fill( + self, + order: trading_personal_data.Order, + ) -> typing.Optional[decimal.Decimal]: + pair_totals = self._copier_pair_base_quote_totals_for_symbol(order.symbol) + if pair_totals is None: + return None + base_currency, quote_currency, base_total, quote_total = pair_totals + execution_price = self._orphan_order_execution_price(order) + if execution_price is None: + return None + order_quantity = order.origin_quantity + if order.side is trading_enums.TradeOrderSide.BUY: + base_adjusted = base_total + order_quantity + quote_adjusted = quote_total - order_quantity * execution_price + elif order.side is trading_enums.TradeOrderSide.SELL: + base_adjusted = base_total - order_quantity + quote_adjusted = quote_total + order_quantity * execution_price + else: + return None + return self._pair_leg_share_value_weighted(base_currency, quote_currency, base_adjusted, quote_adjusted) + + def _simulated_reference_pair_leg_share_after_order_fill( + self, + origin: dict, + reference_state: typing.Optional[copy_entities.Account] = None, + ) -> typing.Optional[decimal.Decimal]: + reference_account = reference_state or self._reference_account + symbol = origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] + parsed = symbol_util.parse_symbol(symbol) + base_currency = parsed.base + quote_currency = parsed.quote + if not base_currency or not quote_currency: + return None + side, _trader_order_type = trading_personal_data.parse_order_type(origin) + if side is None: + return None + execution_price = self._reference_order_execution_price_from_origin(origin) + if execution_price is None: + return None + amount_raw = origin[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] + order_quantity = decimal.Decimal(str(amount_raw)) + if order_quantity <= trading_constants.ZERO: + return None + reference_content = reference_account.content + base_holdings = reference_content.get(base_currency, {}) or {} + quote_holdings = reference_content.get(quote_currency, {}) or {} + base_total = base_holdings.get(commons_constants.PORTFOLIO_TOTAL, trading_constants.ZERO) + quote_total = quote_holdings.get(commons_constants.PORTFOLIO_TOTAL, trading_constants.ZERO) + if side is trading_enums.TradeOrderSide.BUY: + base_adjusted = base_total + order_quantity + quote_adjusted = quote_total - order_quantity * execution_price + elif side is trading_enums.TradeOrderSide.SELL: + base_adjusted = base_total - order_quantity + quote_adjusted = quote_total + order_quantity * execution_price + else: + return None + return self._pair_leg_share_value_weighted(base_currency, quote_currency, base_adjusted, quote_adjusted) + + def _passes_late_reference_fill_heuristic( + self, + order: dict, + reference_state: typing.Optional[copy_entities.Account] = None, + ) -> bool: + origin = order[trading_constants.STORAGE_ORIGIN_VALUE] + symbol = origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] + max_delta = self._copy_settings.mirrored_orphan_grace_pair_ratio_max_delta + copier_share = self._copier_pair_leg_share(symbol) + simulated_reference_share = self._simulated_reference_pair_leg_share_after_order_fill( + origin, + reference_state, + ) + if copier_share is None or simulated_reference_share is None: + return False + return abs(simulated_reference_share - copier_share) <= max_delta + + def _is_late_reference_fill_for_order( + self, + order: dict, + orphan_orders: list[trading_personal_data.Order], + reference_state: typing.Optional[copy_entities.Account] = None, + ) -> bool: + origin = order[trading_constants.STORAGE_ORIGIN_VALUE] + reference_order_id = str(origin[trading_enums.ExchangeConstantsOrderColumns.ID.value]) + if self._find_open_order_by_bot_order_id(reference_order_id) is not None: + return False + reference_side, _trader_order_type = trading_personal_data.parse_order_type(origin) + if reference_side is None: + return False + reference_symbol = origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] + timestamp_raw = origin.get(trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value) + reference_timestamp: typing.Optional[float] = None + if timestamp_raw not in (None, ""): + try: + reference_timestamp = float(timestamp_raw) + except (TypeError, ValueError): + reference_timestamp = None + if reference_timestamp is not None: + for orphan_order in orphan_orders: + if orphan_order.symbol != reference_symbol: + continue + if orphan_order.side == reference_side: + continue + orphan_timestamp = orphan_order.creation_time or orphan_order.timestamp + if orphan_timestamp is None: + continue + try: + orphan_timestamp_float = float(orphan_timestamp) + except (TypeError, ValueError): + continue + if reference_timestamp > orphan_timestamp_float: + # reference account order was created after the orphan order, + # on the same symbol and a different side => it likely is the + # "other side" equivalent of the orphan order: it's not a late reference fill. + return False + return self._passes_late_reference_fill_heuristic(order, reference_state) + + def _late_reference_fill_candidate_orders( + self, + replicable: list[dict[str, typing.Any]], + orphan_orders: list[trading_personal_data.Order], + reference_state: typing.Optional[copy_entities.Account], + ) -> list[dict[str, typing.Any]]: + return [ + order + for order in replicable + if self._is_late_reference_fill_for_order(order, orphan_orders, reference_state) + ] + + def _mirrored_orphan_batch_eligible_for_grace( + self, + orphan_orders: list[trading_personal_data.Order], + reference_state: typing.Optional[copy_entities.Account] = None, + ) -> bool: + max_delta = self._copy_settings.mirrored_orphan_grace_pair_ratio_max_delta + for orphan_order in orphan_orders: + reference_share = self._reference_pair_leg_share(orphan_order.symbol, reference_state) + simulated_share = self._simulated_copier_pair_leg_share_after_orphan_fill(orphan_order) + if reference_share is None or simulated_share is None: + return False + if abs(simulated_share - reference_share) > max_delta: + return False + return True + + def _is_grace_blocking_rebalance( + self, + active_reference_ids: set, + replicable: list[dict[str, typing.Any]], + ) -> bool: + settings = self._copy_settings + grace_seconds = settings.mirrored_orphan_cancel_grace_seconds + if grace_seconds <= 0: + return False + if self.is_mirrored_orphan_grace_invalid_no_compliant_snapshot(): + return False + if self.is_mirrored_orphan_grace_aborted_for_missed_historical_signals(): + missed_threshold = settings.missed_signals_grace_abort_threshold + self._get_logger().info( + "Mirrored orphans grace period aborted: first compliant reference snapshot index " + f">= missed_signals_grace_abort_threshold ({missed_threshold})" + ) + return False + orphan_orders = self._mirrored_orphan_open_orders(active_reference_ids) + orphan_count = len(orphan_orders) + late_reference_fill_count = len(self._late_reference_fill_candidate_orders(replicable, orphan_orders, None)) + grace_total = orphan_count + late_reference_fill_count + threshold = settings.mirrored_orphan_grace_abort_threshold + if grace_total == 0: + return False + if grace_total >= threshold: + self._get_logger().info( + f"Mirrored orphans grace period aborted: {grace_total} grace item(s) >= threshold ({threshold})" + ) + return False + if orphan_count > 0 and not self._mirrored_orphan_batch_eligible_for_grace(orphan_orders): + return False + started_at = self.get_mirrored_orphan_grace_started_at() + if started_at is None: + return True + now = time.time() + return (now - started_at) < grace_seconds + + def _reference_symbols_skipped_while_grace_orphans_uncancelled( + self, + replicable: list[dict[str, typing.Any]], + ) -> set[str]: + """ + Symbols whose mirrored orphan open orders are still on the copier because cancel is deferred + during the grace window, or symbols with late-reference-fill candidates (copier filled first). + Reference upserts on these symbols are skipped to avoid stacking a new mirrored side while + alignment is uncertain. + """ + settings = self._copy_settings + grace_seconds = settings.mirrored_orphan_cancel_grace_seconds + if grace_seconds <= 0: + return set() + if self.is_mirrored_orphan_grace_invalid_no_compliant_snapshot(): + return set() + if self.is_mirrored_orphan_grace_aborted_for_missed_historical_signals(): + return set() + active_reference_ids = self._active_reference_order_ids(replicable) + orphan_orders = self._mirrored_orphan_open_orders(active_reference_ids) + orphan_count = len(orphan_orders) + late_fill_orders = ( + [] if self._force_immediate_orphan_cancel_next + else self._late_reference_fill_candidate_orders(replicable, orphan_orders, None) + ) + late_reference_fill_count = len(late_fill_orders) + grace_total = orphan_count + late_reference_fill_count + threshold = settings.mirrored_orphan_grace_abort_threshold + if grace_total == 0 or grace_total >= threshold: + return set() + if orphan_count > 0 and not self._mirrored_orphan_batch_eligible_for_grace(orphan_orders): + return set() + started_at = self.get_mirrored_orphan_grace_started_at() + now = time.time() + if started_at is not None and (now - started_at) >= grace_seconds: + return set() + symbols = {order.symbol for order in orphan_orders} + for late_order in late_fill_orders: + late_origin = late_order[trading_constants.STORAGE_ORIGIN_VALUE] + symbols.add(late_origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value]) + return symbols + + async def synchronize(self) -> list: + """Align copier open orders with reference_account.orders (synched mirror rows).""" + replicable = self._get_replicable_reference_orders() + orphan_cancelled_count = await self.cancel_orders_pending_synchronization(replicable) + skip_symbols_for_upsert = self._reference_symbols_skipped_while_grace_orphans_uncancelled(replicable) + created: list = [] + replaced_cancelled_count = 0 + already_synchronized_count = 0 + skipped_grace_upserts: list[tuple[str, typing.Any]] = [] + for order in replicable: + origin = order[trading_constants.STORAGE_ORIGIN_VALUE] + order_symbol = origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] + if order_symbol in skip_symbols_for_upsert: + skipped_grace_upserts.append( + ( + order_symbol, + origin.get(trading_enums.ExchangeConstantsOrderColumns.ID.value), + ) + ) + continue + try: + batch, replace_count, already_count = await self._upsert_mirrored_reference_order(order) + created.extend(batch) + replaced_cancelled_count += replace_count + already_synchronized_count += already_count + except ( + trading_errors.MissingMinimalExchangeTradeVolume, + trading_errors.OrderCreationError, + ) as err: + self._get_logger().exception( + err, + True, + f"Skipping synched reference order mirror: {err} ({err.__class__.__name__})", + ) + if skipped_grace_upserts: + skipped_summary = ", ".join( + f"{symbol}:{reference_order_id}" + for symbol, reference_order_id in skipped_grace_upserts + ) + self._get_logger().info( + "Skipped reference mirror upsert for %s order(s) (mirrored orphan grace period active): %s", + len(skipped_grace_upserts), + skipped_summary, + ) + total_cancelled = orphan_cancelled_count + replaced_cancelled_count + total_created = len(created) + self._get_logger().info( + f"Order mirror completed: {total_cancelled} cancelled " + f"[{orphan_cancelled_count} orphan(s), {replaced_cancelled_count} replaced], " + f"{total_created} created, " + f"{already_synchronized_count} already synchronized orders." + ) + return created + + async def _cancel_mirrored_orphan_orders( + self, + active_reference_ids: set, + replicable: list[dict[str, typing.Any]], + ) -> int: + orphan_orders = self._mirrored_orphan_open_orders(active_reference_ids) + return await self._apply_grace_policy_and_cancel_mirrored_orphans(orphan_orders, replicable) + + async def _apply_grace_policy_and_cancel_mirrored_orphans( + self, + orphan_orders: list[trading_personal_data.Order], + replicable: list[dict[str, typing.Any]], + ) -> int: + """ + When mirrored orphans exist and/or late-reference-fill candidates exist (or neither, to clear + grace state): defer cancel during an active grace window unless ``abort_mirrored_orphan_grace`` + requested immediate cancel, grace is disabled, combined count reaches abort threshold, + no compliant reference snapshot exists in ``historical_snapshots``, or wall-clock grace + (from ``get_mirrored_orphan_grace_started_at()``) has elapsed, or when the first compliant + reference snapshot is too deep in ``historical_snapshots`` (missed historical signals abort). + """ + settings = self._copy_settings + late_fill_orders = self._late_reference_fill_candidate_orders(replicable, orphan_orders, None) + late_reference_fill_count = len(late_fill_orders) + orphan_count = len(orphan_orders) + grace_total = orphan_count + late_reference_fill_count + + # Nothing to cancel and no alignment episode: clear abort flag for the next sync. + if grace_total == 0: + self._force_immediate_orphan_cancel_next = False + self._get_logger().info( + "Mirrored open-order grace episode cleared: no mirrored orphan orders remain " + "and no late-reference-fill candidates." + ) + return 0 + + grace_seconds = settings.mirrored_orphan_cancel_grace_seconds + threshold = settings.mirrored_orphan_grace_abort_threshold + # Explicit abort or grace disabled: cancel orphans immediately, do not start or extend grace. + if self._force_immediate_orphan_cancel_next or grace_seconds <= 0: + return await self._cancel_mirrored_orphan_order_list(orphan_orders) + + # No compliant snapshot in reference history: cannot anchor grace; same outcome as runaway desync. + if self.is_mirrored_orphan_grace_invalid_no_compliant_snapshot(): + self._get_logger().info( + "Mirrored orphan grace invalid: no compliant reference snapshot in history; " + "cancelling orphan order(s) immediately (full resync required)." + ) + return await self._cancel_mirrored_orphan_order_list(orphan_orders) + + if self.is_mirrored_orphan_grace_aborted_for_missed_historical_signals(): + missed_threshold = settings.missed_signals_grace_abort_threshold + self._get_logger().info( + "Mirrored orphan grace aborted: first compliant reference snapshot index " + f">= missed_signals_grace_abort_threshold ({missed_threshold}); " + "cancelling orphan order(s) immediately (reference history desync)." + ) + return await self._cancel_mirrored_orphan_order_list(orphan_orders) + + # Too many grace items at once: treat as runaway desync, cancel orphans immediately. + if grace_total >= threshold: + self._get_logger().info( + f"Mirrored orphan grace aborted: {grace_total} grace item(s) >= threshold {threshold} " + f"({orphan_count} orphan(s), {late_reference_fill_count} late-reference-fill candidate(s))" + ) + return await self._cancel_mirrored_orphan_order_list(orphan_orders) + + # Orphans present but pair-ratio heuristic fails: cancel those orphans; may still defer on late fills. + if orphan_count > 0 and not self._mirrored_orphan_batch_eligible_for_grace(orphan_orders): + cancelled = await self._cancel_mirrored_orphan_order_list(orphan_orders) + if late_reference_fill_count == 0: + had_grace_window = self.get_mirrored_orphan_grace_started_at() is not None + if had_grace_window: + self._get_logger().info( + "Mirrored orphan grace aborted: post-fill pair-ratio heuristic failed for at least one " + f"orphan (threshold={settings.mirrored_orphan_grace_pair_ratio_max_delta}); " + "cancelling immediately" + ) + else: + self._get_logger().info( + "Mirrored orphan grace skipped: post-fill pair-ratio heuristic failed for at least one " + f"orphan (threshold={settings.mirrored_orphan_grace_pair_ratio_max_delta}); " + "cancelling immediately" + ) + return cancelled + # Orphans cancelled; continue grace driven by late-reference fills (do not exit deferral here). + orphan_orders = [] + orphan_count = 0 + grace_total = late_reference_fill_count + self._get_logger().info( + f"Mirrored orphan grace ineligible for orphan deferral; {cancelled} orphan order(s) cancelled. " + f"Continuing grace window for {late_reference_fill_count} late-reference-fill candidate(s)" + ) + + # Grace window start time is derived from reference historical_snapshots + updated_at (not wall time here). + now = time.time() + started_at = self.get_mirrored_orphan_grace_started_at() + if started_at is None: + self._get_logger().info( + "Mirrored orphan grace: could not resolve grace start from reference history; " + "cancelling orphan order(s) immediately." + ) + return await self._cancel_mirrored_orphan_order_list(orphan_orders) + # Still inside grace window: wait for copier fills / alignment. + if (now - started_at) < grace_seconds: + remaining_seconds = grace_seconds - (now - started_at) + self._get_logger().info( + f"Mirrored orphan cancel deferred: {orphan_count} orphan(s), " + f"{late_reference_fill_count} late-reference-fill candidate(s), " + f"{remaining_seconds:.1f}s grace remaining" + ) + return 0 + + # Grace window finished: cancel orphans that are still open. + self._get_logger().info( + f"Mirrored orphan grace elapsed after {grace_seconds}s: cancelling {orphan_count} orphan(s)" + ) + return await self._cancel_mirrored_orphan_order_list(orphan_orders) + + async def _cancel_mirrored_orphan_order_list( + self, + orphan_orders: list[trading_personal_data.Order], + ) -> int: + cancelled_count = 0 + for order in orphan_orders: + try: + await self._exchange_interface.orders.cancel_order(order) + cancelled_count += 1 + self._get_logger().info( + f"Cancelled mirrored orphan order: symbol={order.symbol} " + f"order_id={order.order_id} side={order.side} type={order.order_type}" + ) + except trading_errors.UnexpectedExchangeSideOrderStateError as err: + self._get_logger().exception( + err, + True, + f"Skipped orphan cancel: {err}, order: {order}", + ) + return cancelled_count + + def _scale_mirrored_order_quantity( + self, + origin: dict, + symbol: str, + side: trading_enums.TradeOrderSide, + ) -> typing.Optional[decimal.Decimal]: + # Spot: sells spend base, buys spend quote — scale the reference order amount by the same leg's + # holdings ratio. This will need to be adapted for futures (margin/position sizing, not spot wallets). + parsed = symbol_util.parse_symbol(symbol) + scale_currency = parsed.quote if side is trading_enums.TradeOrderSide.BUY else parsed.base + reference_holdings = self._reference_account.content.get(scale_currency, {}) # type: ignore + reference_total = reference_holdings.get( + commons_constants.PORTFOLIO_TOTAL, trading_constants.ZERO + ) + if reference_total <= trading_constants.ZERO: + return None + copier_total = self._exchange_interface.portfolio.get_currency_portfolio_total(scale_currency) + amount = decimal.Decimal(str(origin[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value])) + if amount <= trading_constants.ZERO: + return None + scale = copier_total / reference_total + return amount * scale + + def _find_open_order_by_bot_order_id(self, order_id: str) -> typing.Optional[trading_personal_data.Order]: + for order in self._exchange_interface.orders.get_open_orders(): + if str(order.order_id) == str(order_id): + return order + return None + + def _mirrored_order_target_mismatch_reason( + self, + order: trading_personal_data.Order, + symbol: str, + side: trading_enums.TradeOrderSide, + order_type: trading_enums.TraderOrderType, + ideal_quantity: decimal.Decimal, + order_target_price: decimal.Decimal, + current_price: decimal.Decimal, + ) -> typing.Optional[str]: + if order.symbol != symbol: + return f"symbol mismatch (open_order={order.symbol!r}, target={symbol!r})" + if order.side != side: + return f"side mismatch (open_order={order.side}, target={side})" + if order.order_type != order_type: + return f"order_type mismatch (open_order={order.order_type}, target={order_type})" + quantity_tolerance = ideal_quantity * decimal.Decimal("0.002") + quantity_threshold = max(quantity_tolerance, decimal.Decimal("1e-12")) + if abs(order.origin_quantity - ideal_quantity) > quantity_threshold: + return ( + f"quantity mismatch (open_order={order.origin_quantity}, target={ideal_quantity}, " + f"threshold={quantity_threshold})" + ) + if trading_personal_data.get_trade_order_type(order_type) is trading_enums.TradeOrderType.MARKET: + return None + price_tolerance = order_target_price * decimal.Decimal("0.0001") + reference_price = ( + order_target_price if order_target_price > trading_constants.ZERO else current_price + ) + price_threshold = max(price_tolerance, decimal.Decimal("1e-12")) + if abs(order.origin_price - reference_price) > price_threshold: + return ( + f"limit price mismatch (open_order={order.origin_price}, target={reference_price}, " + f"threshold={price_threshold})" + ) + return None + + async def _upsert_mirrored_reference_order(self, order: dict) -> tuple[list, int, int]: + origin = order[trading_constants.STORAGE_ORIGIN_VALUE] + symbol = origin[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] + side, trader_order_type = trading_personal_data.parse_order_type(origin) + if side is None or trader_order_type is None: + self._get_logger().info( + f"Skipping reference order mirror: unsupported type for {symbol} ({trader_order_type})" + ) + return [], 0, 0 + reference_order_id = str(origin[trading_enums.ExchangeConstantsOrderColumns.ID.value]) + existing = self._find_open_order_by_bot_order_id(reference_order_id) + replicable_orders = self._get_replicable_reference_orders() + active_reference_ids = self._active_reference_order_ids(replicable_orders) + orphan_orders = self._mirrored_orphan_open_orders(active_reference_ids) + if existing is None and not self._force_immediate_orphan_cancel_next and self._is_late_reference_fill_for_order(order, orphan_orders): + self._get_logger().info( + f"Skipping mirrored order creation (late reference fill on copier): symbol={symbol} " + f"reference_order_id={reference_order_id}" + ) + return [], 0, 0 + scaled_quantity = self._scale_mirrored_order_quantity(origin, symbol, side) + if scaled_quantity is None or scaled_quantity <= trading_constants.ZERO: + return [], 0, 0 + current_price_val = origin[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] + order_target_price = ( + decimal.Decimal(str(current_price_val)) + if current_price_val not in (None, "") + else trading_constants.ZERO + ) + ( + ideal_quantity, + resolved_type, + market_or_limit_price, + current_price, + ) = await self._compute_mirrored_quantity_type_and_price( + symbol, + side, + scaled_quantity, + order_target_price, + trader_order_type, + ) + if ideal_quantity <= trading_constants.ZERO: + self._get_logger().error( + f"Skipping mirrored order: target quantity is zero: symbol={symbol} " + f"order_id={reference_order_id} side={side} type={trader_order_type} " + f"origin_order: {origin}" + ) + return [], 0, 0 + replace_reason: typing.Optional[str] = None + if existing is not None: + replace_reason = self._mirrored_order_target_mismatch_reason( + existing, + symbol, + side, + resolved_type, + ideal_quantity, + market_or_limit_price, + current_price, + ) + if replace_reason is None: + self._get_logger().info( + f"Mirrored order already synchronized: symbol={existing.symbol} " + f"order_id={existing.order_id} side={existing.side} type={existing.order_type} " + f"(reference_id={reference_order_id})" + ) + return [], 0, 1 + replaced_cancelled = 0 + if existing is not None: + self._get_logger().info( + f"Cancelling mirrored order for replace ({replace_reason}): symbol={existing.symbol} " + f"order_id={existing.order_id} side={existing.side} type={existing.order_type} " + f"(reference_id={reference_order_id})" + ) + await self._exchange_interface.orders.cancel_order(existing) + replaced_cancelled = 1 + self._get_logger().info( + f"Cancelled mirrored order for replace ({replace_reason}): symbol={existing.symbol} " + f"order_id={existing.order_id} side={existing.side} type={existing.order_type} " + f"(reference_id={reference_order_id})" + ) + symbol_market = self._exchange_interface.market.get_market_status(symbol, with_fixer=False) + market_or_limit_price, ideal_quantity = ( + self._exchange_interface.orders.adapt_order_quantity_and_target_price_for_order_creation( + resolved_type, + symbol, + ideal_quantity, + market_or_limit_price, + adapt_price_for_limit_orders=False, + ) + ) + created, _ = await self._exchange_interface.orders.create_orders( + resolved_type, + symbol, + current_price, + ideal_quantity, + market_or_limit_price, + symbol_market, + tag=copy_constants.MIRRORED_ORDER_TAG, + order_id=reference_order_id, + raise_all_creation_error=True, + ) + out = [o for o in created if o is not None] + for created_order in out: + self._get_logger().info( + f"Created mirrored order: symbol={created_order.symbol} " + f"bot_order_id={created_order.order_id} side={created_order.side} type={created_order.order_type} " + f"quantity={created_order.origin_quantity} (reference_id={reference_order_id})" + ) + return out, replaced_cancelled, 0 + + async def _compute_mirrored_quantity_type_and_price( + self, + symbol: str, + side: trading_enums.TradeOrderSide, + scaled_quantity: decimal.Decimal, + order_target_price: decimal.Decimal, + trader_order_type: trading_enums.TraderOrderType, + ) -> tuple[decimal.Decimal, trading_enums.TraderOrderType, decimal.Decimal, decimal.Decimal]: + ( + total_symbol_holding, + total_market_holding, + _market_quantity, + current_price, + _symbol_market, + ) = await self._exchange_interface.orders.get_pre_order_data( + symbol=symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT, + portfolio_type=commons_constants.PORTFOLIO_TOTAL, + ) + effective_target_price = ( + order_target_price + if order_target_price > trading_constants.ZERO + else current_price + ) + resolved_trader_order_type = trader_order_type + if trading_personal_data.get_trade_order_type(trader_order_type) is trading_enums.TradeOrderType.MARKET: + if not self._exchange_interface.market.is_market_open_for_order_type( + symbol, trader_order_type + ): + resolved_trader_order_type = ( + trading_enums.TraderOrderType.BUY_LIMIT + if side is trading_enums.TradeOrderSide.BUY + else trading_enums.TraderOrderType.SELL_LIMIT + ) + resolved_trade_type = trading_personal_data.get_trade_order_type( + resolved_trader_order_type + ) + limit_price = ( + current_price + if resolved_trade_type is trading_enums.TradeOrderType.MARKET + else effective_target_price + ) + if side is trading_enums.TradeOrderSide.BUY: + target_quantity = min( + scaled_quantity, + total_market_holding / effective_target_price + if effective_target_price + else scaled_quantity, + ) + else: + target_quantity = min(scaled_quantity, total_symbol_holding) + if target_quantity <= trading_constants.ZERO: + target_quantity = trading_constants.ZERO + else: + adapted_order_chunks, _ = ( + self._exchange_interface.orders.check_and_adapt_order_details_if_necessary( + symbol, + target_quantity, + limit_price, + ) + ) + adapted_details: list[tuple[decimal.Decimal, decimal.Decimal]] = typing.cast( + list[tuple[decimal.Decimal, decimal.Decimal]], + adapted_order_chunks, + ) + if not adapted_details: + target_quantity = trading_constants.ZERO + else: + target_quantity = sum( + (quantity for quantity, _ in adapted_details), + trading_constants.ZERO, + ) + limit_price = adapted_details[0][1] + return target_quantity, resolved_trader_order_type, limit_price, current_price + + def _get_logger(self) -> logging.BotLogger: + return logging.get_logger(self.__class__.__name__) diff --git a/packages/copy/octobot_copy/rebalancing/__init__.py b/packages/copy/octobot_copy/rebalancing/__init__.py new file mode 100644 index 0000000000..a3dec3153d --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/__init__.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_copy.rebalancing.rebalancer import ( + AbstractRebalancer, + FuturesRebalancer, + SpotRebalancer, + OptionRebalancer, + IDEAL_AMOUNT, + IDEAL_PRICE, + SIMPLE_ADD_MIN_TOLERANCE_RATIO, +) +import octobot_copy.rebalancing.planner as rebalancing_planner +from octobot_copy.rebalancing.rebalancing_client_interface import RebalancingClientInterface + +BaseRebalanceActionsPlanner = rebalancing_planner.BaseRebalanceActionsPlanner +HistoricalConfigurationRebalanceActionsPlanner = rebalancing_planner.HistoricalConfigurationRebalanceActionsPlanner +get_uniform_distribution = rebalancing_planner.get_uniform_distribution + +__all__ = [ + "AbstractRebalancer", + "FuturesRebalancer", + "SpotRebalancer", + "OptionRebalancer", + "IDEAL_AMOUNT", + "IDEAL_PRICE", + "SIMPLE_ADD_MIN_TOLERANCE_RATIO", + "BaseRebalanceActionsPlanner", + "HistoricalConfigurationRebalanceActionsPlanner", + "get_uniform_distribution", + "RebalancingClientInterface", +] \ No newline at end of file diff --git a/packages/copy/octobot_copy/rebalancing/planner/__init__.py b/packages/copy/octobot_copy/rebalancing/planner/__init__.py new file mode 100644 index 0000000000..634f7994ae --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/planner/__init__.py @@ -0,0 +1,26 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_copy.rebalancing.planner.base_rebalance_actions_planner import BaseRebalanceActionsPlanner +from octobot_copy.rebalancing.planner.historical_configuration_rebalance_actions_planner import HistoricalConfigurationRebalanceActionsPlanner +from octobot_copy.rebalancing.planner.distributions import get_uniform_distribution + + +__all__ = [ + "BaseRebalanceActionsPlanner", + "HistoricalConfigurationRebalanceActionsPlanner", + "get_uniform_distribution", +] diff --git a/packages/copy/octobot_copy/rebalancing/planner/base_rebalance_actions_planner.py b/packages/copy/octobot_copy/rebalancing/planner/base_rebalance_actions_planner.py new file mode 100644 index 0000000000..5ec6db3811 --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/planner/base_rebalance_actions_planner.py @@ -0,0 +1,502 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_commons.list_util as list_util +import octobot_commons.logging as logging +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_trading.constants as trading_constants +import octobot_trading.errors as trading_errors + +import octobot_copy.constants as copy_constants +import octobot_copy.enums as rebalancer_enums +import octobot_copy.exchange.exchange_interface as exchange_interface +import octobot_copy.rebalancing.planner.distributions as planner_distributions +import octobot_copy.rebalancing.rebalancing_client_interface as rebalancing_client_interface + + +class BaseRebalanceActionsPlanner: + def __init__( + self, + exchange_interface: exchange_interface.ExchangeInterface, + client: rebalancing_client_interface.RebalancingClientInterface, + ): + self._exchange_interface: exchange_interface.ExchangeInterface = exchange_interface + self.client: rebalancing_client_interface.RebalancingClientInterface = client + + self.ratio_per_asset: dict = {} + self.total_ratio_per_asset: decimal.Decimal = trading_constants.ZERO + + self._targeted_coins: list[str] = [] + self._disabled_symbol_bases: frozenset = frozenset() + self.logger: logging.BotLogger = logging.get_logger(self.__class__.__name__) + + @property + def targeted_coins(self) -> list[str]: + return self._targeted_coins + + @targeted_coins.setter + def targeted_coins(self, value: list[str]) -> None: + self._targeted_coins = list_util.deduplicate(value) + + def get_rebalance_details(self) -> typing.Tuple[bool, dict]: + """ + Main method to get the rebalance details. + """ + rebalance_details = self._empty_rebalance_details() + should_rebalance = False + available_traded_bases = set( + symbol.base + for symbol in self._exchange_interface.market.get_traded_symbols() + ) + + if self.client.synchronization_policy in ( + rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_AS_SOON_AS_POSSIBLE, + rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_DYNAMIC_INDEX_COINS_AS_SOON_AS_POSSIBLE, + ): + should_rebalance = self._register_removed_coin(rebalance_details, available_traded_bases) + should_rebalance = self._register_coins_update(rebalance_details) or should_rebalance + should_rebalance = self._register_quote_asset_rebalance(rebalance_details) or should_rebalance + if ( + should_rebalance + and self.client.synchronization_policy + == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE + ): + self.update_distribution(force_latest=True) + rebalance_details = self._empty_rebalance_details() + self._register_removed_coin(rebalance_details, available_traded_bases) + self._register_coins_update(rebalance_details) + self._register_quote_asset_rebalance(rebalance_details) + + if not rebalance_details[rebalancer_enums.RebalanceDetails.FORCED_REBALANCE.value]: + self._resolve_swaps(rebalance_details) + self._log_rebalance_swap_details(rebalance_details) + return ( + should_rebalance or rebalance_details[rebalancer_enums.RebalanceDetails.FORCED_REBALANCE.value], + rebalance_details, + ) + + def _log_rebalance_swap_details(self, rebalance_details: dict): + for origin, target in rebalance_details[rebalancer_enums.RebalanceDetails.SWAP.value].items(): + logged_origin_ratio = round( + rebalance_details[rebalancer_enums.RebalanceDetails.REMOVE.value][origin] + * trading_constants.ONE_HUNDRED, + 3 + ) + if not (logged_target_ratio := round( + rebalance_details[rebalancer_enums.RebalanceDetails.ADD.value].get( + target, + rebalance_details[rebalancer_enums.RebalanceDetails.BUY_MORE.value].get( + target, trading_constants.ZERO + ) + ) * trading_constants.ONE_HUNDRED, + 3 + )): + self.logger.error(f"No target ratio found for {target} in rebalance details: {rebalance_details}") + logged_target_ratio = "???" # used for logging only + self.logger.info( + f"Swapping {origin} (holding ratio: {logged_origin_ratio}%) for {target} (to buy ratio: {logged_target_ratio}%) " + f"on [{self._exchange_interface.exchange_name}]: ratios are similar enough to allow swapping." + ) + + def update( + self, + *, + min_order_size_margin: decimal.Decimal, + synchronization_policy: typing.Any, + rebalance_trigger_min_ratio: decimal.Decimal, + quote_asset_rebalance_ratio_threshold: decimal.Decimal, + reference_market_ratio: decimal.Decimal, + sell_untargeted_traded_coins: bool, + allow_skip_asset: bool, + can_include_assets_in_open_orders_in_holdings_ratio: bool, + ) -> None: + self.client.update( + min_order_size_margin=min_order_size_margin, + synchronization_policy=synchronization_policy, + rebalance_trigger_min_ratio=rebalance_trigger_min_ratio, + quote_asset_rebalance_ratio_threshold=quote_asset_rebalance_ratio_threshold, + reference_market_ratio=reference_market_ratio, + sell_untargeted_traded_coins=sell_untargeted_traded_coins, + allow_skip_asset=allow_skip_asset, + can_include_assets_in_open_orders_in_holdings_ratio=( + can_include_assets_in_open_orders_in_holdings_ratio + ), + ) + + def update_distribution(self, adapt_to_holdings: bool = False, force_latest: bool = False) -> None: + """ + Refresh the target distribution state + """ + distribution = self._get_supported_distribution(adapt_to_holdings, force_latest) + self.ratio_per_asset = { + asset[rebalancer_enums.DistributionKeys.NAME]: asset + for asset in distribution + } + self.total_ratio_per_asset = decimal.Decimal(sum( + asset[rebalancer_enums.DistributionKeys.VALUE] + for asset in self.ratio_per_asset.values() + )) + self._targeted_coins = self._get_filtered_traded_coins() + + def get_target_ratio(self, currency) -> decimal.Decimal: + if currency in self.ratio_per_asset: + try: + return ( + decimal.Decimal(str( + self.ratio_per_asset[currency][rebalancer_enums.DistributionKeys.VALUE] + )) / self.total_ratio_per_asset + ) + except (decimal.DivisionByZero, decimal.InvalidOperation): + pass + return trading_constants.ZERO + + def _resolve_target_config_for_distribution( + self, + trading_config: typing.Optional[dict], + traded_bases: set[str], + adapt_to_holdings: bool, + force_latest: bool, + ) -> dict: + return trading_config or {} + + def _removed_coins_dynamic_index_as_soon_as_possible( + self, available_traded_bases: typing.AbstractSet[str], + ) -> list: + return [ + coin for coin in available_traded_bases + if coin not in self._targeted_coins and coin != self._exchange_interface.portfolio.reference_market + ] + + def _apply_synchronization_policy_to_removed_coins( + self, + removed_coins: list, + trading_config: typing.Optional[dict], + available_traded_bases: typing.AbstractSet[str], + ) -> list: + policy = self.client.synchronization_policy + if policy == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_AS_SOON_AS_POSSIBLE: + return removed_coins + if policy == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE: + raise NotImplementedError(f"Use HistoricalConfigurationRebalanceActionsPlanner for {policy}") + if policy == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_DYNAMIC_INDEX_COINS_AS_SOON_AS_POSSIBLE: + return self._removed_coins_dynamic_index_as_soon_as_possible(available_traded_bases) + self.logger.error(f"Unknown synchronization policy: {self.client.synchronization_policy}") + return [] + + def get_removed_coins_from_config(self, available_traded_bases) -> list: + """ + Get the coins that should be removed from the config. + Mainly used when a target configuration changed and some coins are no longer in the target. + """ + removed_coins = [] + trading_config = self.client.get_config() + if self.client.get_ideal_distribution(trading_config or {}) and self.client.sell_untargeted_traded_coins: + removed_coins = [ + coin + for coin in available_traded_bases + if coin not in self._targeted_coins + and coin != self._exchange_interface.portfolio.reference_market + ] + return self._apply_synchronization_policy_to_removed_coins( + removed_coins, trading_config, available_traded_bases + ) + + def _get_adjusted_target_ratio(self, currency: str) -> decimal.Decimal: + """ + Get the adjusted target ratio for a given currency relatively to the reference market ratio. + """ + base_ratio = self.get_target_ratio(currency) + if self.client.reference_market_ratio < trading_constants.ONE: + return base_ratio * self.client.reference_market_ratio + return base_ratio + + def _get_coins_to_consider_for_ratio(self) -> list: + """ + Get the coins that should be considered for the ratio, including the reference market. + """ + return self._targeted_coins + [self._exchange_interface.portfolio.reference_market] + + def _empty_rebalance_details(self) -> dict: + return { + rebalancer_enums.RebalanceDetails.SELL_SOME.value: {}, + rebalancer_enums.RebalanceDetails.BUY_MORE.value: {}, + rebalancer_enums.RebalanceDetails.REMOVE.value: {}, + rebalancer_enums.RebalanceDetails.ADD.value: {}, + rebalancer_enums.RebalanceDetails.SWAP.value: {}, + rebalancer_enums.RebalanceDetails.FORCED_REBALANCE.value: False, + } + + def _register_coins_update(self, rebalance_details: dict) -> bool: + """ + Register the coins that are beyond the target ratio: + - some should be added + - some should be bought + - some should be sold + """ + should_rebalance = False + for coin in self._targeted_coins: + target_ratio = self._get_adjusted_target_ratio(coin) + coin_ratio = self._exchange_interface.portfolio.get_holdings_ratio( + coin, + traded_symbols_only=True, + include_assets_in_open_orders=( + self.client.can_include_assets_in_open_orders_in_holdings_ratio + ), + ) + beyond_ratio = True + if coin_ratio == trading_constants.ZERO and target_ratio > trading_constants.ZERO: + rebalance_details[rebalancer_enums.RebalanceDetails.ADD.value][coin] = target_ratio + should_rebalance = True + elif coin_ratio < target_ratio - self.client.rebalance_trigger_min_ratio: + rebalance_details[rebalancer_enums.RebalanceDetails.BUY_MORE.value][coin] = target_ratio + should_rebalance = True + elif coin_ratio > target_ratio + self.client.rebalance_trigger_min_ratio: + rebalance_details[rebalancer_enums.RebalanceDetails.SELL_SOME.value][coin] = target_ratio + should_rebalance = True + else: + beyond_ratio = False + if beyond_ratio: + allowance = round(self.client.rebalance_trigger_min_ratio * trading_constants.ONE_HUNDRED, 2) + self.logger.info( + f"{coin} is beyond the target ratio of {round(target_ratio * trading_constants.ONE_HUNDRED, 2)}[+/-{allowance}]%, " + f"ratio: {round(coin_ratio * trading_constants.ONE_HUNDRED, 2)}%. A rebalance is required." + ) + return should_rebalance + + def _register_removed_coin(self, rebalance_details: dict, available_traded_bases: set[str]) -> bool: + """ + Register the coins that are no longer in the target and should be sold. + """ + should_rebalance = False + for coin in self.get_removed_coins_from_config(available_traded_bases): + if coin in available_traded_bases: + coin_ratio = self._exchange_interface.portfolio.get_holdings_ratio( + coin, + traded_symbols_only=True, + include_assets_in_open_orders=( + self.client.can_include_assets_in_open_orders_in_holdings_ratio + ), + ) + if coin_ratio >= copy_constants.MIN_RATIO_TO_SELL: + if self._removed_index_assets_unsold_are_only_dust({coin: coin_ratio}, []): + self.logger.info( + f"{coin} is not in target anymore but available holding value is below the exchange " + f"minimal order cost; omitting it from the removed-coins sell list." + ) + continue + rebalance_details[rebalancer_enums.RebalanceDetails.REMOVE.value][coin] = coin_ratio + self.logger.info( + f"{coin} (holdings: {round(coin_ratio * trading_constants.ONE_HUNDRED, 3)}%) is not in target " + f"anymore. A rebalance is required." + ) + should_rebalance = True + else: + if coin in self._disabled_symbol_bases: + self.logger.info( + f"Ignoring {coin} holding: {coin} is not in target anymore but is disabled." + ) + else: + self.logger.error( + f"Ignoring {coin} holding: Can't sell {coin} as it is not in any trading pair" + f" but is not in target anymore. This is unexpected" + ) + return should_rebalance + + def _removed_index_assets_unsold_are_only_dust( + self, + removed_coins: typing.Mapping[str, typing.Any], + sold_coins: list, + ) -> bool: + ref_market = self._exchange_interface.portfolio.reference_market + targeted = frozenset(self._targeted_coins) + for asset in removed_coins: + if asset in sold_coins: + continue + if asset in targeted: + return False + if asset == ref_market: + return False + symbol = symbol_util.merge_currencies(asset, ref_market) + try: + price, _ = self._exchange_interface.market.get_potentially_outdated_price(symbol) + min_cost_decimal = self._exchange_interface.orders.get_minimal_order_cost( + symbol, default_price=float(price) + ) + except trading_errors.NotSupported: + return False + available = self._exchange_interface.portfolio.get_currency_portfolio_available(asset) + holding_value = available * price + if holding_value >= min_cost_decimal: + return False + return True + + def _register_quote_asset_rebalance(self, rebalance_details: dict) -> bool: + """ + Returns True if the rebalance is required due to a high non-targeted quote asset holdings ratio. + """ + non_targeted_quote_assets_ratio = self._get_non_targeted_quote_assets_ratio() + if self._should_rebalance_due_to_non_targeted_quote_assets_ratio( + non_targeted_quote_assets_ratio, rebalance_details + ): + rebalance_details[rebalancer_enums.RebalanceDetails.FORCED_REBALANCE.value] = True + self.logger.info( + f"Rebalancing due to a high non-targeted quote asset holdings ratio: " + f"{round(non_targeted_quote_assets_ratio * trading_constants.ONE_HUNDRED, 2)}%, quote rebalance " + f"threshold = {self.client.quote_asset_rebalance_ratio_threshold * trading_constants.ONE_HUNDRED}%" + ) + return True + return False + + def _should_rebalance_due_to_non_targeted_quote_assets_ratio( + self, non_targeted_quote_assets_ratio: decimal.Decimal, rebalance_details: dict + ) -> bool: + total_added_ratio = ( + self._sum_ratios(rebalance_details, rebalancer_enums.RebalanceDetails.ADD.value) + + self._sum_ratios(rebalance_details, rebalancer_enums.RebalanceDetails.BUY_MORE.value) + ) + + if ( + total_added_ratio * (trading_constants.ONE - copy_constants.QUOTE_ASSET_TO_TARGETED_SWAP_RATIO_THRESHOLD) + <= non_targeted_quote_assets_ratio + <= total_added_ratio * (trading_constants.ONE + copy_constants.QUOTE_ASSET_TO_TARGETED_SWAP_RATIO_THRESHOLD) + ): + total_removed_ratio = ( + self._sum_ratios(rebalance_details, rebalancer_enums.RebalanceDetails.REMOVE.value) + + self._sum_ratios(rebalance_details, rebalancer_enums.RebalanceDetails.SELL_SOME.value) + ) + if total_removed_ratio == trading_constants.ZERO: + return False + min_ratio = min( + min( + self.get_target_ratio(coin) + for coin in self._targeted_coins + ) if self._targeted_coins else self.client.quote_asset_rebalance_ratio_threshold, + self.client.quote_asset_rebalance_ratio_threshold + ) + return non_targeted_quote_assets_ratio >= min_ratio + + @staticmethod + def _sum_ratios(rebalance_details: dict, key: str) -> decimal.Decimal: + return decimal.Decimal(str(sum( + ratio + for ratio in rebalance_details[key].values() + ))) if rebalance_details[key] else trading_constants.ZERO + + def _get_non_targeted_quote_assets_ratio(self) -> decimal.Decimal: + total = trading_constants.ZERO + for quote in set( + symbol.quote + for symbol in self._exchange_interface.market.get_traded_symbols() + if symbol.quote not in self._targeted_coins + ): + ratio = self._exchange_interface.portfolio.get_holdings_ratio( + quote, + traded_symbols_only=True, + include_assets_in_open_orders=( + self.client.can_include_assets_in_open_orders_in_holdings_ratio + ), + ) + if quote == self._exchange_interface.portfolio.reference_market and self.client.reference_market_ratio > trading_constants.ZERO: + reference_market_keep_ratio = trading_constants.ONE - self.client.reference_market_ratio + ratio = max(trading_constants.ZERO, ratio - reference_market_keep_ratio) + total += ratio + return decimal.Decimal(str(total)) + + def _resolve_swaps(self, details: dict): + """ + Resolve swaps between added and removed coins, when swaps are possible + """ + removed = details[rebalancer_enums.RebalanceDetails.REMOVE.value] + details[rebalancer_enums.RebalanceDetails.SWAP.value] = {} + if details[rebalancer_enums.RebalanceDetails.SELL_SOME.value]: + return + added = { + **details[rebalancer_enums.RebalanceDetails.ADD.value], + **details[rebalancer_enums.RebalanceDetails.BUY_MORE.value], + } + if len(removed) == len(added) == copy_constants.ALLOWED_1_TO_1_SWAP_COUNTS: + for removed_coin, removed_ratio, added_coin, added_ratio in zip( + removed, removed.values(), added, added.values() + ): + added_holding_ratio = self._exchange_interface.portfolio.get_holdings_ratio( + added_coin, traded_symbols_only=True, include_assets_in_open_orders=False, + coins_whitelist=self._get_coins_to_consider_for_ratio() + ) + required_added_ratio = added_ratio - added_holding_ratio + if ( + removed_ratio - self.client.rebalance_trigger_min_ratio + < required_added_ratio + < removed_ratio + self.client.rebalance_trigger_min_ratio + ): + details[rebalancer_enums.RebalanceDetails.SWAP.value][removed_coin] = added_coin + else: + details[rebalancer_enums.RebalanceDetails.SWAP.value] = {} + return + + def _get_filtered_traded_coins(self) -> list[str]: + coins = set( + symbol.base + for symbol in self._exchange_interface.market.get_traded_symbols() + if symbol.base in self.ratio_per_asset and symbol.quote == self._exchange_interface.portfolio.reference_market + ) + if self._exchange_interface.portfolio.reference_market in self.ratio_per_asset and coins: + coins.add(self._exchange_interface.portfolio.reference_market) + return sorted(list(coins)) + + def _get_supported_distribution(self, adapt_to_holdings: bool, force_latest: bool) -> list: + """ + Returns the configured distribution if any, resolved via `_resolve_target_config_for_distribution` + before filtering to traded pairs. Uses a uniform distribution over traded bases if none is configured. + + :param adapt_to_holdings: Passed to `_resolve_target_config_for_distribution` (subclass may use it). + :param force_latest: Passed to `_resolve_target_config_for_distribution` (subclass may use it). + """ + initial_target_config = self.client.get_config() or {} + if detailed_distribution := self.client.get_ideal_distribution(initial_target_config): + traded_bases = set( + symbol.base + for symbol in self._exchange_interface.market.get_traded_symbols() + ) + traded_bases.add(self._exchange_interface.portfolio.reference_market) + target_config = self._resolve_target_config_for_distribution( + initial_target_config, traded_bases, adapt_to_holdings, force_latest + ) + if target_config is not initial_target_config: + # update distribution to the new target config + detailed_distribution = self.client.get_ideal_distribution(target_config) + if not detailed_distribution: + raise ValueError(f"No distribution found in historical target config: {target_config}") + distribution = [ + asset + for asset in detailed_distribution + if asset[rebalancer_enums.DistributionKeys.NAME] in traded_bases + ] + if removed_assets := [ + asset[rebalancer_enums.DistributionKeys.NAME] + for asset in detailed_distribution + if asset not in distribution + ]: + self.logger.info( + f"Ignored {len(removed_assets)} assets {removed_assets} from configured " + f"distribution as absent from traded pairs." + ) + return distribution + return planner_distributions.get_uniform_distribution([ + symbol.base + for symbol in self._exchange_interface.market.get_traded_symbols() + ]) diff --git a/packages/copy/octobot_copy/rebalancing/planner/distributions.py b/packages/copy/octobot_copy/rebalancing/planner/distributions.py new file mode 100644 index 0000000000..158b296afb --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/planner/distributions.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_copy.enums as copy_enums +import octobot_trading.constants + +MAX_DISTRIBUTION_AFTER_COMMA_DIGITS = 1 + + +def get_uniform_distribution( + coins, + price_by_coin: typing.Optional[dict[str, decimal.Decimal]] = None, +) -> typing.List: + if not coins: + return [] + ratio = float( + round( + octobot_trading.constants.ONE / decimal.Decimal(str(len(coins))) * octobot_trading.constants.ONE_HUNDRED, + MAX_DISTRIBUTION_AFTER_COMMA_DIGITS, + ) + ) + if not ratio: + return [] + return [ + { + copy_enums.DistributionKeys.NAME.value: coin, + copy_enums.DistributionKeys.VALUE.value: ratio, + copy_enums.DistributionKeys.PRICE.value: price_by_coin.get(coin) if price_by_coin else None, + } + for coin in coins + ] diff --git a/packages/copy/octobot_copy/rebalancing/planner/historical_configuration_rebalance_actions_planner.py b/packages/copy/octobot_copy/rebalancing/planner/historical_configuration_rebalance_actions_planner.py new file mode 100644 index 0000000000..86ff5b3b0a --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/planner/historical_configuration_rebalance_actions_planner.py @@ -0,0 +1,220 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_trading.constants as trading_constants + +import octobot_copy.constants as copy_constants +import octobot_copy.enums as rebalancer_enums +import octobot_copy.rebalancing.planner.base_rebalance_actions_planner as rebalance_actions_planner_module + + +class HistoricalConfigurationRebalanceActionsPlanner( + rebalance_actions_planner_module.BaseRebalanceActionsPlanner, +): + def _apply_synchronization_policy_to_removed_coins( + self, + removed_coins: list, + trading_config: typing.Optional[dict], + available_traded_bases: typing.AbstractSet[str], + ) -> list: + """ + Override to handle previous & historical configs. + """ + policy = self.client.synchronization_policy + if policy == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_AS_SOON_AS_POSSIBLE: + return self._extend_removed_coins_index_from_previous_config( + removed_coins, trading_config, available_traded_bases + ) + if policy == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE: + return self._extend_removed_coins_from_historical_config( + removed_coins, trading_config, available_traded_bases + ) + if policy == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_DYNAMIC_INDEX_COINS_AS_SOON_AS_POSSIBLE: + return self._removed_coins_dynamic_index_as_soon_as_possible(available_traded_bases) + self.logger.error(f"Unknown synchronization policy: {self.client.synchronization_policy}") + return [] + + def _resolve_target_config_for_distribution( + self, + trading_config: typing.Optional[dict], + traded_bases: set[str], + adapt_to_holdings: bool, + force_latest: bool, + ) -> dict: + """ + Override to handle historical configs. + """ + if not ( + (adapt_to_holdings or force_latest) + and self.client.synchronization_policy + == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE + ): + return trading_config or {} + if adapt_to_holdings: + return self._get_currently_applied_historical_config_according_to_holdings( + trading_config or {}, traded_bases + ) + try: + target_config = self.client.get_historical_configs( + 0, self._exchange_interface.get_time() + )[0] + self.logger.info( + f"Updated {self.client.client_name} to use latest distribution: " + f"{self.client.get_ideal_distribution(target_config)}." + ) + except IndexError: + target_config = trading_config or {} + return target_config + + def _extend_removed_coins_index_from_previous_config( + self, + removed_coins: list, + trading_config: typing.Optional[dict], + available_traded_bases: typing.AbstractSet[str], + ) -> list: + del available_traded_bases + previous_trading_config = self.client.get_previous_config() + if not (previous_trading_config and trading_config): + return removed_coins + current_coins = [ + asset[rebalancer_enums.DistributionKeys.NAME] + for asset in (self.client.get_ideal_distribution(trading_config or {}) or []) + ] + return list(set(removed_coins + [ + asset[rebalancer_enums.DistributionKeys.NAME] + for asset in previous_trading_config[copy_constants.CONFIG_INDEX_CONTENT] + if asset[rebalancer_enums.DistributionKeys.NAME] not in current_coins + and ( + asset[rebalancer_enums.DistributionKeys.NAME] + != self._exchange_interface.portfolio.reference_market + ) + ])) + + def _extend_removed_coins_from_historical_config( + self, + removed_coins: list, + trading_config: typing.Optional[dict], + available_traded_bases: typing.AbstractSet[str], + ) -> list: + del available_traded_bases + historical_configs = self.client.get_historical_configs( + 0, self._exchange_interface.get_time() + ) + if not (historical_configs and trading_config): + return removed_coins + current_coins = [ + asset[rebalancer_enums.DistributionKeys.NAME] + for asset in (self.client.get_ideal_distribution(trading_config or {}) or []) + ] + removed_coins_from_historical_configs = set() + for historical_config in historical_configs: + for asset in historical_config[copy_constants.CONFIG_INDEX_CONTENT]: + asset_name = asset[rebalancer_enums.DistributionKeys.NAME] + if asset_name not in current_coins and asset_name != self._exchange_interface.portfolio.reference_market: + removed_coins_from_historical_configs.add(asset_name) + return list(removed_coins_from_historical_configs.union(removed_coins)) + + def _get_currently_applied_historical_config_according_to_holdings( + self, config: dict, traded_bases: set[str] + ) -> dict: + if self._is_target_config_applied(config, traded_bases): + self.logger.info(f"Using {self.client.client_name} latest config.") + return config + historical_configs = self.client.get_historical_configs( + 0, self._exchange_interface.get_time() + ) + if not historical_configs or ( + len(historical_configs) == 1 and ( + self.client.get_ideal_distribution(historical_configs[0]) == self.client.get_ideal_distribution(config) + and historical_configs[0][copy_constants.CONFIG_REBALANCE_TRIGGER_MIN_PERCENT] == config[copy_constants.CONFIG_REBALANCE_TRIGGER_MIN_PERCENT] + ) + ): + self.logger.info(f"Using {self.client.client_name} latest config as no historical configs are available.") + return config + for hist_rank, historical_config in enumerate(historical_configs): + if self._is_target_config_applied(historical_config, traded_bases): + self.logger.info( + f"Using [N-{hist_rank}] {self.client.client_name} historical config distribution: " + f"{self.client.get_ideal_distribution(historical_config)}." + ) + return historical_config + self.logger.info( + f"No suitable {self.client.client_name} config found: using latest distribution: " + f"{self.client.get_ideal_distribution(config)}." + ) + return config + + def _is_target_config_applied(self, config: dict, traded_bases: set[str]) -> bool: + full_assets_distribution = self.client.get_ideal_distribution(config) + if not full_assets_distribution: + return False + assets_distribution = [ + asset + for asset in full_assets_distribution + if asset[rebalancer_enums.DistributionKeys.NAME] in traded_bases + ] + if len(assets_distribution) != len(full_assets_distribution): + missing_assets = [ + asset[rebalancer_enums.DistributionKeys.NAME] + for asset in full_assets_distribution + if asset not in assets_distribution + ] + self.logger.warning( + f"Ignored {self.client.client_name} config candidate as {len(missing_assets)} configured assets " + f"{missing_assets} are missing from {self._exchange_interface.exchange_name} traded pairs." + ) + return False + + total_ratio = decimal.Decimal(sum( + asset[rebalancer_enums.DistributionKeys.VALUE] + for asset in assets_distribution + )) + if total_ratio == trading_constants.ZERO: + return False + min_trigger_ratio = self._get_config_min_ratio(config) + for asset_distrib in assets_distribution: + base_target_ratio = decimal.Decimal(str(asset_distrib[rebalancer_enums.DistributionKeys.VALUE])) / total_ratio + if self.client.reference_market_ratio < trading_constants.ONE: + target_ratio = base_target_ratio * self.client.reference_market_ratio + else: + target_ratio = base_target_ratio + coin_ratio = self._exchange_interface.portfolio.get_holdings_ratio( + asset_distrib[rebalancer_enums.DistributionKeys.NAME], traded_symbols_only=True, + include_assets_in_open_orders=False, + ) + if not (target_ratio - min_trigger_ratio <= coin_ratio <= target_ratio + min_trigger_ratio): + return False + return True + + def _get_config_min_ratio(self, config: dict) -> decimal.Decimal: + ratio = None + rebalance_trigger_profiles = config.get(copy_constants.CONFIG_REBALANCE_TRIGGER_PROFILES, None) + if rebalance_trigger_profiles: + selected_rebalance_trigger_profile_name = config.get(copy_constants.CONFIG_SELECTED_REBALANCE_TRIGGER_PROFILE, None) + selected_profile = [ + p for p in rebalance_trigger_profiles + if p[copy_constants.CONFIG_REBALANCE_TRIGGER_PROFILE_NAME] == selected_rebalance_trigger_profile_name + ] + if selected_profile: + selected_rebalance_trigger_profile = selected_profile[0] + ratio = selected_rebalance_trigger_profile[copy_constants.CONFIG_REBALANCE_TRIGGER_PROFILE_MIN_PERCENT] + if ratio is None: + ratio = config.get(copy_constants.CONFIG_REBALANCE_TRIGGER_MIN_PERCENT) + if ratio is None: + return self.client.rebalance_trigger_min_ratio + return decimal.Decimal(str(ratio)) / trading_constants.ONE_HUNDRED diff --git a/packages/copy/octobot_copy/rebalancing/rebalancer/__init__.py b/packages/copy/octobot_copy/rebalancing/rebalancer/__init__.py new file mode 100644 index 0000000000..bf603769f0 --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/rebalancer/__init__.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_copy.rebalancing.rebalancer.rebalancer import ( + AbstractRebalancer, + IDEAL_AMOUNT, + IDEAL_PRICE, + SIMPLE_ADD_MIN_TOLERANCE_RATIO, +) +from octobot_copy.rebalancing.rebalancer.futures_rebalancer import FuturesRebalancer +from octobot_copy.rebalancing.rebalancer.spot_rebalancer import SpotRebalancer +from octobot_copy.rebalancing.rebalancer.option_rebalancer import OptionRebalancer + +__all__ = [ + "AbstractRebalancer", + "FuturesRebalancer", + "SpotRebalancer", + "OptionRebalancer", + "IDEAL_AMOUNT", + "IDEAL_PRICE", + "SIMPLE_ADD_MIN_TOLERANCE_RATIO", +] diff --git a/packages/copy/octobot_copy/rebalancing/rebalancer/futures_rebalancer.py b/packages/copy/octobot_copy/rebalancing/rebalancer/futures_rebalancer.py new file mode 100644 index 0000000000..73b5d97413 --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/rebalancer/futures_rebalancer.py @@ -0,0 +1,155 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_commons.signals as commons_signals +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.errors as trading_errors + +import octobot_copy.enums as rebalancer_enums +import octobot_copy.rebalancing.rebalancer.rebalancer as base_rebalancer + + +class FuturesRebalancer(base_rebalancer.AbstractRebalancer): + async def prepare_coin_rebalancing(self, coin: str): + symbol, _ = self._get_symbol_and_base_asset(coin) + await self._exchange_interface.market.ensure_contract_loaded(symbol) + + async def _buy_coin( + self, + symbol: str, + ideal_amount: decimal.Decimal, + ideal_price: typing.Optional[decimal.Decimal], + dependencies: typing.Optional[commons_signals.SignalDependencies] + ) -> list: + """ + Opens or increases a position for a symbol. + For futures, this creates orders to open/increase positions instead of buying assets. + """ + position = self._exchange_interface.positions.get_symbol_position(symbol, trading_enums.PositionSide.BOTH) + _, _, _, current_price, symbol_market = await self._exchange_interface.orders.get_pre_order_data( + symbol=symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT + ) + + order_target_price = ideal_price if ideal_price is not None else current_price + current_position_size = position.size if not position.is_idle() else trading_constants.ZERO + effective_current_position_size = current_position_size + self._get_pending_open_quantity(symbol) + size_difference = ideal_amount - effective_current_position_size + + if size_difference <= trading_constants.ZERO: + return [] + + side = trading_enums.TradeOrderSide.BUY # Always open long positions for targeted coins + max_order_size, _ = self._exchange_interface.orders.get_futures_max_order_size( + symbol, side, current_price, False, current_position_size, ideal_amount + ) + + order_quantity = min(size_difference, max_order_size) + if order_quantity <= trading_constants.ZERO: + return [] + + is_price_close_to_market = order_target_price >= current_price * (decimal.Decimal(1) - self.PRICE_THRESHOLD_TO_USE_MARKET_ORDER) + ideal_order_type = trading_enums.TraderOrderType.BUY_MARKET if is_price_close_to_market else trading_enums.TraderOrderType.BUY_LIMIT + order_type = ( + ideal_order_type + if self._exchange_interface.market.is_market_open_for_order_type(symbol, ideal_order_type) + else trading_enums.TraderOrderType.BUY_LIMIT + ) + + order_target_price, order_quantity = ( + self._exchange_interface.orders.adapt_order_quantity_and_target_price_for_order_creation( + order_type, + symbol, + order_quantity, + order_target_price, + adapt_price_for_limit_orders=True, + ) + ) + created_orders, orders_should_have_been_created = await self._exchange_interface.orders.create_orders( + order_type, + symbol, + current_price, + order_quantity, + order_target_price, + symbol_market, + dependencies=dependencies, + reduce_only=False, + skip_none_create_results=True, + raise_all_creation_error=self._rebalance_actions_planner.client.raise_all_order_errors, + ) + + if created_orders: + return created_orders + if self._rebalance_actions_planner.client.allow_skip_asset: + self._get_logger().warning(f"Skipping {symbol} order creation...") + return [] + if orders_should_have_been_created: + raise trading_errors.OrderCreationError() + raise trading_errors.MissingMinimalExchangeTradeVolume() + + def compute_desired_futures_position_size( + self, + current_price: decimal.Decimal, + target_ratio: decimal.Decimal, + ) -> decimal.Decimal: + if current_price <= trading_constants.ZERO: + return trading_constants.ZERO + total_holdings_value = self._exchange_interface.portfolio.get_traded_assets_holdings_value( + self._exchange_interface.portfolio.reference_market + ) + try: + return max( + trading_constants.ZERO, + decimal.Decimal(str(target_ratio)) * total_holdings_value / current_price + ) + except decimal.DecimalException: + return trading_constants.ZERO + + async def _get_coins_to_sell_orders(self, details: dict, dependencies: typing.Optional[commons_signals.SignalDependencies]) -> list: + orders = [] + symbol_target_ratio: dict[str, typing.Optional[decimal.Decimal]] = {} + + for coin_or_symbol in self._get_coins_to_sell(details): + symbol_target_ratio[self._get_symbol_and_base_asset(coin_or_symbol)[0]] = None + + for coin_or_symbol in details.get(rebalancer_enums.RebalanceDetails.REMOVE.value, {}): + symbol_target_ratio[self._get_symbol_and_base_asset(coin_or_symbol)[0]] = None + + for coin_or_symbol, target_ratio in details.get(rebalancer_enums.RebalanceDetails.SELL_SOME.value, {}).items(): + symbol_target_ratio[self._get_symbol_and_base_asset(coin_or_symbol)[0]] = target_ratio + + for symbol, target_ratio in symbol_target_ratio.items(): + _, _, _, current_price, symbol_market = await self._exchange_interface.orders.get_pre_order_data( + symbol=symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT, + ) + desired_futures_position_size = ( + self.compute_desired_futures_position_size(current_price, target_ratio) + if target_ratio is not None + else None + ) + orders += await self._exchange_interface.positions.close_symbol_position( + symbol, + dependencies, + current_price, + symbol_market, + desired_futures_position_size=desired_futures_position_size, + ) + + return orders diff --git a/packages/copy/octobot_copy/rebalancing/rebalancer/option_rebalancer.py b/packages/copy/octobot_copy/rebalancing/rebalancer/option_rebalancer.py new file mode 100644 index 0000000000..1ce1db9367 --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/rebalancer/option_rebalancer.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_copy.rebalancing.rebalancer.futures_rebalancer as futures_rebalancer + + +class OptionRebalancer(futures_rebalancer.FuturesRebalancer): + pass diff --git a/packages/copy/octobot_copy/rebalancing/rebalancer/rebalancer.py b/packages/copy/octobot_copy/rebalancing/rebalancer/rebalancer.py new file mode 100644 index 0000000000..9ebb9de50a --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/rebalancer/rebalancer.py @@ -0,0 +1,422 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_commons.logging as logging +import octobot_commons.signals as commons_signals +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_trading.constants as trading_constants +import octobot_trading.errors as trading_errors +import octobot_trading.enums as trading_enums +import octobot_copy.enums as rebalancer_enums +import octobot_copy.errors as copy_errors +import octobot_copy.exchange.exchange_interface as copy_exchange +import octobot_copy.rebalancing.planner.base_rebalance_actions_planner as rebalance_actions_planner_import + + +SIMPLE_ADD_MIN_TOLERANCE_RATIO = decimal.Decimal("0.8") # 20% tolerance +IDEAL_AMOUNT = "ideal_amount" +IDEAL_PRICE = "ideal_price" + + +class AbstractRebalancer: + PRICE_THRESHOLD_TO_USE_MARKET_ORDER = decimal.Decimal(0.01) # 1% + + def __init__( + self, + exchange_interface: copy_exchange.ExchangeInterface, + rebalance_actions_planner: rebalance_actions_planner_import.BaseRebalanceActionsPlanner, + target_coins_prices: dict, + ): + self._exchange_interface: copy_exchange.ExchangeInterface = exchange_interface + self._rebalance_actions_planner: rebalance_actions_planner_import.BaseRebalanceActionsPlanner = ( + rebalance_actions_planner + ) + self._target_coins_prices: dict[str, decimal.Decimal] = target_coins_prices + self._already_logged_aborted_rebalance_error: bool = False + + async def prepare_coin_rebalancing(self, coin: str): + raise NotImplementedError("prepare_coin_rebalancing is not implemented") + + async def try_efficient_spot_rebalance( + self, + details: dict[str, typing.Any], + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + ) -> typing.Optional[list]: + """ + When applicable, executes a cheaper spot rebalance (e.g. single delta trade) and returns + created orders. Returns None to use the default sell-all-then-buy pipeline instead. + """ + return None + + async def ensure_enough_funds_to_buy_after_selling(self) -> None: + """ + Raises MissingMinimalExchangeTradeVolume if there are not enough funds + to buy the targeted coins. + """ + ref_market = self._exchange_interface.portfolio.reference_market + reference_market_to_split = self._get_traded_assets_holdings_value(ref_market) + # will raise if funds are missing + await self._get_buy_symbols_and_amounts( + self._target_coins_prices, + reference_market_to_split, + ) + + async def sell_targeted_coins_for_reference_market( + self, + details: dict[str, typing.Any], + dependencies: typing.Optional[commons_signals.SignalDependencies] + ) -> list: + """ + Sells targeted or swapped coins for the reference market. + """ + pre_cancel_side = trading_enums.TradeOrderSide.BUY if self._rebalance_actions_planner.client.can_include_assets_in_open_orders_in_holdings_ratio else None + await self._pre_cancel_conflicting_orders(details, dependencies, pre_cancel_side) + removed_coins_to_sell_orders = await self._get_removed_coins_to_sell_orders(details, dependencies) + await self._validate_sold_removed_assets(details, removed_coins_to_sell_orders) + coins_to_sell_orders = await self._get_coins_to_sell_orders(details, dependencies) + orders = removed_coins_to_sell_orders + coins_to_sell_orders + if orders: + # ensure all orders are filled + await self._exchange_interface.orders.wait_for_orders_to_fill(orders) + return orders + + def can_simply_buy_coins_without_selling(self, details: dict[str, typing.Any]) -> bool: + """ + Returns True when it is possible to just buy the targeted coins + without selling any other coins. + """ + simple_buy_coins = self._get_simple_buy_coins(details) + if not simple_buy_coins: + return False + # check if there is enough free funds to buy those coins + ref_market = self._exchange_interface.portfolio.reference_market + reference_market_to_split = self._get_traded_assets_holdings_value(ref_market) + free_reference_market_holding = self._get_free_reference_market_holding(ref_market) + cumulated_ratio = sum( + self._rebalance_actions_planner.get_target_ratio(coin) + for coin in simple_buy_coins + ) + tolerated_min_amount = reference_market_to_split * cumulated_ratio * SIMPLE_ADD_MIN_TOLERANCE_RATIO + # can reach target ratios without selling if this condition is met + return tolerated_min_amount <= free_reference_market_holding + + async def split_reference_market_into_targeted_coins( + self, + details: dict[str, typing.Any], + is_simple_buy_without_selling: bool, + dependencies: typing.Optional[commons_signals.SignalDependencies], + ) -> list: + """ + Splits the reference market into the targeted coins. + If is_simple_buy_without_selling is True and swaps are identified, only swap + targets will be bought in order to reduce the number of required transactions. + Otherwise, all targeted coins will be bought. + + For each coin, if self._target_coins_prices is set and far enough from the current price, + the coin will be bought using a limit order at the target price. + Otherwise, a market order will be used. + """ + orders = [] + + pre_cancel_side = trading_enums.TradeOrderSide.SELL if self._rebalance_actions_planner.client.can_include_assets_in_open_orders_in_holdings_ratio else None + await self._pre_cancel_conflicting_orders(details, dependencies, pre_cancel_side) + ref_market = self._exchange_interface.portfolio.reference_market + if details[rebalancer_enums.RebalanceDetails.SWAP.value] or is_simple_buy_without_selling: + # has to infer total reference market holdings + reference_market_to_split = self._get_traded_assets_holdings_value(ref_market) + coins_to_buy = ( + self._get_simple_buy_coins(details) if is_simple_buy_without_selling + else list(details[rebalancer_enums.RebalanceDetails.SWAP.value].values()) + ) + else: + # can use actual reference market holdings: everything has been sold + reference_market_to_split = self._get_free_reference_market_holding(ref_market) + coins_to_buy = self._rebalance_actions_planner.targeted_coins + + reference_market_ratio = self._rebalance_actions_planner.client.reference_market_ratio + # Distribute a percentage among targeted coins, keep the rest in reference market + # If reference_market_ratio is 0, distribute everything (no reservation) + if reference_market_ratio > trading_constants.ZERO: + reference_market_to_distribute = reference_market_to_split * reference_market_ratio + reference_market_reserved = reference_market_to_split - reference_market_to_distribute + else: + reference_market_to_distribute = reference_market_to_split + reference_market_reserved = trading_constants.ZERO + + if reference_market_reserved > trading_constants.ZERO: + self._get_logger().info( + f"Distributing {reference_market_to_distribute} {ref_market} ({reference_market_ratio * trading_constants.ONE_HUNDRED}%) " + f"among targeted coins, reserving {reference_market_reserved} {ref_market} for reference market" + ) + + amount_by_symbol = await self._get_buy_symbols_and_amounts( + self._target_coins_prices, + reference_market_to_distribute, + coins_to_buy=coins_to_buy, + ) + for symbol, values in amount_by_symbol.items(): + orders.extend( + await self._buy_coin( + symbol, + values.get(IDEAL_AMOUNT), + values.get(IDEAL_PRICE), + dependencies, + ) + ) + if not orders and not self._rebalance_actions_planner.client.allow_skip_asset: + raise trading_errors.MissingMinimalExchangeTradeVolume(f"{amount_by_symbol=}") + return orders + + async def _buy_coin( + self, + symbol: str, + ideal_amount: decimal.Decimal, + ideal_price: typing.Optional[decimal.Decimal], + dependencies: typing.Optional[commons_signals.SignalDependencies] + ) -> list: + """ + Buys a coin or opens/increases a position for a symbol. + If ideal_price is set and far enough from the current price, a limit order will be used. + Otherwise, a market order will be used. + """ + raise NotImplementedError("_buy_coin is not implemented") + + async def _get_removed_coins_to_sell_orders(self, details: dict[str, typing.Any], dependencies: typing.Optional[commons_signals.SignalDependencies]) -> list: + removed_coins_to_sell_orders = [] + if removed_coins_to_sell := list(details[rebalancer_enums.RebalanceDetails.REMOVE.value]): + removed_coins_to_sell_orders = await self._exchange_interface.orders.convert_assets_to_target_asset( + removed_coins_to_sell, + self._exchange_interface.portfolio.reference_market, + {}, + dependencies=dependencies, + raise_all_order_errors=self._rebalance_actions_planner.client.raise_all_order_errors, + ) + return removed_coins_to_sell_orders + + async def _get_coins_to_sell_orders(self, details: dict[str, typing.Any], dependencies: typing.Optional[commons_signals.SignalDependencies]) -> list: + order_coins_to_sell = self._get_coins_to_sell(details) + coins_to_sell_orders = await self._exchange_interface.orders.convert_assets_to_target_asset( + order_coins_to_sell, + self._exchange_interface.portfolio.reference_market, + {}, + dependencies=dependencies, + raise_all_order_errors=self._rebalance_actions_planner.client.raise_all_order_errors, + ) + return coins_to_sell_orders + + async def _validate_sold_removed_assets( + self, + details: dict[str, typing.Any], + removed_orders: typing.Optional[list] = None + ) -> None: + if ( + details[rebalancer_enums.RebalanceDetails.REMOVE.value] and + not ( + details[rebalancer_enums.RebalanceDetails.BUY_MORE.value] + or details[rebalancer_enums.RebalanceDetails.ADD.value] + or details[rebalancer_enums.RebalanceDetails.SWAP.value] + ) + ): + if removed_orders is None: + removed_orders = [] + # if rebalance is triggered by removed assets, make sure that the asset can actually be sold + # otherwise the whole rebalance is useless + sold_coins = [ + symbol_util.parse_symbol(order.symbol).base + if order.side is trading_enums.TradeOrderSide.SELL + else symbol_util.parse_symbol(order.symbol).quote + for order in removed_orders + ] + if not any( + asset in sold_coins + for asset in details[rebalancer_enums.RebalanceDetails.REMOVE.value] + ): + self._get_logger().info( + f"Cancelling rebalance: not enough {list(details[rebalancer_enums.RebalanceDetails.REMOVE.value])} funds to sell" + ) + raise trading_errors.MissingMinimalExchangeTradeVolume( + f"not enough {list(details[rebalancer_enums.RebalanceDetails.REMOVE.value])} funds to sell" + ) + + def _get_simple_buy_coins(self, details: dict[str, typing.Any]) -> list: + # Returns the list of coins to simply buy. + # Used to avoid a full rebalance when coins are seen as added to a basket + # AND funds are available to buy it AND no asset should be sold + added = details[rebalancer_enums.RebalanceDetails.ADD.value] or details[rebalancer_enums.RebalanceDetails.BUY_MORE.value] + if added and not ( + details[rebalancer_enums.RebalanceDetails.SWAP.value] + or details[rebalancer_enums.RebalanceDetails.SELL_SOME.value] + or details[rebalancer_enums.RebalanceDetails.REMOVE.value] + or details[rebalancer_enums.RebalanceDetails.FORCED_REBALANCE.value] + ): + added_coins = list(details[rebalancer_enums.RebalanceDetails.ADD.value]) + list(details[rebalancer_enums.RebalanceDetails.BUY_MORE.value]) + return [ + coin + for coin in self._rebalance_actions_planner.targeted_coins # iterate over targeted coins to keep order + if coin in added_coins + ] + [ + coin + for coin in added_coins + if coin not in self._rebalance_actions_planner.targeted_coins + ] + return [] + + def _get_traded_assets_holdings_value( + self, + unit: str, + coins_whitelist: typing.Optional[typing.Iterable] = None, + ) -> decimal.Decimal: + return self._exchange_interface.portfolio.get_traded_assets_holdings_value( + unit, coins_whitelist + ) + + def _get_free_reference_market_holding(self, reference_market: str) -> decimal.Decimal: + return self._exchange_interface.portfolio.get_free_reference_market_holding(reference_market) + + async def _get_buy_symbols_and_amounts( + self, + coins_prices: dict[str, decimal.Decimal], + reference_market_to_split: decimal.Decimal, + *, + coins_to_buy: typing.Optional[list] = None, + ) -> dict: + amount_by_symbol = {} + ref_market = self._exchange_interface.portfolio.reference_market + min_order_size_margin = self._rebalance_actions_planner.client.min_order_size_margin + coins = ( + list(coins_to_buy) + if coins_to_buy is not None + else list(self._rebalance_actions_planner.targeted_coins) + ) + for coin in coins: + if not symbol_util.is_symbol(coin): + if coin == ref_market: + # nothing to do for reference market, keep as is + continue + symbol = symbol_util.merge_currencies(coin, ref_market) + else: + symbol = coin + + up_to_date_price = await self._exchange_interface.market.get_up_to_date_price(symbol) + price = coins_prices.get(symbol, up_to_date_price) + ratio = self._rebalance_actions_planner.get_target_ratio(coin) + if ratio == trading_constants.ZERO: + # coin is not to handle + continue + try: + ideal_amount = ratio * reference_market_to_split / price + except decimal.DecimalException as err: + raise copy_errors.RebalanceAborted( + f"Error computing {symbol} ideal amount ({ratio=}, {reference_market_to_split=}, {price=}): {err=}" + ) from err + # worse case (ex with 5 USDT min order size): exactly 5 USDT can be in portfolio, we therefore want to + # trade at least 5 USDT to be able to buy more. + # - we want ideal_amount - min_cost > min_cost + # - in other words ideal_amount > min_cost * min_order_size_margin + # => ideal_amount / min_order_size_margin > min_cost + effective_min_order_size_margin = min_order_size_margin + if effective_min_order_size_margin < trading_constants.ONE: + effective_min_order_size_margin = trading_constants.ONE + adapted_quantity, symbol_market = ( + self._exchange_interface.orders.check_and_adapt_order_details_if_necessary( + symbol, + ideal_amount / effective_min_order_size_margin, + price, + ) + ) + if not adapted_quantity: + if self._rebalance_actions_planner.client.allow_skip_asset: + self._get_logger().warning( + f"Skipping {symbol} buy: available funds are too low to buy {ratio*trading_constants.ONE_HUNDRED}% " + f"of {reference_market_to_split} holdings: {round(ideal_amount / effective_min_order_size_margin, 9)} {coin}" + ) + continue + # if we can't create an order in this case, we won't be able to balance the portfolio. + # don't try to avoid triggering new rebalances on each wakeup cycling market sell & buy orders + raise trading_errors.MissingMinimalExchangeTradeVolume( + f"Can't buy {symbol}: available funds are too low to buy {ratio*trading_constants.ONE_HUNDRED}% " + f"of {reference_market_to_split} holdings: {round(ideal_amount / effective_min_order_size_margin, 9)} {coin} " + f"required order size is not compatible with {symbol} exchange requirements: " + f"{symbol_market[trading_enums.ExchangeConstantsMarketStatusColumns.LIMITS.value]}." + ) + + amount_by_symbol[symbol] = { + IDEAL_AMOUNT: ideal_amount, + IDEAL_PRICE: price, + } + return amount_by_symbol + + def _get_coins_to_sell(self, details: dict[str, typing.Any]) -> list: + return list(details[rebalancer_enums.RebalanceDetails.SWAP.value]) or ( + self._rebalance_actions_planner.targeted_coins + ) + + def _get_pending_open_quantity(self, symbol: str) -> decimal.Decimal: + return self._exchange_interface.orders.get_pending_open_quantity(symbol) + + async def _cancel_symbol_open_orders( + self, + symbol: str, + dependencies: typing.Optional[commons_signals.SignalDependencies], + allowed_sides: typing.Optional[set[trading_enums.TradeOrderSide]] = None + ) -> typing.Optional[commons_signals.SignalDependencies]: + return await self._exchange_interface.orders.cancel_symbol_open_orders( + symbol, dependencies, allowed_sides=allowed_sides + ) + + async def _pre_cancel_conflicting_orders( + self, + details: dict[str, typing.Any], + dependencies: typing.Optional[commons_signals.SignalDependencies], + side: typing.Optional[trading_enums.TradeOrderSide] + ) -> None: + symbols_to_cleanup = self._get_pre_cancel_order_symbols(details, side) + for symbol in symbols_to_cleanup: + await self._cancel_symbol_open_orders( + symbol, + dependencies=dependencies, + allowed_sides={side} if side else None + ) + + def _get_pre_cancel_order_symbols(self, details: dict[str, typing.Any], side: typing.Optional[trading_enums.TradeOrderSide]) -> set[str]: + symbols_to_cleanup: set[str] = set() + # if side is None, we need to cleanup both buy and sell orders + sides = [side] if side else [trading_enums.TradeOrderSide.BUY, trading_enums.TradeOrderSide.SELL] + keys = [] + for side in sides: + keys.extend(self._get_rebalance_details_keys_for_side(side)) + + for key in keys: + for coin_or_symbol in details.get(key, {}): + symbols_to_cleanup.add(self._get_symbol_and_base_asset(coin_or_symbol)[0]) + return symbols_to_cleanup + + def _get_rebalance_details_keys_for_side(self, side: trading_enums.TradeOrderSide) -> list[str]: + if side == trading_enums.TradeOrderSide.BUY: + return [rebalancer_enums.RebalanceDetails.REMOVE.value, rebalancer_enums.RebalanceDetails.SELL_SOME.value] + if side == trading_enums.TradeOrderSide.SELL: + return [rebalancer_enums.RebalanceDetails.ADD.value, rebalancer_enums.RebalanceDetails.BUY_MORE.value] + raise ValueError(f"Unsupported side: {side}") + + def _get_symbol_and_base_asset(self, coin_or_symbol: str) -> tuple[str, str]: + if symbol_util.is_symbol(coin_or_symbol): + return coin_or_symbol, symbol_util.parse_symbol(coin_or_symbol).base # type: ignore + return symbol_util.merge_currencies(coin_or_symbol, self._exchange_interface.portfolio.reference_market), coin_or_symbol + + def _get_logger(self): + return logging.get_logger(self.__class__.__name__) diff --git a/packages/copy/octobot_copy/rebalancing/rebalancer/spot_rebalancer.py b/packages/copy/octobot_copy/rebalancing/rebalancer/spot_rebalancer.py new file mode 100644 index 0000000000..c102032fd8 --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/rebalancer/spot_rebalancer.py @@ -0,0 +1,209 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_commons.signals as commons_signals +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.errors as trading_errors +import octobot_trading.modes.modes_util as modes_util +import octobot_trading.personal_data as trading_personal_data + +import octobot_copy.constants as copy_constants +import octobot_copy.enums as rebalancer_enums +import octobot_copy.rebalancing.rebalancer.rebalancer as base_rebalancer + + +class SpotRebalancer(base_rebalancer.AbstractRebalancer): + + async def prepare_coin_rebalancing(self, coin: str): + # Nothing to do in SPOT + pass + + async def try_efficient_spot_rebalance( + self, + details: dict[str, typing.Any], + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + ) -> typing.Optional[list]: + if not self._is_two_asset_spot_delta_eligible(details): + return None + ref_market = self._exchange_interface.portfolio.reference_market + base_currency = self._get_single_non_reference_targeted_coin() + if base_currency is None: + return None + symbol = symbol_util.merge_currencies(base_currency, ref_market) + price = await self._exchange_interface.market.get_up_to_date_price(symbol) + price = self._target_coins_prices.get(symbol, price) + if not price or price <= trading_constants.ZERO: + return None + portfolio_value_ref = self._get_traded_assets_holdings_value(ref_market) + reference_market_ratio = self._rebalance_actions_planner.client.reference_market_ratio + if reference_market_ratio > trading_constants.ZERO: + value_to_distribute = portfolio_value_ref * reference_market_ratio + else: + value_to_distribute = portfolio_value_ref + target_ratio = self._rebalance_actions_planner.get_target_ratio(base_currency) + target_base_quantity = target_ratio * value_to_distribute / price + if self._rebalance_actions_planner.client.can_include_assets_in_open_orders_in_holdings_ratio: + current_base_quantity = ( + self._exchange_interface.portfolio.get_currency_portfolio_available(base_currency) + + self._get_pending_open_quantity(symbol) + ) + else: + current_base_quantity = self._exchange_interface.portfolio.get_currency_portfolio_available( + base_currency + ) + delta_base = current_base_quantity - target_base_quantity + exchange_manager = self._exchange_interface.orders._exchange_manager + if delta_base > trading_constants.ZERO: + await self._pre_cancel_conflicting_orders( + details, dependencies, trading_enums.TradeOrderSide.BUY + ) + adapted_chunks, _symbol_market = ( + self._exchange_interface.orders.check_and_adapt_order_details_if_necessary( + symbol, + delta_base, + price, + ) + ) + if not adapted_chunks: + # dust amounts: delta_base is too small to be traded + return None + sell_orders = await modes_util.convert_asset_to_target_asset( + base_currency, + ref_market, + {}, + asset_amount=delta_base, + dependencies=dependencies, + raise_all_order_errors=self._rebalance_actions_planner.client.raise_all_order_errors, + exchange_manager=exchange_manager, + ) + if not sell_orders: + return None + await self._exchange_interface.orders.wait_for_orders_to_fill(sell_orders) + return sell_orders + if delta_base < trading_constants.ZERO: + await self._pre_cancel_conflicting_orders( + details, dependencies, trading_enums.TradeOrderSide.SELL + ) + ideal_price = self._target_coins_prices.get(symbol, price) + try: + buy_orders = await self._buy_coin( + symbol, + target_base_quantity, + ideal_price, + dependencies, + ) + except trading_errors.MissingMinimalExchangeTradeVolume: + # e.g. free quote is mostly locked in open (mirrored) orders: delta buy is not + # executable at min size; fall back to legacy sell-all-then-buy. + return None + if not buy_orders: + # buy order is too small to be traded + return None + await self._exchange_interface.orders.wait_for_orders_to_fill(buy_orders) + return buy_orders + return None + + def _is_two_asset_spot_delta_eligible(self, details: dict[str, typing.Any]) -> bool: + if details[rebalancer_enums.RebalanceDetails.FORCED_REBALANCE.value]: + return False + if details[rebalancer_enums.RebalanceDetails.REMOVE.value]: + return False + if details[rebalancer_enums.RebalanceDetails.ADD.value]: + return False + if details[rebalancer_enums.RebalanceDetails.SWAP.value]: + return False + ref_market = self._exchange_interface.portfolio.reference_market + targeted = self._rebalance_actions_planner.targeted_coins + if len(targeted) != 2 or ref_market not in targeted: + return False + non_reference = [coin for coin in targeted if coin != ref_market] + return len(non_reference) == 1 + + def _get_single_non_reference_targeted_coin(self) -> typing.Optional[str]: + ref_market = self._exchange_interface.portfolio.reference_market + for coin in self._rebalance_actions_planner.targeted_coins: + if coin != ref_market: + return coin + return None + + async def _buy_coin( + self, + symbol: str, + ideal_amount: decimal.Decimal, + ideal_price: typing.Optional[decimal.Decimal], + dependencies: typing.Optional[commons_signals.SignalDependencies] + ) -> list: + current_symbol_holding, current_market_holding, market_quantity, current_price, symbol_market = \ + await self._exchange_interface.orders.get_pre_order_data( + symbol=symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT + ) + order_target_price = ideal_price if ideal_price is not None else current_price + # ideally use the expected reference_market_available_holdings ratio, fallback to available + # holdings if necessary + target_quantity = min(ideal_amount, current_market_holding / order_target_price) + if self._rebalance_actions_planner.client.can_include_assets_in_open_orders_in_holdings_ratio: + effective_current_symbol_holding = current_symbol_holding + self._get_pending_open_quantity(symbol) + else: + effective_current_symbol_holding = current_symbol_holding # should be >0 ?? + ideal_quantity = target_quantity - effective_current_symbol_holding + if ideal_quantity <= trading_constants.ZERO: + return [] + if ideal_quantity < ideal_amount * decimal.Decimal("0.9"): + self._get_logger().warning( + f"{symbol} order quantity has to be reduced from {ideal_amount} to " + f"{ideal_quantity} to adapt to available funds." + ) + is_price_close_to_market = order_target_price >= current_price * (decimal.Decimal(1) - self.PRICE_THRESHOLD_TO_USE_MARKET_ORDER) + ideal_order_type = trading_enums.TraderOrderType.BUY_MARKET if is_price_close_to_market else trading_enums.TraderOrderType.BUY_LIMIT + order_type = ( + ideal_order_type + if self._exchange_interface.market.is_market_open_for_order_type(symbol, ideal_order_type) + else trading_enums.TraderOrderType.BUY_LIMIT + ) + + order_target_price, ideal_quantity = ( + self._exchange_interface.orders.adapt_order_quantity_and_target_price_for_order_creation( + order_type, + symbol, + ideal_quantity, + order_target_price, + adapt_price_for_limit_orders=True, + ) + ) + created_orders, orders_should_have_been_created = await self._exchange_interface.orders.create_orders( + order_type, + symbol, + current_price, + ideal_quantity, + order_target_price, + symbol_market, + dependencies=dependencies, + tag=copy_constants.REBALANCER_ORDER_TAG, + raise_all_creation_error=self._rebalance_actions_planner.client.raise_all_order_errors, + ) + if created_orders: + return created_orders + if self._rebalance_actions_planner.client.allow_skip_asset: + self._get_logger().warning(f"Skipping {symbol} order creation...") + return [] + if orders_should_have_been_created: + raise trading_errors.OrderCreationError() + raise trading_errors.MissingMinimalExchangeTradeVolume() diff --git a/packages/copy/octobot_copy/rebalancing/rebalancing_client_interface.py b/packages/copy/octobot_copy/rebalancing/rebalancing_client_interface.py new file mode 100644 index 0000000000..81bb23c23b --- /dev/null +++ b/packages/copy/octobot_copy/rebalancing/rebalancing_client_interface.py @@ -0,0 +1,85 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import typing + +import octobot_copy.enums as copy_enums + + +class RebalancingClientInterface: + def __init__( + self, + *, + client_name: str, + min_order_size_margin: decimal.Decimal, + rebalance_trigger_min_ratio: decimal.Decimal, + quote_asset_rebalance_ratio_threshold: decimal.Decimal, + reference_market_ratio: decimal.Decimal, + sell_untargeted_traded_coins: bool, + synchronization_policy: copy_enums.SynchronizationPolicy, + allow_skip_asset: bool, + can_include_assets_in_open_orders_in_holdings_ratio: bool, + raise_all_order_errors: bool, + get_config: typing.Callable[[], typing.Optional[dict]], + get_previous_config: typing.Callable[[], typing.Optional[dict]], + get_historical_configs: typing.Callable[[float, float], list], + get_ideal_distribution: typing.Callable[[dict], typing.Optional[list]], + ) -> None: + # static values + self.client_name: str = client_name + self.min_order_size_margin: decimal.Decimal = min_order_size_margin + self.rebalance_trigger_min_ratio: decimal.Decimal = rebalance_trigger_min_ratio + self.quote_asset_rebalance_ratio_threshold: decimal.Decimal = ( + quote_asset_rebalance_ratio_threshold + ) + self.reference_market_ratio: decimal.Decimal = reference_market_ratio + self.sell_untargeted_traded_coins: bool = sell_untargeted_traded_coins + self.synchronization_policy: copy_enums.SynchronizationPolicy = synchronization_policy + self.allow_skip_asset: bool = allow_skip_asset + self.can_include_assets_in_open_orders_in_holdings_ratio: bool = can_include_assets_in_open_orders_in_holdings_ratio + self.raise_all_order_errors: bool = raise_all_order_errors + + # dynamic values + self.get_config = get_config + self.get_previous_config = get_previous_config + self.get_historical_configs = get_historical_configs + self.get_ideal_distribution = get_ideal_distribution + + def update( + self, + *, + min_order_size_margin: decimal.Decimal, + synchronization_policy: typing.Any, + rebalance_trigger_min_ratio: decimal.Decimal, + quote_asset_rebalance_ratio_threshold: decimal.Decimal, + reference_market_ratio: decimal.Decimal, + sell_untargeted_traded_coins: bool, + allow_skip_asset: bool, + can_include_assets_in_open_orders_in_holdings_ratio: bool, + raise_all_order_errors: bool = False, + ) -> None: + self.min_order_size_margin = min_order_size_margin + self.synchronization_policy = synchronization_policy + self.rebalance_trigger_min_ratio = rebalance_trigger_min_ratio + self.quote_asset_rebalance_ratio_threshold = quote_asset_rebalance_ratio_threshold + self.reference_market_ratio = reference_market_ratio + self.sell_untargeted_traded_coins = sell_untargeted_traded_coins + self.allow_skip_asset = allow_skip_asset + self.can_include_assets_in_open_orders_in_holdings_ratio = ( + can_include_assets_in_open_orders_in_holdings_ratio + ) + self.raise_all_order_errors = raise_all_order_errors + \ No newline at end of file diff --git a/packages/copy/octobot_copy_ts/README.md b/packages/copy/octobot_copy_ts/README.md new file mode 100644 index 0000000000..a07768f139 --- /dev/null +++ b/packages/copy/octobot_copy_ts/README.md @@ -0,0 +1 @@ +# OctoBot copy Typescript implementation \ No newline at end of file diff --git a/packages/copy/tests/python/__init__.py b/packages/copy/tests/python/__init__.py new file mode 100644 index 0000000000..63547dc378 --- /dev/null +++ b/packages/copy/tests/python/__init__.py @@ -0,0 +1,44 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3.0 of the License, or +# (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# OctoBot. If not, see . +import typing + +import octobot_commons.symbols as commons_symbols + + +def ensure_traded_symbol_pairs(exchange_manager, symbols: typing.Iterable[str]) -> None: + """ + Config may list pairs, but exchange init can drop them when symbol_exists is false. + The copy rebalance planner only keeps assets whose base appears in traded_symbols. + """ + symbols_set = set(symbols) + if not symbols_set: + return + exchange_config = exchange_manager.exchange_config + if symbols_set.issubset(set(exchange_config.traded_symbol_pairs)): + return + exchange_config.traded_symbol_pairs = sorted( + set(exchange_config.traded_symbol_pairs) | symbols_set + ) + exchange_config.traded_symbols = [ + commons_symbols.parse_symbol(symbol) for symbol in exchange_config.traded_symbol_pairs + ] + exchange_config.watched_pairs = sorted( + set(exchange_config.watched_pairs) | symbols_set + ) + + for symbol in symbols_set: + if symbol not in exchange_manager.client_symbols: + exchange_manager.client_symbols.append(symbol) diff --git a/packages/copy/tests/python/conftest.py b/packages/copy/tests/python/conftest.py new file mode 100644 index 0000000000..6cb4784ed2 --- /dev/null +++ b/packages/copy/tests/python/conftest.py @@ -0,0 +1,120 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3.0 of the License, or +# (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# OctoBot. If not, see . +# Local pytest fixtures for octobot_copy tests (copied from packages/trading/tests). +import time + +import mock +import pytest +import pytest_asyncio + +import octobot_commons.constants as commons_constants +import octobot_backtesting.backtesting as backtesting_module +import octobot_backtesting.constants as backtesting_constants +import octobot_backtesting.time as backtesting_time +import octobot_commons.tests.test_config as test_config_module +import octobot_trading.exchanges + +pytestmark = pytest.mark.asyncio + +DEFAULT_EXCHANGE_NAME = "binanceus" +DEFAULT_FUTURE_EXCHANGE_NAME = "bybit" + + +@pytest_asyncio.fixture +async def backtesting_config(request): + config = test_config_module.load_test_config() + config[backtesting_constants.CONFIG_BACKTESTING] = {} + config[backtesting_constants.CONFIG_BACKTESTING][commons_constants.CONFIG_ENABLED_OPTION] = True + if hasattr(request, "param"): + ref_market = request.param + config[commons_constants.CONFIG_TRADING][commons_constants.CONFIG_TRADER_REFERENCE_MARKET] = ref_market + return config + + +@pytest_asyncio.fixture +async def fake_backtesting(backtesting_config): + return backtesting_module.Backtesting( + config=backtesting_config, + exchange_ids=[], + matrix_id="", + backtesting_files=[], + ) + + +@pytest_asyncio.fixture +async def backtesting_exchange_manager(request, backtesting_config, fake_backtesting): + config = None + exchange_name = DEFAULT_EXCHANGE_NAME + is_spot = True + is_margin = False + is_future = False + is_option = False + if hasattr(request, "param"): + if isinstance(request.param, str): + mode = request.param + if mode == "spot": + is_spot = True + is_margin = False + is_future = False + is_option = False + elif mode == "margin": + is_spot = False + is_margin = True + is_future = False + is_option = False + elif mode == "futures": + is_spot = False + is_margin = False + is_future = True + is_option = False + exchange_name = DEFAULT_FUTURE_EXCHANGE_NAME + elif mode == "options": + is_spot = False + is_margin = False + is_future = False + is_option = True + elif isinstance(request.param, tuple) and len(request.param) == 5: + config, exchange_name, is_spot, is_margin, is_future = request.param + + if config is None: + config = backtesting_config + exchange_manager_instance = octobot_trading.exchanges.ExchangeManager(config, exchange_name) + exchange_manager_instance.is_backtesting = True + exchange_manager_instance.use_cached_markets = False + exchange_manager_instance.is_spot_only = is_spot + exchange_manager_instance.is_margin = is_margin + exchange_manager_instance.is_future = is_future + exchange_manager_instance.is_option = is_option + exchange_manager_instance.backtesting = fake_backtesting + exchange_manager_instance.backtesting.time_manager = backtesting_time.TimeManager(config) + await exchange_manager_instance.initialize(exchange_config_by_exchange=None) + with mock.patch.object( + exchange_manager_instance.exchange.connector, + "get_exchange_current_time", + side_effect=lambda: time.time(), + ): + yield exchange_manager_instance + await exchange_manager_instance.stop() + + +@pytest_asyncio.fixture +async def backtesting_trader(backtesting_config, backtesting_exchange_manager): + trader_instance = octobot_trading.exchanges.TraderSimulator( + backtesting_config, + backtesting_exchange_manager, + ) + await trader_instance.initialize() + return backtesting_config, backtesting_exchange_manager, trader_instance diff --git a/packages/copy/tests/python/functional_tests/test_rebalance_plan_and_execution.py b/packages/copy/tests/python/functional_tests/test_rebalance_plan_and_execution.py new file mode 100644 index 0000000000..aaecd09c2d --- /dev/null +++ b/packages/copy/tests/python/functional_tests/test_rebalance_plan_and_execution.py @@ -0,0 +1,414 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3.0 of the License, or +# (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# OctoBot. If not, see . +import decimal +import importlib.util +import pathlib + +import pytest + +import octobot_commons.constants as commons_constants +import octobot_trading.api as trading_api +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums + +import octobot_copy.constants as copy_constants +import octobot_copy.copiers.account_copier_factory as account_copier_factory +import octobot_copy.entities as copy_entities +import octobot_copy.enums as copy_enums + + +def _load_copy_tests_python_helpers(): + init_path = pathlib.Path(__file__).resolve().parent.parent / "__init__.py" + spec = importlib.util.spec_from_file_location("copy_tests_python_helpers", init_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +copy_tests_python_helpers = _load_copy_tests_python_helpers() + +pytestmark = pytest.mark.asyncio + +_BTC_USDT = "BTC/USDT" +_ETH_USDT = "ETH/USDT" +_ADA_USDT = "ADA/USDT" + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=True) +async def test_rebalance_plan_and_execution_70_30_to_50_50_btc_usdt_optimized_path(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + copy_tests_python_helpers.ensure_traded_symbol_pairs(exchange_manager, (_BTC_USDT,)) + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + + btc_usdt_price = decimal.Decimal("50000") + total_portfolio_usdt = decimal.Decimal("80000") + usdt_total = total_portfolio_usdt * decimal.Decimal("0.3") + btc_value_usdt = total_portfolio_usdt * decimal.Decimal("0.7") + btc_quantity = btc_value_usdt / btc_usdt_price + + trading_api.force_set_mark_price(exchange_manager, _BTC_USDT, btc_usdt_price) + portfolio_manager.portfolio.update_portfolio_from_balance( + { + "BTC": {"available": btc_quantity, "total": btc_quantity}, + "USDT": {"available": usdt_total, "total": usdt_total}, + }, + True, + ) + portfolio_manager.handle_balance_updated() + portfolio_manager.portfolio_value_holder.value_converter.missing_currency_data_in_exchange.discard("USDT") + portfolio_manager.handle_mark_price_update(_BTC_USDT, btc_usdt_price) + + reference_account = copy_entities.Account( + content={ + "BTC": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + copy_settings = copy_entities.AccountCopySettings() + copier = account_copier_factory.create_account_copier( + reference_account, + copy_settings, + exchange_manager, + copier_trading_mode=None, + ) + + _rebalancer, should_rebalance, details = await copier._prepare_rebalance_plan() + assert should_rebalance is True + assert details == { + copy_enums.RebalanceDetails.FORCED_REBALANCE.value: False, + copy_enums.RebalanceDetails.SELL_SOME.value: {"BTC": decimal.Decimal("0.5")}, + copy_enums.RebalanceDetails.BUY_MORE.value: {"USDT": decimal.Decimal("0.5")}, + copy_enums.RebalanceDetails.SWAP.value: {}, + copy_enums.RebalanceDetails.REMOVE.value: {}, + copy_enums.RebalanceDetails.ADD.value: {}, + } + + result = await copier.copy_account() + # Two-asset spot: single partial sell (delta to target); no full round-trip. + rebalance_orders = result.created_orders + assert len(rebalance_orders) == 1 + + sell_orders = [ + order + for order in rebalance_orders + if order.symbol == _BTC_USDT and order.side is trading_enums.TradeOrderSide.SELL + ] + buy_orders = [ + order + for order in rebalance_orders + if order.symbol == _BTC_USDT and order.side is trading_enums.TradeOrderSide.BUY + ] + assert len(sell_orders) == 1 + assert len(buy_orders) == 0 + expected_sell_delta = btc_quantity - (total_portfolio_usdt * decimal.Decimal("0.5")) / btc_usdt_price + assert sell_orders[0].origin_quantity == expected_sell_delta == decimal.Decimal("0.32") + assert sell_orders[0].origin_price == btc_usdt_price + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=True) +async def test_rebalance_plan_and_execution_20_80_to_50_50_btc_usdt_optimized_path(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + copy_tests_python_helpers.ensure_traded_symbol_pairs(exchange_manager, (_BTC_USDT,)) + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + + btc_usdt_price = decimal.Decimal("50000") + total_portfolio_usdt = decimal.Decimal("100000") + usdt_total = total_portfolio_usdt * decimal.Decimal("0.8") + btc_value_usdt = total_portfolio_usdt * decimal.Decimal("0.2") + btc_quantity = btc_value_usdt / btc_usdt_price + + trading_api.force_set_mark_price(exchange_manager, _BTC_USDT, btc_usdt_price) + portfolio_manager.portfolio.update_portfolio_from_balance( + { + "BTC": {"available": btc_quantity, "total": btc_quantity}, + "USDT": {"available": usdt_total, "total": usdt_total}, + }, + True, + ) + portfolio_manager.handle_balance_updated() + portfolio_manager.portfolio_value_holder.value_converter.missing_currency_data_in_exchange.discard("USDT") + portfolio_manager.handle_mark_price_update(_BTC_USDT, btc_usdt_price) + + reference_account = copy_entities.Account( + content={ + "BTC": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + copy_settings = copy_entities.AccountCopySettings() + copier = account_copier_factory.create_account_copier( + reference_account, + copy_settings, + exchange_manager, + copier_trading_mode=None, + ) + + _rebalancer, should_rebalance, details = await copier._prepare_rebalance_plan() + assert should_rebalance is True + assert details == { + copy_enums.RebalanceDetails.FORCED_REBALANCE.value: False, + copy_enums.RebalanceDetails.SELL_SOME.value: {"USDT": decimal.Decimal("0.5")}, + copy_enums.RebalanceDetails.BUY_MORE.value: {"BTC": decimal.Decimal("0.5")}, + copy_enums.RebalanceDetails.SWAP.value: {}, + copy_enums.RebalanceDetails.REMOVE.value: {}, + copy_enums.RebalanceDetails.ADD.value: {}, + } + + result = await copier.copy_account() + rebalance_orders = result.created_orders + assert len(rebalance_orders) == 1 + buy_orders = [ + order + for order in rebalance_orders + if order.symbol == _BTC_USDT and order.side is trading_enums.TradeOrderSide.BUY + ] + sell_orders = [ + order + for order in rebalance_orders + if order.symbol == _BTC_USDT and order.side is trading_enums.TradeOrderSide.SELL + ] + assert len(buy_orders) == 1 + assert len(sell_orders) == 0 + target_btc = (total_portfolio_usdt * decimal.Decimal("0.5")) / btc_usdt_price + expected_buy_delta = target_btc - btc_quantity + assert buy_orders[0].origin_quantity == expected_buy_delta == decimal.Decimal("0.6") + assert buy_orders[0].origin_price == btc_usdt_price + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=True) +async def test_rebalance_plan_and_execution_two_asset_dust_falls_back_to_full_sell_and_buy(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + copy_tests_python_helpers.ensure_traded_symbol_pairs(exchange_manager, (_BTC_USDT,)) + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + + btc_usdt_price = decimal.Decimal("50000") + # Small portfolio so delta-to-target is below min order size: efficient path bails out, legacy runs. + total_portfolio_usdt = decimal.Decimal("10") + btc_ratio = decimal.Decimal("0.51") # 1% above target + usdt_total = total_portfolio_usdt * (trading_constants.ONE - btc_ratio) + btc_value_usdt = total_portfolio_usdt * btc_ratio + btc_quantity = btc_value_usdt / btc_usdt_price + + trading_api.force_set_mark_price(exchange_manager, _BTC_USDT, btc_usdt_price) + portfolio_manager.portfolio.update_portfolio_from_balance( + { + "BTC": {"available": btc_quantity, "total": btc_quantity}, + "USDT": {"available": usdt_total, "total": usdt_total}, + }, + True, + ) + portfolio_manager.handle_balance_updated() + portfolio_manager.portfolio_value_holder.value_converter.missing_currency_data_in_exchange.discard("USDT") + portfolio_manager.handle_mark_price_update(_BTC_USDT, btc_usdt_price) + + reference_account = copy_entities.Account( + content={ + "BTC": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + copy_settings = copy_entities.AccountCopySettings( + rebalance_trigger_min_ratio=decimal.Decimal("0"), + ) + copier = account_copier_factory.create_account_copier( + reference_account, + copy_settings, + exchange_manager, + copier_trading_mode=None, + ) + + _rebalancer, should_rebalance, details = await copier._prepare_rebalance_plan() + assert should_rebalance is True + + result = await copier.copy_account() + rebalance_orders = result.created_orders + assert len(rebalance_orders) == 2 + assert {order.symbol for order in rebalance_orders} == {_BTC_USDT} + assert any(order.side is trading_enums.TradeOrderSide.SELL for order in rebalance_orders) + assert any(order.side is trading_enums.TradeOrderSide.BUY for order in rebalance_orders) + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=True) +async def test_rebalance_plan_and_execution_80_btc_20_eth_to_50_btc_50_ada(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + copy_tests_python_helpers.ensure_traded_symbol_pairs( + exchange_manager, + (_BTC_USDT, _ETH_USDT, _ADA_USDT), + ) + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + + btc_usdt_price = decimal.Decimal("50000") + eth_usdt_price = decimal.Decimal("2500") + ada_usdt_price = decimal.Decimal("0.5") + total_portfolio_usdt = decimal.Decimal("100000") + btc_value_usdt = total_portfolio_usdt * decimal.Decimal("0.8") + eth_value_usdt = total_portfolio_usdt * decimal.Decimal("0.2") + btc_quantity = btc_value_usdt / btc_usdt_price + eth_quantity = eth_value_usdt / eth_usdt_price + + trading_api.force_set_mark_price(exchange_manager, _BTC_USDT, btc_usdt_price) + trading_api.force_set_mark_price(exchange_manager, _ETH_USDT, eth_usdt_price) + trading_api.force_set_mark_price(exchange_manager, _ADA_USDT, ada_usdt_price) + portfolio_manager.portfolio.update_portfolio_from_balance( + { + "BTC": {"available": btc_quantity, "total": btc_quantity}, + "ETH": {"available": eth_quantity, "total": eth_quantity}, + }, + True, + ) + portfolio_manager.handle_balance_updated() + portfolio_manager.portfolio_value_holder.value_converter.missing_currency_data_in_exchange.discard("USDT") + portfolio_manager.handle_mark_price_update(_BTC_USDT, btc_usdt_price) + portfolio_manager.handle_mark_price_update(_ETH_USDT, eth_usdt_price) + portfolio_manager.handle_mark_price_update(_ADA_USDT, ada_usdt_price) + + reference_account = copy_entities.Account( + content={ + "BTC": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "ADA": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + copy_settings = copy_entities.AccountCopySettings() + copier = account_copier_factory.create_account_copier( + reference_account, + copy_settings, + exchange_manager, + copier_trading_mode=None, + ) + + _rebalancer, should_rebalance, details = await copier._prepare_rebalance_plan() + assert should_rebalance is True + assert details == { + copy_enums.RebalanceDetails.FORCED_REBALANCE.value: False, + copy_enums.RebalanceDetails.SELL_SOME.value: {"BTC": decimal.Decimal("0.5")}, + copy_enums.RebalanceDetails.BUY_MORE.value: {}, + copy_enums.RebalanceDetails.SWAP.value: {}, + copy_enums.RebalanceDetails.REMOVE.value: {"ETH": decimal.Decimal("0.2")}, + copy_enums.RebalanceDetails.ADD.value: {"ADA": decimal.Decimal("0.5")}, + } + + result = await copier.copy_account() + rebalance_orders = result.created_orders + assert len(rebalance_orders) == 4 + + sell_symbols = { + order.symbol + for order in rebalance_orders + if order.side is trading_enums.TradeOrderSide.SELL + } + buy_symbols = { + order.symbol + for order in rebalance_orders + if order.side is trading_enums.TradeOrderSide.BUY + } + assert sell_symbols == {_BTC_USDT, _ETH_USDT} + assert buy_symbols == {_BTC_USDT, _ADA_USDT} + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=True) +async def test_rebalance_plan_and_execution_100_usdt_to_50_btc_50_eth(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + copy_tests_python_helpers.ensure_traded_symbol_pairs( + exchange_manager, + (_BTC_USDT, _ETH_USDT), + ) + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + + btc_usdt_price = decimal.Decimal("50000") + eth_usdt_price = decimal.Decimal("3000") + usdt_total = decimal.Decimal("100000") + + trading_api.force_set_mark_price(exchange_manager, _BTC_USDT, btc_usdt_price) + trading_api.force_set_mark_price(exchange_manager, _ETH_USDT, eth_usdt_price) + portfolio_manager.portfolio.update_portfolio_from_balance( + { + "USDT": {"available": usdt_total, "total": usdt_total}, + }, + True, + ) + portfolio_manager.handle_balance_updated() + portfolio_manager.portfolio_value_holder.value_converter.missing_currency_data_in_exchange.discard("USDT") + portfolio_manager.handle_mark_price_update(_BTC_USDT, btc_usdt_price) + portfolio_manager.handle_mark_price_update(_ETH_USDT, eth_usdt_price) + + reference_account = copy_entities.Account( + content={ + "BTC": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + copy_settings = copy_entities.AccountCopySettings() + copier = account_copier_factory.create_account_copier( + reference_account, + copy_settings, + exchange_manager, + copier_trading_mode=None, + ) + + _rebalancer, should_rebalance, details = await copier._prepare_rebalance_plan() + assert should_rebalance is True + assert details == { + copy_enums.RebalanceDetails.FORCED_REBALANCE.value: False, + copy_enums.RebalanceDetails.SELL_SOME.value: {}, + copy_enums.RebalanceDetails.BUY_MORE.value: {}, + copy_enums.RebalanceDetails.SWAP.value: {}, + copy_enums.RebalanceDetails.REMOVE.value: {}, + copy_enums.RebalanceDetails.ADD.value: { + "BTC": decimal.Decimal("0.5"), + "ETH": decimal.Decimal("0.5"), + }, + } + + result = await copier.copy_account() + rebalance_orders = result.created_orders + assert len(rebalance_orders) == 2 + assert {order.symbol for order in rebalance_orders} == {_BTC_USDT, _ETH_USDT} + assert all(order.side is trading_enums.TradeOrderSide.BUY for order in rebalance_orders) diff --git a/packages/copy/tests/python/orders_mirroring/test_orders_synchronizer.py b/packages/copy/tests/python/orders_mirroring/test_orders_synchronizer.py new file mode 100644 index 0000000000..38b744b7de --- /dev/null +++ b/packages/copy/tests/python/orders_mirroring/test_orders_synchronizer.py @@ -0,0 +1,534 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3.0 of the License, or +# (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# OctoBot. If not, see . +import asyncio +import decimal +import time + +import mock + +import octobot_commons.constants as commons_constants +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums + +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_copy.orders_mirroring.orders_synchronizer as orders_synchronizer_module + + +def _reference_account_with_allocations( + base_ratio: decimal.Decimal, + quote_ratio: decimal.Decimal, +) -> copy_entities.Account: + return copy_entities.Account( + content={ + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: base_ratio, + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: quote_ratio, + }, + }, + orders=[], + ) + + +def _exchange_interface_stub(*, currency_totals: dict[str, decimal.Decimal], market_price: decimal.Decimal): + exchange_interface = mock.MagicMock() + exchange_interface.portfolio.reference_market = "USDT" + + def currency_total(currency: str) -> decimal.Decimal: + return currency_totals[currency] + + exchange_interface.portfolio.get_currency_portfolio_total = currency_total + exchange_interface.market.get_potentially_outdated_price = mock.Mock( + return_value=(market_price, False) + ) + return exchange_interface + + +def _order_stub(*, symbol: str, side, quantity: decimal.Decimal, price: decimal.Decimal): + order = mock.Mock() + order.symbol = symbol + order.side = side + order.origin_quantity = quantity + order.origin_price = price + return order + + +class TestOrdersSynchronizerOrphanGraceHeuristic: + def test_reference_pair_leg_share(self): + reference = _reference_account_with_allocations( + decimal.Decimal("0.25"), + decimal.Decimal("0.5"), + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + mock.MagicMock(), + copy_entities.AccountCopySettings(), + ) + expected = decimal.Decimal("0.25") / (decimal.Decimal("0.25") + decimal.Decimal("0.5")) + assert synchronizer._reference_pair_leg_share("ETH/USDT") == expected + + def test_reference_pair_leg_share_missing_quote_returns_one(self): + reference = copy_entities.Account( + content={ + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + mock.MagicMock(), + copy_entities.AccountCopySettings(), + ) + assert synchronizer._reference_pair_leg_share("ETH/USDT") == trading_constants.ONE + + def test_simulated_pair_share_buy_matches_reference_example(self): + reference = _reference_account_with_allocations( + decimal.Decimal("0.25"), + decimal.Decimal("0.5"), + ) + currency_totals = { + "ETH": decimal.Decimal("1"), + "USDT": decimal.Decimal("10000"), + } + exchange_if = _exchange_interface_stub( + currency_totals=currency_totals, + market_price=decimal.Decimal("2000"), + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(), + ) + buy_order = _order_stub( + symbol="ETH/USDT", + side=trading_enums.TradeOrderSide.BUY, + quantity=decimal.Decimal("1"), + price=decimal.Decimal("2000"), + ) + reference_share = synchronizer._reference_pair_leg_share("ETH/USDT") + simulated_share = synchronizer._simulated_copier_pair_leg_share_after_orphan_fill(buy_order) + assert reference_share is not None + assert simulated_share is not None + assert simulated_share == reference_share + + def test_batch_eligible_false_when_simulated_share_mismatch(self): + reference = _reference_account_with_allocations( + decimal.Decimal("0.5"), + decimal.Decimal("0.5"), + ) + currency_totals = { + "ETH": decimal.Decimal("1"), + "USDT": decimal.Decimal("10000"), + } + exchange_if = _exchange_interface_stub( + currency_totals=currency_totals, + market_price=decimal.Decimal("2000"), + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(mirrored_orphan_grace_pair_ratio_max_delta=decimal.Decimal("0.02")), + ) + buy_order = _order_stub( + symbol="ETH/USDT", + side=trading_enums.TradeOrderSide.BUY, + quantity=decimal.Decimal("1"), + price=decimal.Decimal("2000"), + ) + assert synchronizer._mirrored_orphan_batch_eligible_for_grace([buy_order]) is False + + def test_simulated_pair_share_sell(self): + reference = _reference_account_with_allocations( + decimal.Decimal("1") / decimal.Decimal("6"), + decimal.Decimal("5") / decimal.Decimal("6"), + ) + currency_totals = { + "ETH": decimal.Decimal("2"), + "USDT": decimal.Decimal("8000"), + } + exchange_if = _exchange_interface_stub( + currency_totals=currency_totals, + market_price=decimal.Decimal("2000"), + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(), + ) + sell_order = _order_stub( + symbol="ETH/USDT", + side=trading_enums.TradeOrderSide.SELL, + quantity=decimal.Decimal("1"), + price=decimal.Decimal("2000"), + ) + reference_share = synchronizer._reference_pair_leg_share("ETH/USDT") + simulated_share = synchronizer._simulated_copier_pair_leg_share_after_orphan_fill(sell_order) + assert reference_share is not None + assert simulated_share is not None + assert simulated_share == reference_share + + +def _replicable_buy_limit_order( + *, + order_id: str = "ref-late-1", + amount: decimal.Decimal = decimal.Decimal("1"), + price: decimal.Decimal = decimal.Decimal("2000"), +) -> dict: + return { + trading_constants.STORAGE_ORIGIN_VALUE: { + trading_enums.ExchangeConstantsOrderColumns.ID.value: order_id, + trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value: "ETH/USDT", + trading_enums.ExchangeConstantsOrderColumns.SIDE.value: trading_enums.TradeOrderSide.BUY.value, + trading_enums.ExchangeConstantsOrderColumns.TYPE.value: trading_enums.TradeOrderType.LIMIT.value, + trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value: amount, + trading_enums.ExchangeConstantsOrderColumns.PRICE.value: price, + trading_enums.ExchangeConstantsOrderColumns.STATUS.value: trading_enums.OrderStatus.OPEN.value, + } + } + + +def _replicable_buy_market_order( + *, + order_id: str = "ref-market-1", + amount: decimal.Decimal = decimal.Decimal("1"), + price: decimal.Decimal = decimal.Decimal("2000"), +) -> dict: + return { + trading_constants.STORAGE_ORIGIN_VALUE: { + trading_enums.ExchangeConstantsOrderColumns.ID.value: order_id, + trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value: "ETH/USDT", + trading_enums.ExchangeConstantsOrderColumns.SIDE.value: trading_enums.TradeOrderSide.BUY.value, + trading_enums.ExchangeConstantsOrderColumns.TYPE.value: trading_enums.TradeOrderType.MARKET.value, + trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value: amount, + trading_enums.ExchangeConstantsOrderColumns.PRICE.value: price, + trading_enums.ExchangeConstantsOrderColumns.STATUS.value: trading_enums.OrderStatus.OPEN.value, + } + } + + +class TestMarketOrderExclusion: + def test_replicable_reference_orders_omit_market_include_limit(self): + limit_order = _replicable_buy_limit_order(order_id="limit-1") + market_order = _replicable_buy_market_order(order_id="market-1") + reference = copy_entities.Account( + content={}, + orders=[market_order, limit_order], + ) + exchange_if = mock.MagicMock() + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(), + ) + replicable = synchronizer._get_replicable_reference_orders() + assert replicable == [limit_order] + + def test_mirrored_orphan_open_orders_excludes_copier_market_orders(self): + reference = copy_entities.Account(content={}, orders=[]) + exchange_if = mock.MagicMock() + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(), + ) + market_mirror = mock.Mock() + market_mirror.tag = copy_constants.MIRRORED_ORDER_TAG + market_mirror.order_id = "not-in-reference" + market_mirror.order_type = trading_enums.TraderOrderType.BUY_MARKET + limit_mirror = mock.Mock() + limit_mirror.tag = copy_constants.MIRRORED_ORDER_TAG + limit_mirror.order_id = "orphan-limit" + limit_mirror.order_type = trading_enums.TraderOrderType.BUY_LIMIT + exchange_if.orders.get_open_orders = mock.Mock(return_value=[market_mirror, limit_mirror]) + orphans = synchronizer._mirrored_orphan_open_orders(set()) + assert orphans == [limit_mirror] + + +class TestLateReferenceFillHeuristic: + def test_late_fill_true_when_copier_matches_simulated_reference_fill(self): + reference = copy_entities.Account( + content={ + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.25"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + currency_totals = { + "ETH": decimal.Decimal("2"), + "USDT": decimal.Decimal("8000"), + } + exchange_if = _exchange_interface_stub( + currency_totals=currency_totals, + market_price=decimal.Decimal("2000"), + ) + exchange_if.orders.get_open_orders = mock.Mock(return_value=[]) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(), + ) + order = _replicable_buy_limit_order() + assert synchronizer._passes_late_reference_fill_heuristic(order) is True + assert synchronizer._is_late_reference_fill_for_order(order, []) is True + + def test_late_fill_false_when_new_reference_order_copier_not_yet_filled(self): + reference = copy_entities.Account( + content={ + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.25"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + currency_totals = { + "ETH": decimal.Decimal("1"), + "USDT": decimal.Decimal("10000"), + } + exchange_if = _exchange_interface_stub( + currency_totals=currency_totals, + market_price=decimal.Decimal("2000"), + ) + exchange_if.orders.get_open_orders = mock.Mock(return_value=[]) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_entities.AccountCopySettings(), + ) + order = _replicable_buy_limit_order() + assert synchronizer._passes_late_reference_fill_heuristic(order) is False + assert synchronizer._is_late_reference_fill_for_order(order, []) is False + + def test_grace_started_when_late_fill_only_no_orphans(self): + compliant_snapshot = copy_entities.Account( + updated_at=time.time() - 1.0, + content={ + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.25"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + ) + reference = copy_entities.Account( + updated_at=time.time(), + content={ + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.25"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[], + historical_snapshots=[compliant_snapshot], + ) + currency_totals = { + "ETH": decimal.Decimal("2"), + "USDT": decimal.Decimal("8000"), + } + exchange_if = _exchange_interface_stub( + currency_totals=currency_totals, + market_price=decimal.Decimal("2000"), + ) + exchange_if.orders.get_open_orders = mock.Mock(return_value=[]) + copy_settings = copy_entities.AccountCopySettings( + mirrored_orphan_cancel_grace_seconds=60.0, + mirrored_orphan_grace_abort_threshold=3, + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + reference, + exchange_if, + copy_settings, + ) + order = _replicable_buy_limit_order() + replicable = [order] + + async def run_grace(): + return await synchronizer._apply_grace_policy_and_cancel_mirrored_orphans([], replicable) + + asyncio.run(run_grace()) + assert synchronizer.get_mirrored_orphan_grace_started_at() is not None + + +def _replicable_buy_limit_order_id(order_id: str) -> dict: + return _replicable_buy_limit_order(order_id=order_id) + + +def _mirrored_eth_buy_order_stub(order_id: str) -> mock.Mock: + mirrored = mock.Mock() + mirrored.tag = copy_constants.MIRRORED_ORDER_TAG + mirrored.order_id = order_id + mirrored.symbol = "ETH/USDT" + mirrored.side = trading_enums.TradeOrderSide.BUY + mirrored.origin_price = decimal.Decimal("2000") + mirrored.origin_quantity = decimal.Decimal("1") + return mirrored + + +class TestMissedHistoricalSignalsGraceAbort: + def test_is_aborted_when_first_compliant_snapshot_index_at_threshold(self): + order_m1 = _replicable_buy_limit_order_id("m1") + order_m2 = _replicable_buy_limit_order_id("m2") + portfolio = { + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.25"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + } + empty_snapshot = copy_entities.Account( + updated_at=time.time(), + content=portfolio, + orders=[], + ) + empty_snapshot_mid = copy_entities.Account( + updated_at=time.time() - 1.0, + content=portfolio, + orders=[], + ) + compliant_snapshot = copy_entities.Account( + updated_at=time.time() - 5.0, + content=portfolio, + orders=[order_m1, order_m2], + ) + live_reference = copy_entities.Account( + updated_at=time.time(), + content=portfolio, + orders=[order_m1], + historical_snapshots=[empty_snapshot, empty_snapshot_mid, compliant_snapshot], + ) + mirror_m1 = _mirrored_eth_buy_order_stub("m1") + mirror_m2 = _mirrored_eth_buy_order_stub("m2") + exchange_if = mock.MagicMock() + exchange_if.orders.get_open_orders = mock.Mock(return_value=[mirror_m1, mirror_m2]) + exchange_if.portfolio.reference_market = "USDT" + exchange_if.portfolio.get_currency_portfolio_total = mock.Mock( + return_value=decimal.Decimal("1") + ) + exchange_if.market.get_potentially_outdated_price = mock.Mock( + return_value=(decimal.Decimal("2000"), False) + ) + copy_settings = copy_entities.AccountCopySettings( + mirrored_orphan_cancel_grace_seconds=60.0, + mirrored_orphan_grace_abort_threshold=2, + missed_signals_grace_abort_threshold=2, + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + live_reference, + exchange_if, + copy_settings, + ) + assert synchronizer.is_mirrored_orphan_grace_aborted_for_missed_historical_signals() is True + + def test_apply_grace_cancels_immediately_when_missed_signals_abort(self): + order_m1 = _replicable_buy_limit_order_id("m1") + order_m2 = _replicable_buy_limit_order_id("m2") + portfolio = { + "ETH": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.25"), + }, + "USDT": { + commons_constants.PORTFOLIO_TOTAL: decimal.Decimal("10000"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + } + empty_snapshot = copy_entities.Account( + updated_at=time.time(), + content=portfolio, + orders=[], + ) + empty_snapshot_mid = copy_entities.Account( + updated_at=time.time() - 1.0, + content=portfolio, + orders=[], + ) + compliant_snapshot = copy_entities.Account( + updated_at=time.time() - 5.0, + content=portfolio, + orders=[order_m1, order_m2], + ) + live_reference = copy_entities.Account( + updated_at=time.time(), + content=portfolio, + orders=[order_m1], + historical_snapshots=[empty_snapshot, empty_snapshot_mid, compliant_snapshot], + ) + mirror_m1 = _mirrored_eth_buy_order_stub("m1") + mirror_m2 = _mirrored_eth_buy_order_stub("m2") + exchange_if = mock.MagicMock() + # Two open mirrors so empty-order snapshots see grace_total>=threshold and stay non-compliant; + # otherwise a single orphan snapshot "complies" and missed-signals abort never triggers. + exchange_if.orders.get_open_orders = mock.Mock(return_value=[mirror_m1, mirror_m2]) + exchange_if.orders.cancel_order = mock.AsyncMock() + exchange_if.portfolio.reference_market = "USDT" + currency_totals = { + "ETH": decimal.Decimal("1"), + "USDT": decimal.Decimal("10000"), + } + exchange_if.portfolio.get_currency_portfolio_total = mock.Mock( + side_effect=lambda currency: currency_totals[currency] + ) + exchange_if.market.get_potentially_outdated_price = mock.Mock( + return_value=(decimal.Decimal("2000"), False) + ) + copy_settings = copy_entities.AccountCopySettings( + mirrored_orphan_cancel_grace_seconds=60.0, + mirrored_orphan_grace_abort_threshold=2, + missed_signals_grace_abort_threshold=2, + ) + synchronizer = orders_synchronizer_module.OrdersSynchronizer( + live_reference, + exchange_if, + copy_settings, + ) + replicable = synchronizer._get_replicable_reference_orders() + + async def run_grace(): + return await synchronizer._apply_grace_policy_and_cancel_mirrored_orphans( + [mirror_m2], + replicable, + ) + + asyncio.run(run_grace()) + exchange_if.orders.cancel_order.assert_called_once_with(mirror_m2) diff --git a/packages/evaluators/.coveragerc b/packages/evaluators/.coveragerc new file mode 100644 index 0000000000..d0564c0928 --- /dev/null +++ b/packages/evaluators/.coveragerc @@ -0,0 +1,7 @@ +[run] +omit = + tests/* + venv/* + tentacles/* + setup.py + demo.py diff --git a/packages/evaluators/.gitignore b/packages/evaluators/.gitignore new file mode 100644 index 0000000000..8582e70df4 --- /dev/null +++ b/packages/evaluators/.gitignore @@ -0,0 +1,114 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +.idea + +# tests +tentacles +user/ + +# cython +cython_debug/ +*.c diff --git a/packages/evaluators/BUILD b/packages/evaluators/BUILD new file mode 100644 index 0000000000..c098821e46 --- /dev/null +++ b/packages/evaluators/BUILD @@ -0,0 +1,30 @@ +python_requirements( + name="reqs", + module_mapping={ + "OctoBot-Tulipy": ["tulipy"], + }, +) + +python_sources(name="octobot_evaluators", sources=["octobot_evaluators/**/*.py"]) + +files( + name="test_data", + sources=["tests/static/**/*"], +) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_evaluators", + ":reqs", + "//:dev_reqs", + ":test_data", + "packages/commons:octobot_commons", + "packages/commons:reqs", + "packages/commons:full_reqs", + "packages/tentacles_manager:octobot_tentacles_manager", + "packages/tentacles_manager:reqs", + "packages/tentacles_manager:full_reqs" + ], +) \ No newline at end of file diff --git a/packages/evaluators/CHANGELOG.md b/packages/evaluators/CHANGELOG.md new file mode 100644 index 0000000000..173e9591a2 --- /dev/null +++ b/packages/evaluators/CHANGELOG.md @@ -0,0 +1,461 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.10.1] - 2026-01-25 +### Added +- Add `current_time` to social evaluator `get_data_cache` method +- Add `eval_note_description` and `eval_note_metadata` to evaluation_completed + +## [1.10.0] - 2026-01-23 +### Updated +- dependencies + +## [1.9.9] - 2024-11-26 +### Added +[Requirements] [full] requirements installation + +## [1.9.8] - 2025-10-28 +### Added +- Add social evaluator `get_data_cache` + +## [1.9.7] - 2023-09-27 +### Updated +- Skip warning on outdated evaluation when reseting evaluations + +## [1.9.6] - 2023-08-27 +### Added +- warning on outdated evaluation time submit + +## [1.9.5] - 2023-03-26 +### Added +- previous_config + +## [1.9.4] - 2023-01-09 +### Updated +- dependencies + +## [1.9.3] - 2023-11-10 +### Added +- [Evaluators] add is_in_async_evaluation +### Fixed +- [EvaluationUtil] fix context api + +## [1.9.2] - 2023-10-27 +### Added +- [Evaluators] get_signals_history_type +- [Evaluators] async_evaluation + +## [1.9.1] - 2023-07-23 +### Updated +- [Evaluators] add startup config optional arguments + +## [1.9.0] - 2023-05-02 +### Updated +- Supported python versions +### Removed +- Cython + +## [1.8.7] - 2023-04-03 +### Added +- [Evaluators] Split Initialize using _init_registered_topics + +## [1.8.6] - 2023-03-29 +### Added +- [Evaluators] enable_reevaluation() + +## [1.8.5] - 2023-03-27 +### Updated +- [Evaluators] Set HISTORIZE_USER_INPUT_CONFIG + +## [1.8.4] - 2023-03-23 +### Fixed +- [Evaluators] KeyError on evaluator re-evaluation + +## [1.8.3] - 2023-02-10 +### Updated +- [Scripted] update TriggerSource + +## [1.8.2] - 2023-02-04 +### Added +- [API] Time frame config + +## [1.8.1] - 2023-01-09 +### Added +- [Config] Log evaluator config on load + +## [1.8.0] - 2022-12-23 +### Updated +- [Requirements] Bump + +## [1.7.8] - 2022-12-22 +### Updated +- [API] don't call create_matrix in initialize_evaluators + +## [1.7.7] - 2022-11-11 +### Fixed +- wildcard TA trigger + +## [1.7.6] - 2022-10-22 +### Fixed +- Strategy stop + +## [1.7.5] - 2022-10-09 +### Added +- User inputs + +## [1.7.4] - 2022-06-05 +### Updated +- [Symbols] Update for symbol object + +## [1.7.3] - 2022-05-21 +### Fixed +- [Cython] Matrix channels typing issues + +## [1.7.2] - 2022-05-03 +### Updated +- [Caches] Remove cache clear and close + +## [1.7.1] - 2022-05-02 +### Updated +- [Signals] Import paths + +## [1.7.0] - 2022-03-31 +### Added +- [Scripted] Support for scripted evaluators +- [Caching] Support for cache in evaluators + +## [1.6.24] - 2022-01-23 +### Fixed +- [API] Fix init_required_candles_count when candles_count is empty + +## [1.6.23] - 2022-01-08 +### Updated +- Tulipy requirement to OctoBot-Tulipy +- Bump requirements + +## [1.6.22] - 2021-10-30 +### Updated +- Bump requirements + +## [1.6.21] - 2021-09-20 +### Updated +- bump requirements + +## [1.6.20] - 2021-09-10 +### Fixed +- Error on TA re-evaluations without enough candles data + +## [1.6.19] - 2021-08-11 +### Fixed +- Real time time frames are now available in TA + +## [1.6.18] - 2021-08-05 +### Fixed +- Real time evaluators time frames fallback strategy + +## [1.6.17] - 2021-07-19 +### Updated +- bump requirements + +## [1.6.16] - 2021-05-05 +### Updated +- bump requirements + +## [1.6.15] - 2021-03-03 +### Added +- Python 3.9 support + +## [1.6.14] - 2020-02-25 +### Updated +- Requirements + +## [1.6.13] - 2020-02-08 +### Updated +- Requirements + +## [1.6.12] - 2020-02-03 +### Updated +- Requirements + +## [1.6.11] - 2020-12-30 +### Fixed +- Cython headers + +## [1.6.10] - 2020-12-29 +### Fixed +- Multiple unrelated traded pairs under the same cryptocurrency init process + +## [1.6.9] - 2020-12-28 +### Updated +- Requirements + +## [1.6.8] - 2020-12-23 +### Added +- Profiles handling + +## [1.6.7] - 2020-11-30 +### Fixed +- Evaluators channel filters for non wildcard evaluators + +## [1.6.6] - 2020-11-21 +### Updated +- OctoBot-Trading import + +## [1.6.5] - 2020-11-07 +### Updated +- Requirements + +## [1.6.4] - 2020-10-29 +### Updated +- Numpy requirement + +## [1.6.3] - 2020-10-27 +### Updated +- Evaluator factory improvements + +## [1.6.2] - 2020-10-27 +### Added +- Evaluator factory tests + +## [1.6.1] - 2020-10-23 +### Updated +- Python 3.8 support + +## [1.6.0] - 2020-10-04 +### Updated +- Requirements + +## [1.5.23] - 2020-09-01 +### Updated +- Requirements + +## [1.5.22] - 2020-08-15 +### Updated +- Requirements + +## [1.5.21] - 2020-06-28 +### Updated +- Numpy requirement + +## [1.5.20] - 2020-06-28 +### Updated +- Requirements + +## [1.5.19] - 2020-06-19 +### Updated +- Requirements + +## [1.5.18] - 2020-06-07 +### Updated +- Skip evaluator creation when no activated strategy + +## [1.5.17] - 2020-06-04 +### Fixed +- TA OHLCV channel registration + +## [1.5.16] - 2020-05-27 +### Updated +- Cython version + +## [1.5.15] - 2020-05-21 +### Updated +- Remove advanced manager from commons + +## [1.5.14] - 2020-05-21 +### Updated +- TA re-evaluation trading API + +## [1.5.13] - 2020-05-16 +### Updated +- Requirements + +## [1.5.12] - 2020-05-16 +### Added +- [OctoBotChannel] Add consumer + +## [1.5.11] - 2020-05-14 +### Fixed +- [AbstractEvaluator] Priority level + +## [1.5.10] - 2020-05-14 +### Changed +- [EvaluatorChannel] Default priority level to medium + +## [1.5.9] - 2020-05-11 +### Fixed +- [Strategies] Fix cache handling + +## [1.5.8] - 2020-05-10 +### Update +- [Strategies] use exchange for allowed time delta in TA evaluation validity check + +## [1.5.7] - 2020-05-10 +### Update +- [Requirements] requirements update + +## [1.5.6] - 2020-05-08 +### Update +- [Requirements] requirements update + +## [1.5.5] - 2020-05-03 +### Fixed +- [API] Initialization incorrect type + +## [1.5.4] - 2020-05-03 +### Fixed +- [EventTree] Remove event related methods in matrix_manager + +## [1.5.3] - 2020-05-02 +### Added +- [Channel] Synchronization support + +## [1.5.2] - 2020-05-02 +### Updated +- octobot-channels requirement + +## [1.5.1] - 2020-04-28 +### Updated +- [Strategies]: Technical evaluator cycle handling +- [Evaluators]: Evaluation time handling + +## [1.5.0] - 2020-04-17 +### Added +- [Channels] Evaluators channel +- Matrix event clear management +- Matrix node value expiration + +### Updated +- [Channels] get_chan api with a new matrix_id param + +## [1.4.8] - 2020-04-13 +### Updated +- [TA] Update ohlcv callback + +## [1.4.7] - 2020-04-12 +### Added +- Matrix manager + +## [1.4.6] - 2020-04-10 +### Added +- Bot id support + +## [1.4.5] - 2020-04-07 +### Fixed +- Wildcard imports + +## [1.4.4] - 2020-04-05 +### Updated +- Integrate OctoBot-tentacles-manager 2.0.0 + +## [1.4.3] - 2020-04-04 +### Updated +- Exception logger API from Commons + +### Fixed +- Travis CI file + +## [1.4.2] - 2020-02-17 +### Added +- Stop method to AbstractEvaluator + +### Updated +- Evaluator, initialization, matrix APIs +- Matrix channel logger + +## [1.4.1] - 2020-01-18 +### Updated +- Use exchange_id in exchange channels + +## [1.4.0] - 2020-01-14 +### Added +- Matrices class to storage matrix instances + +### Updated +**Requirements** +- Commons version to 1.2.2 +- Channels version to 1.3.19 +- Tentacles Manager version to 1.0.13 + +## [1.3.3] - 2020-01-12 +### Added +- get_evaluator_classes_from_type API method + +### Updated +- Typing for API methods +- Social evaluator intialization + +## [1.3.2] - 2020-01-07 +### Added +- Cryptocurrency related evaluator + +## [1.3.1] - 2019-12-21 +### Updated +**Requirements** +- Commons version to 1.2.0 +- Channels version to 1.3.17 + +## [1.3.0] - 2019-12-14 +### Changed +- EvaluatorMatrix to EventTree implementation (from Commons) + +### Updated +**Requirements** +- Commons version to 1.1.51 +- Channels version to 1.3.16 + +## [1.2.6] - 2019-11-09 +### Updated +**Requirements** +- Cython version to 0.29.14 +- Commons version to 1.1.49 +- Channels version to 1.3.15 + +## [1.2.5] - 2019-10-30 +### Changed +- OSX support + +## [1.2.4] - 2019-10-11 +### Changed +- Style improvements + +## [1.2.3] - 2019-10-09 +### Added +- PyPi manylinux deployment + +## [1.2.2] - 2019-10-08 +### Changed +- Setup install + +## [1.2.1] - 2019-10-07 +### Changed +- Improved matrix channel cancelling management + +## [1.2.0] - 2019-10-05 +### Added +- Evaluator types management +- Initialization API + +## Moved +- config_manager to commons + +### Fixed +- Channels package compatibility +- Commons package compatibility + +## [1.1.0] - 2019-09-01 +### Changed +- Improved API initialization +- Improved matrix management + +### Fixed +- Channel package compatibility + +## [1.0.0] - 2019-08-14 +### Added +- Evaluator classes migrations from OctoBot project +- Matrix class that manage global evaluation dictionary +- Matrix channel, producer and consumer +- First API methods diff --git a/packages/evaluators/LICENSE b/packages/evaluators/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/evaluators/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/evaluators/MANIFEST.in b/packages/evaluators/MANIFEST.in new file mode 100644 index 0000000000..86655bfd4e --- /dev/null +++ b/packages/evaluators/MANIFEST.in @@ -0,0 +1,9 @@ +recursive-include octobot_evaluators *.pxd + +include README.md +include LICENSE +include CHANGELOG.md +include requirements.txt +include full_requirements.txt + +global-exclude *.c diff --git a/packages/evaluators/README.md b/packages/evaluators/README.md new file mode 100644 index 0000000000..20d6bff6cd --- /dev/null +++ b/packages/evaluators/README.md @@ -0,0 +1,37 @@ +# OctoBot-Evaluators +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/a0c08eab5d4c440aa6e3fc3061ad0520)](https://app.codacy.com/gh/Drakkar-Software/OctoBot-Evaluators?utm_source=github.com&utm_medium=referral&utm_content=Drakkar-Software/OctoBot-Evaluators&utm_campaign=Badge_Grade_Dashboard) +[![Coverage Status](https://coveralls.io/repos/github/Drakkar-Software/OctoBot-Evaluators/badge.svg)](https://coveralls.io/github/Drakkar-Software/OctoBot-Evaluators) +[![Github-Action-CI](https://github.com/Drakkar-Software/OctoBot-Evaluators/workflows/OctoBot-Evaluators-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Evaluators/actions) +[![Build Status](https://cloud.drone.io/api/badges/Drakkar-Software/OctoBot-Evaluators/status.svg)](https://cloud.drone.io/Drakkar-Software/OctoBot-Evaluators) + +# Where are evaluators and strategies ? + +Because OctoBot is modular, a wide range of evaluators and strategies are usable. + +Default evaluators and strategies are located here: [https://github.com/Drakkar-Software/OctoBot-Tentacles](https://github.com/Drakkar-Software/OctoBot-Tentacles). + +To install default evaluators and strategies in your OctoBot, run the following command: + +```bash +python start.py tentacles --install --all +``` + + +It is also possible to specify which module(s) to install by naming it(them). In this case only the modules available in the available packages can be installed. +``` +python start.py tentacles --install forum_evaluator john_smith_macd_evaluator advanced_twitter_evaluator +``` + +**You can find how to create your OctoBot evaluators and strategies [on the OctoBot guides](https://www.octobot.cloud/en/guides/octobot-tentacles-development/customize-your-octobot?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=octobot_evaluators_readme).** + + +# [Octobot Module Manager](https://github.com/Drakkar-Software/OctoBot-Tentacles-Manager) +A module manager for your [OctoBot](https://github.com/Drakkar-Software/OctoBot) ! + +- Install OctoBot-Tentacles-Manager from pip : + +``` {.sourceCode .bash} +$ python3 -m pip install OctoBot-Tentacles-Manager +``` + +# [Customize your Octobot](https://www.octobot.cloud/en/guides/octobot-tentacles-development/customize-your-octobot) \ No newline at end of file diff --git a/packages/evaluators/octobot_evaluators/__init__.py b/packages/evaluators/octobot_evaluators/__init__.py new file mode 100644 index 0000000000..45d42a5a11 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +PROJECT_NAME = "OctoBot-Evaluators" +VERSION = "1.10.1" # major.minor.revision diff --git a/packages/evaluators/octobot_evaluators/api/__init__.py b/packages/evaluators/octobot_evaluators/api/__init__.py new file mode 100644 index 0000000000..cd76df65cb --- /dev/null +++ b/packages/evaluators/octobot_evaluators/api/__init__.py @@ -0,0 +1,99 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_evaluators.api import inspection +from octobot_evaluators.api import evaluators +from octobot_evaluators.api import initialization +from octobot_evaluators.api import matrix + +from octobot_evaluators.api.inspection import ( + is_relevant_evaluator, + get_relevant_TAs_for_strategy, +) +from octobot_evaluators.api.evaluators import ( + get_evaluator_classes_from_type, + get_evaluators_time_frames, + update_time_frames_config, + create_matrix, + stop_evaluator, + stop_evaluator_channel, + stop_all_evaluator_channels, + initialize_evaluators, + create_and_start_all_type_evaluators, +) +from octobot_evaluators.api.initialization import ( + init_time_frames_from_strategies, + get_time_frames_from_strategies, + get_time_frames_from_strategy, + init_required_candles_count_from_evaluators_and_strategies, + get_activated_evaluators, + get_activated_strategies_classes, + get_activated_TA_evaluators_classes, + get_activated_real_time_evaluators_classes, + get_activated_social_evaluators_classes, + del_evaluator_channels, + matrix_channel_exists, + create_evaluator_channels, +) +from octobot_evaluators.api.matrix import ( + get_matrix, + del_matrix, + get_node_children_by_names, + get_children_list, + has_children, + get_value, + get_type, + get_description, + get_metadata, + get_time, +) + +__all__ = [ + "is_relevant_evaluator", + "get_relevant_TAs_for_strategy", + "get_evaluator_classes_from_type", + "get_evaluators_time_frames", + "update_time_frames_config", + "create_matrix", + "stop_evaluator", + "stop_evaluator_channel", + "stop_all_evaluator_channels", + "initialize_evaluators", + "create_and_start_all_type_evaluators", + "init_time_frames_from_strategies", + "get_time_frames_from_strategies", + "get_time_frames_from_strategy", + "init_required_candles_count_from_evaluators_and_strategies", + "get_activated_evaluators", + "get_activated_strategies_classes", + "get_activated_TA_evaluators_classes", + "get_activated_real_time_evaluators_classes", + "get_activated_social_evaluators_classes", + "del_evaluator_channels", + "matrix_channel_exists", + "create_evaluator_channels", + "get_matrix", + "del_matrix", + "get_node_children_by_names", + "get_children_list", + "has_children", + "get_value", + "get_type", + "get_description", + "get_metadata", + "get_time", +] + diff --git a/packages/evaluators/octobot_evaluators/api/evaluators.py b/packages/evaluators/octobot_evaluators/api/evaluators.py new file mode 100644 index 0000000000..af0c2792a1 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/api/evaluators.py @@ -0,0 +1,117 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import async_channel.channels as channel_instances + +import octobot_commons.constants as common_constants +import octobot_commons.logging as logging +import octobot_commons.tentacles_management as tentacles_management +import octobot_commons.time_frame_manager as time_frame_manager + +import octobot_evaluators.api as api +import octobot_evaluators.constants as constants +import octobot_evaluators.evaluators.channel as evaluator_channels +import octobot_evaluators.matrix as matrix +import octobot_evaluators.evaluators as evaluator + +import octobot_tentacles_manager.api as tentacles_manager_api + +LOGGER_NAME = "EvaluatorsAPI" + + +async def create_and_start_all_type_evaluators( + tentacles_setup_config: object, + matrix_id: str, + exchange_name: str, + bot_id: str, + symbols_by_crypto_currencies: dict = None, + symbols: list = None, + time_frames: list = None, + real_time_time_frames: list = None, + relevant_evaluators=common_constants.CONFIG_WILDCARD, + config_by_evaluator=None +) -> list: + return await evaluator.create_and_start_all_type_evaluators( + tentacles_setup_config=tentacles_setup_config, + matrix_id=matrix_id, + exchange_name=exchange_name, + bot_id=bot_id, + symbols_by_crypto_currencies=symbols_by_crypto_currencies, + symbols=symbols, + time_frames=time_frames, + real_time_time_frames=real_time_time_frames, + relevant_evaluators=relevant_evaluators, + config_by_evaluator=config_by_evaluator + ) + + +def get_evaluator_classes_from_type(evaluator_type, tentacles_setup_config, activated_only=True) -> list: + if activated_only: + return [cls for cls in tentacles_management.get_all_classes_from_parent( + evaluator.EvaluatorClassTypes[evaluator_type]) if cls.is_enabled(tentacles_setup_config, False)] + return tentacles_management.get_all_classes_from_parent(evaluator.EvaluatorClassTypes[evaluator_type]) + + +async def initialize_evaluators(config, tentacles_setup_config, config_by_evaluator=None) -> None: + """ + :param config: bot config + :param tentacles_setup_config: tentacles configuration + :param config_by_evaluator: dict of evaluator configuration by evaluator name + """ + _init_time_frames(config, tentacles_setup_config, config_by_evaluator=config_by_evaluator) + # take evaluators and strategies candles requirements into account if any + api.init_required_candles_count_from_evaluators_and_strategies(config, tentacles_setup_config) + + +def get_evaluators_time_frames(config) -> list: + return time_frame_manager.get_config_time_frame(config) + + +def update_time_frames_config(evaluator_class, tentacles_setup_config, time_frames) -> None: + config_update = { + constants.STRATEGIES_REQUIRED_TIME_FRAME: [tf.value for tf in time_frames] + } + tentacles_manager_api.update_tentacle_config( + tentacles_setup_config, + evaluator_class, + config_update + ) + + +def _init_time_frames(config, tentacles_setup_config, config_by_evaluator=None): + # Init time frames using enabled strategies + api.init_time_frames_from_strategies(config, tentacles_setup_config, config_by_strategy=config_by_evaluator) + + +def create_matrix() -> str: + created_matrix: matrix.Matrix = matrix.Matrix() + matrix.Matrices.instance().add_matrix(created_matrix) + return created_matrix.matrix_id + + +async def stop_evaluator(evaluator) -> None: + return await evaluator.stop() + + +async def stop_evaluator_channel(matrix_id, chan_name) -> None: + try: + await evaluator_channels.get_chan(chan_name, matrix_id).stop() + except Exception as e: + logging.get_logger(LOGGER_NAME).exception(e, True, f"Error when stopping evaluator channel {chan_name}: {e}") + + +async def stop_all_evaluator_channels(matrix_id) -> None: + for channel in channel_instances.ChannelInstances.instance().channels[matrix_id]: + await stop_evaluator_channel(matrix_id, channel) diff --git a/packages/evaluators/octobot_evaluators/api/initialization.py b/packages/evaluators/octobot_evaluators/api/initialization.py new file mode 100644 index 0000000000..56d1daad1e --- /dev/null +++ b/packages/evaluators/octobot_evaluators/api/initialization.py @@ -0,0 +1,112 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import async_channel.util as channel_util + +import octobot_tentacles_manager.api as api + +import octobot_commons.constants as common_constants +import octobot_commons.tentacles_management as tentacles_management +import octobot_commons.time_frame_manager as time_frame_manager + +import octobot_evaluators.evaluators.channel as evaluator_channels +import octobot_evaluators.constants as constants +import octobot_evaluators.evaluators as evaluator +import octobot_evaluators.util as util + + +def init_time_frames_from_strategies(config, tentacles_setup_config, config_by_strategy=None) -> None: + config[common_constants.CONFIG_TIME_FRAME] = get_time_frames_from_strategies( + config, tentacles_setup_config, config_by_strategy=config_by_strategy + ) + + +def get_time_frames_from_strategies(config, tentacles_setup_config, config_by_strategy=None) -> list: + config_by_strategy = config_by_strategy or {} + time_frame_list = set( + time_frame + for strategies_eval_class in get_activated_strategies_classes(tentacles_setup_config) + for time_frame in get_time_frames_from_strategy( + strategies_eval_class, config, tentacles_setup_config, + config_by_strategy.get(strategies_eval_class.get_name()) + ) + ) + return time_frame_manager.sort_time_frames(list(time_frame_list)) + + +def get_time_frames_from_strategy(strategy_class, config, tentacles_setup_config, strategy_config=None) -> list: + return strategy_class.get_required_time_frames(config, tentacles_setup_config, strategy_config=strategy_config) + + +def init_required_candles_count_from_evaluators_and_strategies(config, tentacles_setup_config) -> None: + candles_counts = [util.get_required_candles_count(tentacle_class, tentacles_setup_config) + for tentacle_class in get_activated_evaluators(tentacles_setup_config)] + config[common_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT] = max(candles_counts) if candles_counts \ + else common_constants.DEFAULT_IGNORED_VALUE + + +def get_activated_evaluators(tentacles_setup_config): + return get_activated_TA_evaluators_classes(tentacles_setup_config) + \ + get_activated_scripted_evaluators_classes(tentacles_setup_config) + \ + get_activated_real_time_evaluators_classes(tentacles_setup_config) + \ + get_activated_social_evaluators_classes(tentacles_setup_config) + \ + get_activated_strategies_classes(tentacles_setup_config) + + +def get_activated_strategies_classes(tentacles_setup_config): + return _get_activated_classes(tentacles_setup_config, evaluator.StrategyEvaluator) + + +def get_activated_TA_evaluators_classes(tentacles_setup_config): + return _get_activated_classes(tentacles_setup_config, evaluator.TAEvaluator) + + +def get_activated_scripted_evaluators_classes(tentacles_setup_config): + return _get_activated_classes(tentacles_setup_config, evaluator.ScriptedEvaluator) + + +def get_activated_real_time_evaluators_classes(tentacles_setup_config): + return _get_activated_classes(tentacles_setup_config, evaluator.RealTimeEvaluator) + + +def get_activated_social_evaluators_classes(tentacles_setup_config): + return _get_activated_classes(tentacles_setup_config, evaluator.SocialEvaluator) + + +def _get_activated_classes(tentacles_setup_config, parent_class): + return [ + child_class + for child_class in tentacles_management.get_all_classes_from_parent(parent_class) + if api.is_tentacle_activated_in_tentacles_setup_config(tentacles_setup_config, child_class.get_name()) + ] + + +async def create_evaluator_channels(matrix_id: str, is_backtesting: bool = False) -> None: + await channel_util.create_all_subclasses_channel(evaluator_channels.EvaluatorChannel, + evaluator_channels.set_chan, + is_synchronized=is_backtesting, matrix_id=matrix_id) + + +def del_evaluator_channels(matrix_id: str) -> None: + evaluator_channels.del_chan(constants.MATRIX_CHANNEL, matrix_id) + evaluator_channels.del_chan(constants.EVALUATORS_CHANNEL, matrix_id) + + +def matrix_channel_exists(matrix_id: str) -> bool: + try: + evaluator_channels.get_chan(constants.MATRIX_CHANNEL, matrix_id) + return True + except KeyError: + return False diff --git a/packages/evaluators/octobot_evaluators/api/inspection.py b/packages/evaluators/octobot_evaluators/api/inspection.py new file mode 100644 index 0000000000..d93b8e804b --- /dev/null +++ b/packages/evaluators/octobot_evaluators/api/inspection.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as common_constants +import octobot_commons.tentacles_management as tentacles_management + +import octobot_evaluators.evaluators as evaluator + + +def is_relevant_evaluator(evaluator_instance, relevant_evaluators, use_relevant_evaluators_only=False) -> bool: + if evaluator_instance.enabled or use_relevant_evaluators_only: + if relevant_evaluators == common_constants.CONFIG_WILDCARD or \ + evaluator_instance.get_name() in relevant_evaluators: + return True + else: + parent_classes_names = [e.get_name() for e in evaluator_instance.get_parent_evaluator_classes()] + to_check_set = relevant_evaluators + if not isinstance(relevant_evaluators, set): + to_check_set = set(relevant_evaluators) + return not to_check_set.isdisjoint(parent_classes_names) + return False + + +def get_relevant_TAs_for_strategy(strategy, tentacles_setup_config) -> list: + ta_classes_list = [] + relevant_evaluators = strategy.get_required_evaluators(tentacles_setup_config) + for ta_eval_class in tentacles_management.get_all_classes_from_parent(evaluator.TAEvaluator): + ta_eval_class_instance = ta_eval_class(tentacles_setup_config) + # use ony relevant_evaluators given by the strategy + if common_constants.CONFIG_WILDCARD in relevant_evaluators or \ + is_relevant_evaluator(ta_eval_class_instance, relevant_evaluators, use_relevant_evaluators_only=True): + ta_classes_list.append(ta_eval_class) + return ta_classes_list diff --git a/packages/evaluators/octobot_evaluators/api/matrix.py b/packages/evaluators/octobot_evaluators/api/matrix.py new file mode 100644 index 0000000000..c6380a2a41 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/api/matrix.py @@ -0,0 +1,50 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_evaluators.matrix as matrix + + +def get_matrix(matrix_id) -> matrix.Matrix: + return matrix.Matrices.instance().get_matrix(matrix_id) + + +def del_matrix(matrix_id) -> matrix.Matrix: + return matrix.Matrices.instance().del_matrix(matrix_id) + + +def get_node_children_by_names(matrix) -> dict: + return matrix.get_node_children_by_names_at_path([]) + + +def get_children_list(matrix_node) -> dict: + return matrix_node.children + +def has_children(matrix_node) -> bool: + return bool(matrix_node.children) + +def get_value(matrix_node) -> object: + return matrix_node.node_value + +def get_description(matrix_node) -> object: + return matrix_node.node_description + +def get_metadata(matrix_node) -> object: + return matrix_node.node_metadata + +def get_type(matrix_node) -> object: + return matrix_node.node_type + +def get_time(matrix_node) -> object: + return matrix_node.node_value_time diff --git a/packages/evaluators/octobot_evaluators/constants.py b/packages/evaluators/octobot_evaluators/constants.py new file mode 100644 index 0000000000..9cbd33c8f6 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/constants.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +MatrixValueType = typing.NewType('MatrixValueType', typing.Union[str, int, float]) + +START_EVAL_PERTINENCE = 1 +MAX_TA_EVAL_TIME_SECONDS = 0.1 +EVALUATION_ALLOWED_TIME_DELTA = 10 +EVALUATOR_EVAL_DEFAULT_TYPE = float +STRATEGIES_REQUIRED_TIME_FRAME = "required_time_frames" +STRATEGIES_REQUIRED_EVALUATORS = "required_evaluators" +STRATEGIES_COMPATIBLE_EVALUATOR_TYPES = "compatible_evaluator_types" +CONFIG_FORCED_TIME_FRAME = "forced_time_frame" +TENTACLE_DEFAULT_CONFIG = "default_config" + +EVALUATOR_CLASS_TYPE_MRO_INDEX = -4 + +EVALUATORS_CHANNEL: str = "Evaluators" +MATRIX_CHANNEL: str = "Matrix" +MATRIX_CHANNELS: str = "MatrixChannels" + +TA_RE_EVALUATION_TRIGGER_UPDATED_DATA = "TA_re_evaluation_trigger_updated_data" +RESET_EVALUATION = "reset_evaluation" +EVALUATOR_CHANNEL_DATA_ACTION = "action" +EVALUATOR_CHANNEL_DATA_EXCHANGE_ID = "exchange_id" +EVALUATOR_CHANNEL_DATA_TIME_FRAMES = "time_frames" diff --git a/packages/evaluators/octobot_evaluators/enums.py b/packages/evaluators/octobot_evaluators/enums.py new file mode 100644 index 0000000000..9bbe8ab500 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/enums.py @@ -0,0 +1,24 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + + +class EvaluatorMatrixTypes(enum.Enum): + TA = "TA" + SOCIAL = "SOCIAL" + REAL_TIME = "REAL_TIME" + SCRIPTED = "SCRIPTED" + STRATEGIES = "STRATEGIES" diff --git a/packages/evaluators/octobot_evaluators/errors.py b/packages/evaluators/octobot_evaluators/errors.py new file mode 100644 index 0000000000..5f57d45730 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/errors.py @@ -0,0 +1,23 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class UnsetTentacleEvaluation(Exception): + pass + + +class UnavailableEvaluatorError(Exception): + pass diff --git a/packages/evaluators/octobot_evaluators/evaluators/TA_evaluator.py b/packages/evaluators/octobot_evaluators/evaluators/TA_evaluator.py new file mode 100644 index 0000000000..5494b9c882 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/TA_evaluator.py @@ -0,0 +1,140 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import async_channel.constants as channel_constants + +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.channels_name as channels_name +import octobot_commons.tree as commons_tree + +import octobot_evaluators.constants as constants +import octobot_evaluators.evaluators as evaluator + + +class TAEvaluator(evaluator.AbstractEvaluator): + __metaclass__ = evaluator.AbstractEvaluator + DEFAULT_LIVE_PRICE_INIT_TIMEOUT = 5 * common_constants.MINUTE_TO_SECONDS + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.time_frame = None + + # True when this evaluator is only triggered on closed candles + self.is_triggered_after_candle_close = True + + self._price_init_timeout = self.DEFAULT_LIVE_PRICE_INIT_TIMEOUT + + async def start(self, bot_id: str) -> bool: + """ + Default TA start: to be overwritten + Subscribe to OHLCV notification from self.symbols and self.time_frames + :return: success of the evaluator's start + """ + await super().start(bot_id) + try: + import octobot_trading.exchange_channel as exchanges_channel + import octobot_trading.api as exchange_api + exchange_id = exchange_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + time_frame_filter = [tf.value + for tf in exchange_api.get_exchange_available_required_time_frames( + self.exchange_name, exchange_id)] + if len(time_frame_filter) == 1: + time_frame_filter = time_frame_filter[0] + await exchanges_channel.get_chan(channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, exchange_id).\ + new_consumer( + self.evaluator_ohlcv_callback, + cryptocurrency=self.cryptocurrency if self.cryptocurrency else channel_constants.CHANNEL_WILDCARD, + symbol=self.symbol if self.symbol else channel_constants.CHANNEL_WILDCARD, + time_frame=self.time_frame.value if self.time_frame else time_frame_filter, + priority_level=self.priority_level, + ) + if exchange_api.get_is_backtesting( + exchange_api.get_exchange_manager_from_exchange_name_and_id(self.exchange_name, exchange_id) + ): + self.use_backtesting_init_timeout() + return True + except ImportError as e: + self.logger.error(f"Can't connect to OHLCV trading channel {e}") + return False + + def use_backtesting_init_timeout(self): + self._price_init_timeout = 0 + + async def reset_evaluation(self, cryptocurrency, symbol, time_frame): + self.eval_note = common_constants.START_PENDING_EVAL_NOTE + await self.evaluation_completed(cryptocurrency, symbol, time_frame, eval_time=0, notify=False) + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + # To be used to trigger an evaluation when a new candle in closed or a re-evaluation is required + pass + + async def evaluator_ohlcv_callback(self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, + time_frame: str, candle: dict): + if not self.get_is_symbol_wildcard(): + # do not wait for price in symbol wildcard or it will prevent evaluations for all + # symbols waiting in this consumer queue + await commons_tree.EventProvider.instance().wait_for_event( + self.bot_id, + commons_tree.get_exchange_path( + exchange, + common_enums.InitializationEventExchangeTopics.PRICE.value, + symbol=symbol, + ), + self._price_init_timeout + ) + await self.ohlcv_callback(exchange, exchange_id, cryptocurrency, symbol, time_frame, candle, False) + + async def evaluators_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame, + data): + # Used to communicate between evaluators + # ignore time frames related to other instances if case of a non time frame wildcard evaluator + time_frames_to_update = data.get(constants.EVALUATOR_CHANNEL_DATA_TIME_FRAMES, []) \ + if self.get_is_time_frame_wildcard() else \ + [self.time_frame] if self.time_frame in data.get(constants.EVALUATOR_CHANNEL_DATA_TIME_FRAMES, []) else [] + if not self.enable_reevaluation(): + self.logger.debug(f"Ignoring re-evaluation ({data[constants.EVALUATOR_CHANNEL_DATA_ACTION]}) " + f"for {symbol}/{time_frame} on {exchange_name}: enable_reevaluation() is False") + return + # re-evaluation processes + if data[constants.EVALUATOR_CHANNEL_DATA_ACTION] == constants.TA_RE_EVALUATION_TRIGGER_UPDATED_DATA: + try: + import octobot_trading.api as exchange_api + exchange_id = data[constants.EVALUATOR_CHANNEL_DATA_EXCHANGE_ID] + symbol_data = self.get_exchange_symbol_data(exchange_name, exchange_id, symbol) + for time_frame in time_frames_to_update: + candles_data = exchange_api.get_symbol_historical_candles(symbol_data, time_frame, limit=1) + # do not trigger ohlcv_callback on empty candles data + if all(values for values in candles_data.values()): + last_full_candle = exchange_api.get_candle_as_list(candles_data) + await self.ohlcv_callback(exchange_name, exchange_id, cryptocurrency, + symbol, time_frame.value, last_full_candle, True) + except KeyError: + self.logger.debug( + f"Ignored re-evaluation trigger for {symbol} on {exchange_name}: missing {time_frame} data" + ) + except ImportError: + self.logger.error(f"Can't get OHLCV: requires OctoBot-Trading package installed") + elif data[constants.EVALUATOR_CHANNEL_DATA_ACTION] == constants.RESET_EVALUATION: + for time_frame in time_frames_to_update: + await self.reset_evaluation(cryptocurrency, symbol, time_frame.value) diff --git a/packages/evaluators/octobot_evaluators/evaluators/__init__.py b/packages/evaluators/octobot_evaluators/evaluators/__init__.py new file mode 100644 index 0000000000..32de32e665 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/__init__.py @@ -0,0 +1,86 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_evaluators.enums +from octobot_evaluators.evaluators import abstract_evaluator +from octobot_evaluators.evaluators.abstract_evaluator import ( + AbstractEvaluator, +) + +from octobot_evaluators.evaluators import evaluator_factory +from octobot_evaluators.evaluators import realtime_evaluator +from octobot_evaluators.evaluators import social_evaluator +from octobot_evaluators.evaluators import TA_evaluator +from octobot_evaluators.evaluators import scripted_evaluator +from octobot_evaluators.evaluators import abstract_util +from octobot_evaluators.evaluators import strategy_evaluator + +from octobot_evaluators.evaluators.evaluator_factory import ( + create_evaluator, + create_and_start_all_type_evaluators, + create_evaluators, + create_temporary_evaluator_with_local_config, +) +from octobot_evaluators.evaluators.realtime_evaluator import ( + RealTimeEvaluator, +) +from octobot_evaluators.evaluators.social_evaluator import ( + SocialEvaluator, +) +from octobot_evaluators.evaluators.TA_evaluator import ( + TAEvaluator, +) +from octobot_evaluators.evaluators.scripted_evaluator import ( + ScriptedEvaluator, +) +from octobot_evaluators.evaluators.abstract_util import ( + AbstractUtil, +) +from octobot_evaluators.evaluators.strategy_evaluator import ( + StrategyEvaluator, +) + + +EvaluatorClassTypes = { + octobot_evaluators.enums.EvaluatorMatrixTypes.TA.value: TAEvaluator, + octobot_evaluators.enums.EvaluatorMatrixTypes.SOCIAL.value: SocialEvaluator, + octobot_evaluators.enums.EvaluatorMatrixTypes.REAL_TIME.value: RealTimeEvaluator, + octobot_evaluators.enums.EvaluatorMatrixTypes.SCRIPTED.value: ScriptedEvaluator, + octobot_evaluators.enums.EvaluatorMatrixTypes.STRATEGIES.value: StrategyEvaluator +} + +evaluator_class_str_to_matrix_type_dict = { + "TAEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.TA, + "SocialEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.SOCIAL, + "RealTimeEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.REAL_TIME, + "ScriptedEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.SCRIPTED, + "StrategyEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.STRATEGIES +} + +__all__ = [ + "RealTimeEvaluator", + "AbstractEvaluator", + "SocialEvaluator", + "TAEvaluator", + "ScriptedEvaluator", + "AbstractUtil", + "StrategyEvaluator", + "EvaluatorClassTypes", + "create_evaluator", + "create_and_start_all_type_evaluators", + "create_evaluators", + "create_temporary_evaluator_with_local_config", + "evaluator_class_str_to_matrix_type_dict", +] diff --git a/packages/evaluators/octobot_evaluators/evaluators/abstract_evaluator.py b/packages/evaluators/octobot_evaluators/evaluators/abstract_evaluator.py new file mode 100644 index 0000000000..84b4afee4c --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/abstract_evaluator.py @@ -0,0 +1,618 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time +import asyncio +import contextlib +import typing + +import octobot_tentacles_manager.api as api +import octobot_tentacles_manager.configuration as tm_configuration + +import async_channel.constants as channel_constants +import async_channel.enums as channel_enums +import async_channel.channels as channels + +import octobot_commons.constants as common_constants +import octobot_commons.errors as commons_errors +import octobot_commons.enums as commons_enums +import octobot_commons.logging as commons_logging +import octobot_commons.tentacles_management as tentacles_management + +import octobot_evaluators.evaluators.channel as evaluator_channels +import octobot_evaluators.constants as constants +import octobot_evaluators.matrix as matrix +import octobot_evaluators.evaluators.evaluator_factory as evaluator_factory + +import octobot_evaluators.util as util + + +class AbstractEvaluator(tentacles_management.AbstractTentacle): + __metaclass__ = tentacles_management.AbstractTentacle + HISTORIZE_USER_INPUT_CONFIG = True + USER_INPUT_TENTACLE_TYPE = commons_enums.UserInputTentacleTypes.EVALUATOR + + def __init__(self, tentacles_setup_config: tm_configuration.TentaclesSetupConfiguration): + super().__init__() + self.logger = commons_logging.get_logger(self.get_name()) + + # Evaluator matrix id + self.matrix_id: typing.Optional[str] = None + + # OctoBot id this evaluator has been started with + self.bot_id: typing.Optional[str] = None + + # Tentacle global setup configuration + self.tentacles_setup_config: tm_configuration.TentaclesSetupConfiguration = tentacles_setup_config + + # Evaluator specific config (Is loaded from tentacle specific file) + self.specific_config: dict = {} + + # Evaluator specific config snapshot before a config update + self.previous_specific_config: dict = None + + # If this indicator is enabled + self.enabled: bool = self.is_enabled(self.tentacles_setup_config, False) + + # Specified Cryptocurrency for this instance (Should be None if wildcard) + self.cryptocurrency: typing.Optional[str] = None + + # Specified Cryptocurrency name for this instance (Should be None if wildcard) + self.cryptocurrency_name: typing.Optional[str] = None + + # Symbol is the cryptocurrency pair (Should be None if wildcard) + self.symbol: typing.Optional[str] = None + + # Evaluation related exchange name + self.exchange_name: typing.Optional[str] = None + + # Time_frame is the chart time frame (Should be None if wildcard) + self.time_frame = None + + # history time represents the period of time of the indicator + self.history_time = None + + # Evaluator category + self.evaluator_type = None + + # Eval note will be set by the eval_impl at each call + self.eval_note = common_constants.START_PENDING_EVAL_NOTE + + # Pertinence of indicator will be used with the eval_note to provide a relevancy + self.pertinence = constants.START_EVAL_PERTINENCE + + # Active tells if this evaluator is currently activated (an evaluator can be paused) + self.is_active: bool = True + + self.eval_note_time_to_live = None + self.eval_note_changed_time = None + + # Define evaluators default consumer priority level + self.priority_level: int = channel_enums.ChannelConsumerPriorityLevels.MEDIUM.value + + self.consumers: list = [] + + # True when this evaluator is only triggered on closed candles + self.is_triggered_after_candle_close: bool = False + + # Cleared when starting an async evaluation (using self.async_evaluation()) and set afterwards + self._is_evaluation_completed: typing.Optional[asyncio.Event] = None + + def post_init(self, tentacles_setup_config): + """ + Automatically called after __init__ when post_init is True (default) in evaluator_factory + Override when necessary + :param tentacles_setup_config: the tentacles_setup_config __init__ argument + :return: None + """ + pass + + @classmethod + async def single_evaluation( + cls, + tentacles_setup_config: tm_configuration.TentaclesSetupConfiguration, + specific_config: dict, + ignore_cache=False, + should_trigger_post_init=False, + **kwargs): + evaluator_instance = evaluator_factory.create_temporary_evaluator_with_local_config( + cls, tentacles_setup_config, specific_config, should_trigger_post_init) + evaluation, error = await evaluator_instance.evaluator_manual_callback(ignore_cache=ignore_cache, **kwargs) + return evaluation, error, evaluator_instance + + async def evaluator_manual_callback(self, **kwargs): + """ + Override this method to define the appropriate behavior when this evaluator is being + called manually + :param kwargs: keyword arguments to be used for evaluation + :return: the evaluation value + """ + raise NotImplementedError("evaluator_manual_callback is not implemented") + + @staticmethod + def get_eval_type(): + """ + Override this method when self.eval_note is other than : START_PENDING_EVAL_NOTE or float[-1:1] + :return: type + """ + return constants.EVALUATOR_EVAL_DEFAULT_TYPE + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return True + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return True + + @classmethod + def get_is_symbol_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not symbol dependant else False + """ + return True + + @classmethod + def get_is_time_frame_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not time_frame dependant else False + """ + return True + + def get_trigger_time_frames(self): + return self.specific_config.get(common_constants.CONFIG_TRIGGER_TIMEFRAMES, common_constants.CONFIG_WILDCARD) + + @staticmethod + def invalidate_cache_on_code_change(): + # False is not yet supported + return True + + @staticmethod + def invalidate_cache_on_config_change(): + # False is not yet supported + return True + + @classmethod + def use_cache(cls): + return False + + @classmethod + def get_signals_history_type(cls): + """ + Override when this evaluator uses a specific type of signal history + """ + return None + + def enable_reevaluation(self) -> bool: + """ + Override when artificial re-evaluations from the evaluator channel can be disabled + """ + return True + + def get_local_config(self): + return self.specific_config + + def _get_tentacle_registration_topic(self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames): + currencies = [self.cryptocurrency] + symbols = [self.symbol] + available_time_frames = [self.time_frame] + if self.get_is_cryptocurrencies_wildcard(): + currencies = all_symbols_by_crypto_currencies.keys() + if self.get_is_symbol_wildcard(): + symbols = [currency_symbol + for currency_symbols in all_symbols_by_crypto_currencies.values() + for currency_symbol in currency_symbols] + if self.get_is_time_frame_wildcard(): + available_time_frames = time_frames + trigger_timeframes = self.get_trigger_time_frames() + if trigger_timeframes != common_constants.CONFIG_WILDCARD: + available_time_frames = [tf + for tf in available_time_frames + if tf in trigger_timeframes] + return currencies, symbols, available_time_frames + + def _is_in_backtesting(self): + try: + import octobot_trading.api as exchange_api + return exchange_api.get_is_backtesting( + exchange_api.get_exchange_manager_from_exchange_name_and_id( + self.exchange_name, + exchange_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + ) + ) + except ImportError as e: + self.logger.error(f"Can't connect check if backtesting is enabled {e}") + return False + + async def initialize( + self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames, bot_id, specific_config=None + ): + await self.reload_config(bot_id, specific_config=specific_config) + currencies, symbols, time_frames = self._get_tentacle_registration_topic( + all_symbols_by_crypto_currencies, time_frames, real_time_time_frames + ) + await self._init_registered_topics(all_symbols_by_crypto_currencies, currencies, symbols, time_frames) + + async def _init_registered_topics(self, all_symbols_by_crypto_currencies, currencies, symbols, time_frames): + for currency in currencies: + for symbol in symbols: + if symbol is None or symbol in all_symbols_by_crypto_currencies[currency]: + for time_frame in time_frames: + matrix.set_tentacle_value( + matrix_id=self.matrix_id, + tentacle_type=self.get_eval_type(), + tentacle_value=None, + tentacle_path=matrix.get_matrix_default_value_path( + exchange_name=self.exchange_name, + tentacle_type=self.evaluator_type.value, + tentacle_name=self.get_name(), + cryptocurrency=currency, + symbol=symbol, + time_frame=time_frame.value if time_frame else None + ) + ) + + async def evaluation_completed(self, + cryptocurrency: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + time_frame=None, + eval_note=None, + eval_time=0, + eval_note_description=None, + eval_note_metadata=None, + notify=True, + origin_consumer=None, + cache_client=None, + cache_if_available=True) -> None: + """ + Main async method to notify matrix to update + :param cryptocurrency: evaluated cryptocurrency + :param symbol: evaluated symbol + :param time_frame: evaluated time frame + :param eval_note: if None = self.eval_note + :param eval_time: the time of the evaluation if relevant, default is 0 + :param eval_note_description: the description of the evaluation if relevant + :param eval_note_metadata: the metadata of the evaluation if relevant + :param notify: if true, will trigger matrix consumers + :param origin_consumer: the sender consumer if it doesn't want to be notified + :param cache_client: an existing cache client to avoid creating a local one + :param cache_if_available: when True, if the evaluator is using cache, its value will be cached + :return: None + """ + try: + if eval_note is None: + eval_note = self.eval_note if self.eval_note is not None else common_constants.START_PENDING_EVAL_NOTE + + if self.use_cache(): + cache_client = cache_client or util.local_cache_client(self, symbol, time_frame) + if self.eval_note == common_constants.DO_NOT_OVERRIDE_CACHE: + self.eval_note, missing = await cache_client.get_cached_value(cache_key=eval_time) + cache_client.ensure_no_missing_cached_value(missing) + eval_note = self.eval_note + elif cache_if_available and eval_note != common_constants.DO_NOT_CACHE: + await cache_client.set_cached_value(eval_note, cache_key=eval_time, flush_if_necessary=True) + self.ensure_eval_note_is_not_expired() + if notify: + # skip warning when evaluation is not to be broadcasted (might be a simple reset) + self._log_on_invalid_eval_not_time(self.exchange_name, self.matrix_id, symbol, eval_time, time_frame) + await evaluator_channels.get_chan(constants.MATRIX_CHANNEL, + self.matrix_id).get_internal_producer().send_eval_note( + matrix_id=self.matrix_id, + evaluator_name=self.get_name(), + evaluator_type=self.evaluator_type.value, + eval_note=eval_note, + eval_note_type=self.get_eval_type(), + eval_time=eval_time, + eval_note_description=eval_note_description, + eval_note_metadata=eval_note_metadata, + exchange_name=self.exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame, + notify=notify, + origin_consumer=origin_consumer) + except commons_errors.NoCacheValue: + self.logger.warning(f"Evaluation as \"{common_constants.DO_NOT_OVERRIDE_CACHE}\" " + f"but the is no cache to publish an evaluation from") + except Exception as e: + # if ConfigManager.is_in_dev_mode(self.config): # TODO + # raise e + # else: + self.logger.exception(e, True, f"Exception in evaluation_completed(): {e}") + finally: + if self.eval_note == "nan": + self.eval_note = common_constants.START_PENDING_EVAL_NOTE + self.logger.warning(str(self.symbol) + " evaluator returned 'nan' as eval_note, ignoring this value.") + + async def start(self, bot_id: str) -> bool: + """ + :return: success of the evaluator's start + """ + self.bot_id = bot_id + self.consumers.append( + await evaluator_channels.get_chan(constants.EVALUATORS_CHANNEL, self.matrix_id).new_consumer( + self.evaluators_callback, + cryptocurrency=self.cryptocurrency if self.cryptocurrency else channel_constants.CHANNEL_WILDCARD, + symbol=self.symbol if self.symbol else channel_constants.CHANNEL_WILDCARD, + time_frame=self.time_frame if self.time_frame else channel_constants.CHANNEL_WILDCARD, + priority_level=self.priority_level, + ) + ) + + try: + import octobot_services.channel as services_channels + self.consumers.append( + await channels.get_chan(services_channels.UserCommandsChannel.get_name()).new_consumer( + self.user_commands_callback, + {"bot_id": bot_id, "subject": self.get_name()} + ) + ) + except KeyError: + # UserCommandsChannel might not be available + pass + except ImportError: + self.logger.warning("Can't connect to services channels") + + async def user_commands_callback(self, bot_id, subject, action, data) -> None: + self.logger.debug(f"Received {action} command") + if action == commons_enums.UserCommands.RELOAD_CONFIG.value: + await self.reload_config(bot_id) + self.logger.debug("Reloaded configuration") + + async def stop(self) -> None: + """ + implement if necessary + :return: None + """ + for consumer in self.consumers: + await consumer.stop() + + async def prepare(self) -> None: + """ + Called just before start(), implement if necessary + :return: None + """ + pass + + async def start_evaluator(self, bot_id: str) -> None: + """ + Start a task as matrix producer + :return: None + """ + if await self.start(bot_id): + self.logger.debug("Evaluator started") + else: + self.logger.debug("Evaluator not started") + + async def reload_config(self, bot_id: str, specific_config=None) -> None: + self.set_default_config() + specific_config = specific_config or api.get_tentacle_config( + self.tentacles_setup_config, self.__class__ + ) + + if not specific_config and self.ALLOW_SUPER_CLASS_CONFIG: + # if nothing in config, try with any super-class' config file + for super_class in self.get_parent_evaluator_classes(AbstractEvaluator): + try: + if specific_config := api.get_tentacle_config(self.tentacles_setup_config, super_class): + break + except KeyError: + pass # super_class tentacle config not found + self.specific_config.update(specific_config) + await self.load_and_save_user_inputs(bot_id) + self.logger.debug(f"Using config: {self.specific_config}") + + @classmethod + def create_local_instance(cls, _, tentacles_setup_config, tentacle_config): + return evaluator_factory.create_temporary_evaluator_with_local_config( + cls, tentacles_setup_config, tentacle_config, False + ) + + def set_default_config(self): + """ + To implement in subclasses if config necessary + :return: + """ + self.specific_config = {} + + @classmethod + def get_evaluator_priority(cls, tentacles_setup_config) -> float: + """ + Returns the priority of the evaluator (will later be compared to other evaluators). + A higher priority evaluator will be called first when multiple evaluators are to + be called at the same time. + Default priority is DEFAULT_PRIORITY defined in OctoBot-Commons. + Order is undefined between evaluators of the same priority. + :return: the priority level + """ + return api.get_tentacle_config(tentacles_setup_config, cls).get(common_constants.EVALUATOR_PRIORITY, + common_constants.DEFAULT_EVALUATOR_PRIORITY) + + def reset(self) -> None: + """ + Reset temporary parameters to enable fresh start + :return: None + """ + self.eval_note = common_constants.START_PENDING_EVAL_NOTE + + @classmethod + def has_class_in_parents(cls, klass) -> bool: + """ + Explore up to the 2nd degree parent + :param klass: python Class to explore + :return: Boolean + """ + if klass in cls.__bases__: + return True + elif any(klass in base.__bases__ for base in cls.__bases__): + return True + else: + for base in cls.__bases__: + if any(klass in super_base.__bases__ for super_base in base.__bases__): + return True + return False + + @classmethod + def get_parent_evaluator_classes(cls, higher_parent_class_limit=None) -> list: + """ + Return the evaluator parent classe(s) + :param higher_parent_class_limit: + :return: list of classes + """ + return [ + class_type + for class_type in cls.mro() + if (higher_parent_class_limit if higher_parent_class_limit else AbstractEvaluator) in class_type.mro() + ] + + def set_eval_note(self, new_eval_note) -> None: + """ + Performs additionnal check to eval_note before changing it + :param new_eval_note: + :return: None + """ + self.eval_note_changed() + + if self.eval_note == common_constants.START_PENDING_EVAL_NOTE: + self.eval_note = common_constants.INIT_EVAL_NOTE + + if self.eval_note + new_eval_note > 1: + self.eval_note = 1 + elif self.eval_note + new_eval_note < -1: + self.eval_note = -1 + else: + self.eval_note += new_eval_note + + @classmethod + def is_enabled(cls, tentacles_setup_config, default) -> bool: + """ + Check if the evaluator is enabled by configuration + :param tentacles_setup_config: tentacles setup config + :param default: default value if evaluator config is not found + :return: evaluator config + """ + try: + return api.is_tentacle_activated_in_tentacles_setup_config(tentacles_setup_config, + cls.get_name(), + raise_errors=True) + except KeyError: + for parent in cls.mro(): + try: + return api.is_tentacle_activated_in_tentacles_setup_config(tentacles_setup_config, + parent.__name__, + raise_errors=True) + except KeyError: + pass + return default + + def save_evaluation_expiration_time(self, eval_note_time_to_live, eval_note_changed_time=None) -> None: + """ + Use only if the current evaluation is to stay for a pre-defined amount of seconds + :param eval_note_time_to_live: + :param eval_note_changed_time: + :return: None + """ + self.eval_note_time_to_live = eval_note_time_to_live + self.eval_note_changed_time = eval_note_changed_time if eval_note_changed_time else time.time() + + def eval_note_changed(self) -> None: + """ + Eval note changed callback + :return: None + """ + if self.eval_note_time_to_live is not None and self.eval_note_changed_time is None: + self.eval_note_changed_time = time.time() + + def ensure_eval_note_is_not_expired(self) -> None: + """ + Eval note expiration check + :return: None + """ + if self.eval_note_time_to_live is not None: + if self.eval_note_changed_time is None: + self.eval_note_changed_time = time.time() + + if time.time() - self.eval_note_changed_time > self.eval_note_time_to_live: + self.eval_note = common_constants.START_PENDING_EVAL_NOTE + self.eval_note_time_to_live = None + self.eval_note_changed_time = None + + def _log_on_invalid_eval_not_time(self, exchange_name, matrix_id, symbol, eval_time, time_frame) -> None: + if not time_frame: + return + current_time = self._get_exchange_current_time(exchange_name, matrix_id) + if not matrix.is_evaluation_valid_in_time(current_time, eval_time, commons_enums.TimeFrames(time_frame)): + self.logger.warning( + f"Performed {symbol} {time_frame} evaluation on outdated data: {eval_time=}, {current_time=}. " + f"Up-to-date {symbol} price data on {time_frame} might not yet be available on {exchange_name}." + ) + + def _get_exchange_current_time(self, exchange_name, matrix_id): + try: + import octobot_trading.api as exchange_api + exchange_manager = exchange_api.get_exchange_manager_from_exchange_name_and_id( + exchange_name, + exchange_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + ) + return exchange_api.get_exchange_current_time(exchange_manager) + except ImportError: + self.logger.error("Strategy requires OctoBot-Trading package installed") + + def get_exchange_symbol_data(self, exchange_name: str, exchange_id: str, symbol: str): + try: + import octobot_trading.api as exchange_api + exchange_manager = exchange_api.get_exchange_manager_from_exchange_name_and_id(exchange_name, exchange_id) + return exchange_api.get_symbol_data(exchange_manager, symbol) + except (ImportError, KeyError): + self.logger.error(f"Can't get {exchange_name} from exchanges instances") + return + + async def evaluators_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame, + data): + # Used to communicate between evaluators + pass + + @contextlib.asynccontextmanager + async def async_evaluation(self): + if self._is_evaluation_completed is None: + self._is_evaluation_completed = asyncio.Event() + try: + self._is_evaluation_completed.clear() + yield + finally: + self._is_evaluation_completed.set() + + async def wait_for_async_evaluation_completion(self, timeout): + if self._is_evaluation_completed is None or self._is_evaluation_completed.is_set(): + return + await asyncio.wait_for(self._is_evaluation_completed.wait(), timeout=timeout) + + def is_in_async_evaluation(self): + if self._is_evaluation_completed is None: + return False + return not self._is_evaluation_completed.is_set() diff --git a/packages/evaluators/octobot_evaluators/evaluators/abstract_util.py b/packages/evaluators/octobot_evaluators/evaluators/abstract_util.py new file mode 100644 index 0000000000..a65c6243d7 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/abstract_util.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.tentacles_management as tentacles_management + + +class AbstractUtil(tentacles_management.AbstractTentacle): + pass diff --git a/packages/evaluators/octobot_evaluators/evaluators/channel/__init__.py b/packages/evaluators/octobot_evaluators/evaluators/channel/__init__.py new file mode 100644 index 0000000000..322a47535c --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/channel/__init__.py @@ -0,0 +1,54 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_evaluators.evaluators.channel import evaluator_channel +from octobot_evaluators.evaluators.channel.evaluator_channel import ( + EvaluatorChannelConsumer, + EvaluatorChannelSupervisedConsumer, + EvaluatorChannelProducer, + EvaluatorChannel, + set_chan, + get_evaluator_channels, + del_evaluator_channel_container, + get_chan, + del_chan, + trigger_technical_evaluators_re_evaluation_with_updated_data, +) +from octobot_evaluators.evaluators.channel import evaluators +from octobot_evaluators.evaluators.channel.evaluators import ( + EvaluatorsChannelConsumer, + EvaluatorsChannelProducer, + EvaluatorsChannel, +) + +__all__ = [ + "EvaluatorChannelConsumer", + "EvaluatorChannelSupervisedConsumer", + "EvaluatorChannelProducer", + "EvaluatorChannel", + "EvaluatorChannelConsumer", + "EvaluatorChannelProducer", + "EvaluatorChannel", + "set_chan", + "get_evaluator_channels", + "del_evaluator_channel_container", + "get_chan", + "del_chan", + "trigger_technical_evaluators_re_evaluation_with_updated_data", + "EvaluatorsChannelConsumer", + "EvaluatorsChannelProducer", + "EvaluatorsChannel", +] diff --git a/packages/evaluators/octobot_evaluators/evaluators/channel/evaluator_channel.py b/packages/evaluators/octobot_evaluators/evaluators/channel/evaluator_channel.py new file mode 100644 index 0000000000..4f3692c175 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/channel/evaluator_channel.py @@ -0,0 +1,147 @@ +# pylint: disable=E0203 +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import async_channel.channels as channels +import async_channel.enums as channel_enums +import async_channel.constants as channel_constants +import async_channel.consumer as consumers +import async_channel.producer as producers + +import octobot_commons.logging as logging + +import octobot_evaluators.constants as constants + + +class EvaluatorChannelConsumer(consumers.Consumer): + """ + Consumer adapted for EvaluatorChannel + """ + + +class EvaluatorChannelSupervisedConsumer(consumers.SupervisedConsumer): + """ + SupervisedConsumer adapted for EvaluatorChannel + """ + + +class EvaluatorChannelProducer(producers.Producer): + """ + Producer adapted for EvaluatorChannel + """ + + +class EvaluatorChannel(channels.Channel): + PRODUCER_CLASS = EvaluatorChannelProducer + CONSUMER_CLASS = EvaluatorChannelConsumer + DEFAULT_PRIORITY_LEVEL = channel_enums.ChannelConsumerPriorityLevels.MEDIUM.value + + def __init__(self, matrix_id): + super().__init__() + self.matrix_id = matrix_id + + def get_consumer_from_filters(self, consumer_filters, origin_consumer=None) -> list: + """ + Returns the instance filtered consumers list except origin_consumer if provided + :param consumer_filters: the consumer filters dict + :param origin_consumer: the consumer behind the call if any else None + :return: the filtered consumer list + """ + return [consumer + for consumer in super(EvaluatorChannel, self).get_consumer_from_filters(consumer_filters) + if origin_consumer is None or consumer is not origin_consumer] + + +def set_chan(chan, name) -> None: + chan_name = chan.get_name() if name else name + + try: + evaluator_chan = channels.ChannelInstances.instance().channels[chan.matrix_id] + except KeyError: + channels.ChannelInstances.instance().channels[chan.matrix_id] = {} + evaluator_chan = channels.ChannelInstances.instance().channels[chan.matrix_id] + + if chan_name not in evaluator_chan: + evaluator_chan[chan_name] = chan + else: + raise ValueError(f"Channel {chan_name} already exists.") + + +def get_evaluator_channels(matrix_id) -> dict: + try: + return channels.ChannelInstances.instance().channels[matrix_id] + except KeyError as e: + raise KeyError(f"Channels not found with matrix_id: {matrix_id}") from e + + +def del_evaluator_channel_container(matrix_id): + try: + channels.ChannelInstances.instance().channels.pop(matrix_id, None) + except KeyError as e: + raise KeyError(f"Channels not found with matrix_id: {matrix_id}") from e + + +def get_chan(chan_name, matrix_id) -> EvaluatorChannel: + try: + return channels.ChannelInstances.instance().channels[matrix_id][chan_name] + except KeyError as e: + raise KeyError(f"Channel {chan_name} not found with matrix_id: {matrix_id}") from e + + +def del_chan(chan_name, matrix_id) -> None: + try: + channels.ChannelInstances.instance().channels[matrix_id].pop(chan_name, None) + except KeyError: + logging.get_logger(EvaluatorChannel.__name__).warning(f"Can't del chan {chan_name} with matrix_id: {matrix_id}") + + +async def trigger_technical_evaluators_re_evaluation_with_updated_data(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + exchange_id, + time_frames + ): + # first reset evaluations to avoid partially updated TA cycle validation + await get_chan(constants.EVALUATORS_CHANNEL, matrix_id).get_internal_producer().send( + matrix_id, + data={ + constants.EVALUATOR_CHANNEL_DATA_ACTION: constants.RESET_EVALUATION, + constants.EVALUATOR_CHANNEL_DATA_TIME_FRAMES: time_frames + }, + evaluator_name=evaluator_name, + evaluator_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=channel_constants.CHANNEL_WILDCARD + ) + await get_chan(constants.EVALUATORS_CHANNEL, matrix_id).get_internal_producer().send( + matrix_id, + data={ + constants.EVALUATOR_CHANNEL_DATA_ACTION: constants.TA_RE_EVALUATION_TRIGGER_UPDATED_DATA, + constants.EVALUATOR_CHANNEL_DATA_EXCHANGE_ID: exchange_id, + constants.EVALUATOR_CHANNEL_DATA_TIME_FRAMES: time_frames + }, + evaluator_name=evaluator_name, + evaluator_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=channel_constants.CHANNEL_WILDCARD + ) diff --git a/packages/evaluators/octobot_evaluators/evaluators/channel/evaluators.py b/packages/evaluators/octobot_evaluators/evaluators/channel/evaluators.py new file mode 100644 index 0000000000..020ce4d18b --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/channel/evaluators.py @@ -0,0 +1,145 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import async_channel.constants as channel_constants + +import octobot_commons.logging as logging + +import octobot_evaluators.evaluators.channel as evaluator_channels + + +class EvaluatorsChannelConsumer(evaluator_channels.EvaluatorChannelConsumer): + """ + EvaluatorChannelConsumer adapted for EvaluatorsChannel + """ + + +class EvaluatorsChannelProducer(evaluator_channels.EvaluatorChannelProducer): + """ + EvaluatorChannelProducer adapted for EvaluatorsChannel + """ + + # noinspection PyMethodOverriding + async def send(self, + matrix_id, + data, + evaluator_name=channel_constants.CHANNEL_WILDCARD, + evaluator_type=channel_constants.CHANNEL_WILDCARD, + exchange_name=channel_constants.CHANNEL_WILDCARD, + cryptocurrency=channel_constants.CHANNEL_WILDCARD, + symbol=channel_constants.CHANNEL_WILDCARD, + time_frame=channel_constants.CHANNEL_WILDCARD, + origin_consumer=None): + for consumer in self.channel.get_filtered_consumers(matrix_id=matrix_id, + evaluator_name=evaluator_name, + evaluator_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame, + origin_consumer=origin_consumer): + await consumer.queue.put({ + "matrix_id": matrix_id, + "evaluator_name": evaluator_name, + "evaluator_type": evaluator_type, + "exchange_name": exchange_name, + "cryptocurrency": cryptocurrency, + "symbol": symbol, + "time_frame": time_frame, + "data": data + }) + + +class EvaluatorsChannel(evaluator_channels.EvaluatorChannel): + PRODUCER_CLASS = EvaluatorsChannelProducer + CONSUMER_CLASS = EvaluatorsChannelConsumer + + MATRIX_ID_KEY = "matrix_id" + EVALUATOR_NAME_KEY = "evaluator_name" + EVALUATOR_TYPE_KEY = "evaluator_type" + EXCHANGE_NAME_KEY = "exchange_name" + CRYPTOCURRENCY_KEY = "cryptocurrency" + SYMBOL_KEY = "symbol" + TIME_FRAME_KEY = "time_frame" + + def __init__(self, matrix_id): + super().__init__(matrix_id) + self.logger = logging.get_logger(f"{self.__class__.__name__}") + + # noinspection PyMethodOverriding + async def new_consumer(self, + callback: object, + size: int = 0, + priority_level: int = evaluator_channels.EvaluatorChannel.DEFAULT_PRIORITY_LEVEL, + matrix_id: str = channel_constants.CHANNEL_WILDCARD, + evaluator_name: str = channel_constants.CHANNEL_WILDCARD, + evaluator_type: object = channel_constants.CHANNEL_WILDCARD, + exchange_name: str = channel_constants.CHANNEL_WILDCARD, + cryptocurrency: str = channel_constants.CHANNEL_WILDCARD, + symbol: str = channel_constants.CHANNEL_WILDCARD, + time_frame=channel_constants.CHANNEL_WILDCARD) -> EvaluatorsChannelConsumer: + consumer = EvaluatorsChannelConsumer(callback, size=size, priority_level=priority_level) + await self._add_new_consumer_and_run(consumer, + matrix_id=matrix_id, + evaluator_name=evaluator_name, + evaluator_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame) + return consumer + + def get_filtered_consumers(self, + matrix_id=channel_constants.CHANNEL_WILDCARD, + evaluator_name=channel_constants.CHANNEL_WILDCARD, + evaluator_type=channel_constants.CHANNEL_WILDCARD, + exchange_name=channel_constants.CHANNEL_WILDCARD, + cryptocurrency=channel_constants.CHANNEL_WILDCARD, + symbol=channel_constants.CHANNEL_WILDCARD, + time_frame=channel_constants.CHANNEL_WILDCARD, + origin_consumer=None): + return self.get_consumer_from_filters({ + self.MATRIX_ID_KEY: matrix_id, + self.EVALUATOR_NAME_KEY: evaluator_name, + self.EVALUATOR_TYPE_KEY: evaluator_type, + self.EXCHANGE_NAME_KEY: exchange_name, + self.CRYPTOCURRENCY_KEY: cryptocurrency, + self.SYMBOL_KEY: symbol, + self.TIME_FRAME_KEY: time_frame, + }, + origin_consumer=origin_consumer) + + async def _add_new_consumer_and_run(self, consumer, + matrix_id=channel_constants.CHANNEL_WILDCARD, + evaluator_name=channel_constants.CHANNEL_WILDCARD, + evaluator_type=channel_constants.CHANNEL_WILDCARD, + exchange_name=channel_constants.CHANNEL_WILDCARD, + cryptocurrency=channel_constants.CHANNEL_WILDCARD, + symbol=channel_constants.CHANNEL_WILDCARD, + time_frame=channel_constants.CHANNEL_WILDCARD): + consumer_filters: dict = { + self.MATRIX_ID_KEY: matrix_id, + self.EVALUATOR_NAME_KEY: evaluator_name, + self.EVALUATOR_TYPE_KEY: evaluator_type, + self.EXCHANGE_NAME_KEY: exchange_name, + self.CRYPTOCURRENCY_KEY: cryptocurrency, + self.SYMBOL_KEY: symbol, + self.TIME_FRAME_KEY: time_frame + } + + self.add_new_consumer(consumer, consumer_filters) + await consumer.run() diff --git a/packages/evaluators/octobot_evaluators/evaluators/evaluator_factory.py b/packages/evaluators/octobot_evaluators/evaluators/evaluator_factory.py new file mode 100644 index 0000000000..07966698bc --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/evaluator_factory.py @@ -0,0 +1,241 @@ +# pylint: disable=E0401 +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.tentacles_management as tentacles_management +import octobot_commons.constants as common_constants +import octobot_commons.logging as logging + +import octobot_evaluators.api as api +import octobot_evaluators.evaluators as evaluator +import octobot_evaluators.constants as constants +import octobot_tentacles_manager.configuration as tm_configuration + +LOGGER_NAME = "EvaluatorsFactory" + + +async def create_evaluators( + evaluator_parent_class, + tentacles_setup_config: object, + matrix_id: str, + exchange_name: str, + bot_id: str, + crypto_currency_name_by_crypto_currencies: dict, + symbols_by_crypto_currency_tickers: dict, + symbols: list = None, + time_frames: list = None, + real_time_time_frames: list = None, + relevant_evaluators=common_constants.CONFIG_WILDCARD, + config_by_evaluator=None, +) -> list: + config_by_evaluator = config_by_evaluator or {} + return [ + await create_evaluator( + evaluator_class, + tentacles_setup_config, + bot_id=bot_id, + matrix_id=matrix_id, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + cryptocurrency_name=_get_cryptocurrency_name( + evaluator_class, + crypto_currency_name_by_crypto_currencies, + cryptocurrency), + symbol=symbol, + time_frame=time_frame, + relevant_evaluators=relevant_evaluators, + all_symbols_by_crypto_currencies=symbols_by_crypto_currency_tickers, + time_frames=time_frames, + real_time_time_frames=real_time_time_frames, + evaluator_configuration=config_by_evaluator.get(evaluator_class.get_name()) + ) + for evaluator_class in tentacles_management.get_all_classes_from_parent(evaluator_parent_class) + for cryptocurrency in _get_cryptocurrencies_to_create(evaluator_class, + crypto_currency_name_by_crypto_currencies) + for symbol in _get_symbols_to_create(evaluator_class, + symbols_by_crypto_currency_tickers, + cryptocurrency, + symbols) + for time_frame in _get_time_frames_to_create(evaluator_class, time_frames) + ] + + +def create_temporary_evaluator_with_local_config( + evaluator_class, + tentacles_setup_config: tm_configuration.TentaclesSetupConfiguration, + specific_config, + should_trigger_post_init=False): + evaluator_instance = _instantiate_evaluator(evaluator_class, tentacles_setup_config, should_trigger_post_init) + evaluator_instance.specific_config = specific_config + return evaluator_instance + + +def _get_cryptocurrency_name(evaluator_class, crypto_currency_name_by_crypto_currencies, cryptocurrency): + return crypto_currency_name_by_crypto_currencies[cryptocurrency] \ + if crypto_currency_name_by_crypto_currencies \ + and cryptocurrency is not None \ + and not evaluator_class.get_is_cryptocurrency_name_wildcard() \ + else None + + +def _get_cryptocurrencies_to_create(evaluator_class, crypto_currency_name_by_crypto_currencies): + return list(crypto_currency_name_by_crypto_currencies) \ + if crypto_currency_name_by_crypto_currencies and \ + not evaluator_class.get_is_cryptocurrencies_wildcard() else [None] + + +def _get_symbols_to_create(evaluator_class, symbols_by_crypto_currencies, cryptocurrency, symbols): + currency_symbols = symbols + if cryptocurrency is not None: + currency_symbols = list(symbols_by_crypto_currencies.get(cryptocurrency, set())) + return currency_symbols if currency_symbols and not evaluator_class.get_is_symbol_wildcard() else [None] + + +def _get_time_frames_to_create(evaluator_class, time_frames): + return time_frames if time_frames and not evaluator_class.get_is_time_frame_wildcard() else [None] + + +async def create_evaluator( + evaluator_class, + tentacles_setup_config: object, + bot_id: str, + matrix_id: str, + exchange_name: str, + cryptocurrency: str = None, + cryptocurrency_name: str = None, + symbol: str = None, + time_frame=None, + relevant_evaluators=common_constants.CONFIG_WILDCARD, + all_symbols_by_crypto_currencies=None, + time_frames=None, + real_time_time_frames=None, + evaluator_configuration=None +): + try: + eval_class_instance = _instantiate_evaluator(evaluator_class, tentacles_setup_config, True) + if api.is_relevant_evaluator(eval_class_instance, relevant_evaluators): + eval_class_instance.matrix_id = matrix_id + eval_class_instance.exchange_name = exchange_name if exchange_name else None + eval_class_instance.cryptocurrency = cryptocurrency + eval_class_instance.cryptocurrency_name = cryptocurrency_name + eval_class_instance.symbol = symbol if symbol else None + eval_class_instance.time_frame = time_frame if time_frame else eval_class_instance.time_frame + eval_class_instance.evaluator_type = evaluator.evaluator_class_str_to_matrix_type_dict[ + eval_class_instance.__class__.mro()[constants.EVALUATOR_CLASS_TYPE_MRO_INDEX].__name__] + await eval_class_instance.initialize( + all_symbols_by_crypto_currencies, time_frames, real_time_time_frames, bot_id, evaluator_configuration + ) + await eval_class_instance.prepare() + return eval_class_instance + except Exception as e: + logging.get_logger(LOGGER_NAME).exception(e, True, f"Error when creating evaluator {evaluator_class}: {e}") + return None + + +def _instantiate_evaluator(evaluator_class, tentacles_setup_config, should_trigger_post_init): + eval_class_instance = evaluator_class(tentacles_setup_config) + if should_trigger_post_init: + eval_class_instance.post_init(tentacles_setup_config) + return eval_class_instance + + +async def _start_evaluators(evaluator_instances, tentacles_setup_config, bot_id): + all_evaluators = [evaluator_instance + for evaluators in evaluator_instances + for evaluator_instance in evaluators + if evaluator_instance is not None] + for evaluator_instance in _prioritized_evaluators(all_evaluators, tentacles_setup_config): + await evaluator_instance.start_evaluator(bot_id) + + +def _prioritized_evaluators(evaluators, tentacles_setup_config): + # highest evaluator priority first + return sorted( + evaluators, + key=lambda x: x.get_evaluator_priority(tentacles_setup_config), + reverse=True + ) + + +async def create_and_start_all_type_evaluators( + tentacles_setup_config: object, + matrix_id: str, + exchange_name: str, + bot_id: str, + symbols_by_crypto_currencies: typing.Optional[dict] = None, + symbols: typing.Optional[list] = None, + time_frames: typing.Optional[list] = None, + real_time_time_frames: typing.Optional[list] = None, + relevant_evaluators=common_constants.CONFIG_WILDCARD, + config_by_evaluator=None, +) -> list: + if not api.get_activated_strategies_classes(tentacles_setup_config): + # If no strategy is activated, there is no evaluator to create (their evaluation would not be used) + logging.get_logger(LOGGER_NAME).info( + f"No evaluator to create for {exchange_name}: no activated evaluator strategy.") + return [] + try: + import octobot_trading.api as exchange_api + crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers = \ + _extract_traded_pairs(symbols_by_crypto_currencies, exchange_name, matrix_id, exchange_api) + evaluators = [ + await create_evaluators( + evaluator_type, tentacles_setup_config, + matrix_id=matrix_id, exchange_name=exchange_name, + bot_id=bot_id, + crypto_currency_name_by_crypto_currencies=crypto_currency_name_by_crypto_currencies, + symbols_by_crypto_currency_tickers=symbols_by_crypto_currency_tickers, + symbols=symbols, time_frames=time_frames, + real_time_time_frames=real_time_time_frames, + relevant_evaluators=relevant_evaluators, + config_by_evaluator=config_by_evaluator, + ) + for evaluator_type in evaluator.EvaluatorClassTypes.values()] + await _start_evaluators(evaluators, tentacles_setup_config, bot_id) + return evaluators + except ImportError: + logging.get_logger(LOGGER_NAME).error("create_evaluators requires Octobot-Trading package installed") + return [] + + +def _extract_traded_pairs(symbols_by_crypto_currencies, exchange_name, matrix_id, exchange_api): + crypto_currency_name_by_crypto_currencies = {} + symbols_by_crypto_currency_tickers = {} + if not symbols_by_crypto_currencies: + return crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers + exchange_id = exchange_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + exchange_manager = exchange_api.get_exchange_manager_from_exchange_name_and_id(exchange_name, exchange_id) + for name, symbol_list in symbols_by_crypto_currencies.items(): + if symbol_list: + # in case pairs are listed by reference market in user config, iterate over each pair + for symbol in symbol_list: + base = exchange_api.get_base_currency(exchange_manager, symbol) + crypto_currency_name_by_crypto_currencies[base] = \ + crypto_currency_name_by_crypto_currencies.get(base, name) + symbols_by_crypto_currency_tickers[base] = \ + symbols_by_crypto_currency_tickers.get(base, set()).union( + _filter_pairs(symbol_list, base, exchange_api, exchange_manager) + ) + return crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers + + +def _filter_pairs(pairs, required_ticker, exchange_api, exchange_manager): + return set([ + pair + for pair in pairs + if required_ticker == exchange_api.get_base_currency(exchange_manager, pair) + ]) diff --git a/packages/evaluators/octobot_evaluators/evaluators/realtime_evaluator.py b/packages/evaluators/octobot_evaluators/evaluators/realtime_evaluator.py new file mode 100644 index 0000000000..611e9a4b62 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/realtime_evaluator.py @@ -0,0 +1,57 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as common_enums + +import octobot_tentacles_manager.api as api + +import octobot_evaluators.evaluators as evaluator +import octobot_evaluators.util as util + + +class RealTimeEvaluator(evaluator.AbstractEvaluator): + __metaclass__ = evaluator.AbstractEvaluator + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.available_time_frame = None + + def get_symbol_candles(self, exchange_name: str, exchange_id: str, symbol: str, time_frame): + try: + import octobot_trading.api as exchange_api + return exchange_api.get_symbol_candles_manager( + self.get_exchange_symbol_data(exchange_name, exchange_id, symbol), time_frame) + except ImportError: + self.logger.error(f"Can't get candles manager: requires OctoBot-Trading package installed") + + def _get_tentacle_registration_topic(self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames): + currencies, symbols, _ = super()._get_tentacle_registration_topic(all_symbols_by_crypto_currencies, + time_frames, + real_time_time_frames) + to_handle_time_frames = [] + if self.time_frame is None: + self.logger.error("Missing self.time_frame value, impossible to initialize this evaluator.") + else: + ideal_time_frame = common_enums.TimeFrames(self.time_frame) + to_handle_time_frame = util.get_shortest_time_frame(ideal_time_frame, real_time_time_frames, time_frames) + if ideal_time_frame != to_handle_time_frame: + self.logger.warning(f"Missing {ideal_time_frame.name} time frame in available time frames, " + f"using {to_handle_time_frame.name} instead.") + # set self.available_time_frame with the actually available one + self.available_time_frame = to_handle_time_frame.value + to_handle_time_frames = [to_handle_time_frame] + # by default time frame registration only for the timeframe of this real-time evaluator + return currencies, symbols, to_handle_time_frames + diff --git a/packages/evaluators/octobot_evaluators/evaluators/scripted_evaluator.py b/packages/evaluators/octobot_evaluators/evaluators/scripted_evaluator.py new file mode 100644 index 0000000000..9005af25ee --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/scripted_evaluator.py @@ -0,0 +1,320 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import importlib + +import async_channel.constants as channel_constants + +import octobot_commons.channels_name as channels_name +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_commons.errors as commons_errors +import octobot_commons.databases as commons_databases + +import octobot_evaluators.evaluators as evaluator +import octobot_evaluators.util as evaluators_util +import octobot_tentacles_manager.api as tentacles_manager_api + + +class ScriptedEvaluator(evaluator.AbstractEvaluator): + __metaclass__ = evaluator.AbstractEvaluator + EVALUATOR_SCRIPT_MODULE = None + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self._script = None + self._are_candles_initialized = False + self._has_script_been_called_once = False + + def post_init(self, tentacles_setup_config): + # add config folder to importable files to import the user script + tentacles_manager_api.import_user_tentacles_config_folder(tentacles_setup_config) + + async def start(self, bot_id: str) -> bool: + """ + Default TA start: to be overwritten + Subscribe to OHLCV notification from self.symbols and self.time_frames + :return: success of the evaluator's start + """ + await super().start(bot_id) + try: + import octobot_trading.api as exchange_api + exchange_id = exchange_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + trigger_time_frames = self.get_trigger_time_frames() + time_frame_filter = [tf.value + for tf in exchange_api.get_exchange_available_required_time_frames( + self.exchange_name, exchange_id) + if tf.value in trigger_time_frames or + trigger_time_frames == commons_constants.CONFIG_WILDCARD + ] + if trigger_time_frames != commons_constants.CONFIG_WILDCARD and \ + len(time_frame_filter) < len(trigger_time_frames): + missing_time_frames = [tf for tf in trigger_time_frames if tf not in time_frame_filter] + self.logger.error(f"Missing timeframe to satisfy {trigger_time_frames} required time frames. " + f"Please activate those timeframes {missing_time_frames}") + if len(time_frame_filter) == 1: + time_frame_filter = time_frame_filter[0] + cryptocurrency = self.cryptocurrency if self.cryptocurrency else channel_constants.CHANNEL_WILDCARD + symbol = self.symbol if self.symbol else channel_constants.CHANNEL_WILDCARD + time_frame = self.time_frame.value if self.time_frame else time_frame_filter + self.consumers += await self._register_on_channels(exchange_id, cryptocurrency, symbol, time_frame, bot_id) + return True + except ImportError: + self.logger.error("Can't connect to trading channels") + return False + + async def evaluator_ohlcv_callback(self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, + time_frame: str, candle: dict): + # add a full candle to time to get the real time + trigger_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + \ + commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(time_frame)] * \ + commons_constants.MINUTE_TO_SECONDS + await self._call_script(exchange, exchange_id, cryptocurrency, symbol, + trigger_time, + commons_enums.TriggerSource.OHLCV.value, + time_frame=time_frame, candle=candle) + + async def evaluator_kline_callback(self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, + time_frame, kline: dict): + await self._call_script(exchange, exchange_id, cryptocurrency, symbol, + kline[commons_enums.PriceIndexes.IND_PRICE_TIME.value], + commons_enums.TriggerSource.KLINE.value, + time_frame=time_frame, kline=kline) + + async def evaluator_manual_callback(self, context=None, ignore_cache=False, **kwargs): + """ + Called when this evaluator is triggered from a manual call + :param context: the calling script's context + :param kwargs: unused parameters + :return: the evaluation value + """ + async with context.nested_call_context(self) as local_context: + try: + # Cache is initialized at the 1st call: since a new instance of the evaluator is + # potentially created each time, use cache to figure out if it has been called already. + # Since self._has_script_been_called_once is only used in the context of cached evaluators, + # it' fine to have it False all the time when no cache is used. + self._has_script_been_called_once = self.use_cache() and local_context.has_cache( + local_context.symbol, + local_context.time_frame, + config_name=local_context.config_name + ) + return_value, from_cache = await self._get_cached_or_computed_value( + local_context, ignore_cache=ignore_cache + ) + if return_value == commons_constants.DO_NOT_OVERRIDE_CACHE: + value, missing = await local_context.get_cached_value() + local_context.ensure_no_missing_cached_value(missing) + return value, None + + # Arriving here, we know the evaluation was completed, we would otherwise be in the except statement + # We can now cache the value if necessary + if not from_cache and self.use_cache() and return_value != commons_constants.DO_NOT_CACHE: + await local_context.set_cached_value(return_value, flush_if_necessary=True) + return return_value, None + except (commons_errors.MissingDataError, commons_errors.ExecutionAborted, + commons_errors.NoCacheValue) as e: + self.logger.debug(f"Can't compute evaluator value: {e}") + return commons_constants.DO_NOT_CACHE, e + + async def _inner_call_script(self, context): + # always call the script at least once to save plotting statements + await self._pre_script_call(context) + computed_value = await self.get_script()(context) + self._has_script_been_called_once = True + return computed_value + + async def _get_cached_or_computed_value(self, context, ignore_cache=False): + computed_value = None + called = False + is_value_missing = True + if not self._has_script_been_called_once: + if self.use_cache(): + # init necessary settings before initializing cache + await self._pre_script_call(context) + # init cache to be sure its initialized before any call + context.init_cache() + computed_value = await self._inner_call_script(context) + called = True + if not called: + if not ignore_cache and self.use_cache(): + computed_value, is_value_missing = await context.get_cached_value() + if is_value_missing and not called: + computed_value = await self._inner_call_script(context) + return computed_value, not is_value_missing + + async def _call_script(self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, + trigger_cache_timestamp: float, trigger_source: str, + time_frame: str = None, candle: dict = None, kline: dict = None): + self.last_call = (exchange, exchange_id, cryptocurrency, symbol, trigger_cache_timestamp, + trigger_source, time_frame, candle, kline) + context = evaluators_util.local_trading_context( + self, symbol, time_frame, trigger_cache_timestamp, cryptocurrency=cryptocurrency, + exchange=exchange, exchange_id=exchange_id, trigger_source=trigger_source, + trigger_value=candle or kline + ) + try: + import octobot_trading.api as trading_api + if not self._are_candles_initialized: + self._are_candles_initialized = trading_api.are_symbol_candles_initialized(context.exchange_manager, + symbol, time_frame) + if not self._are_candles_initialized: + self.logger.debug(f"Waiting for candles to be initialized before calling script " + f"for {symbol} {time_frame}") + return + self.eval_note, from_cache = await self._get_cached_or_computed_value(context) + eval_time = None + if trigger_source == commons_enums.TriggerSource.OHLCV.value: + eval_time = evaluators_util.get_eval_time(full_candle=candle, time_frame=time_frame) + elif trigger_source == commons_enums.TriggerSource.KLINE.value: + eval_time = evaluators_util.get_eval_time(partial_candle=kline) + if eval_time is None: + self.logger.error("Can't compute evaluation time, using exchange time") + eval_time = trading_api.get_exchange_current_time(context.exchange_manager) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=eval_time, cache_client=context, + cache_if_available=not from_cache) + except (commons_errors.MissingDataError, commons_errors.ExecutionAborted) as e: + self.logger.debug(f"Script execution aborted: {e}") + self.eval_note = commons_constants.DO_NOT_CACHE + except ImportError: + self.logger.error(f"Error when importing octobot-trading") + except Exception as e: + self.logger.exception(e, True, f"Error when calling evaluation script: {e}") + + async def _pre_script_call(self, context): + try: + import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords + # Always register activation_topics use input to enable changing it from run metadata + # (where user inputs are registered) + activation_topic_values = [ + commons_enums.ActivationTopics.FULL_CANDLES.value, + commons_enums.ActivationTopics.IN_CONSTRUCTION_CANDLES.value + ] + activation_method = await basic_keywords.get_activation_topics( + context, + commons_enums.ActivationTopics.FULL_CANDLES.value, + activation_topic_values + ) + self.is_triggered_after_candle_close = activation_method == \ + commons_enums.ActivationTopics.FULL_CANDLES.value + except ImportError: + self.logger.error("Can't read octobot_trading scripting_library") + + @classmethod + def get_is_symbol_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not symbol dependant else False + """ + return False + + @classmethod + def get_is_time_frame_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not time_frame dependant else False + """ + return False + + def register_script_module(self, script_module): + self.__class__.EVALUATOR_SCRIPT_MODULE = script_module + self._script = self.get_script_from_module(script_module) + + @staticmethod + def get_script_from_module(module): + return module.script + + def get_script(self): + return self._script + + async def user_commands_callback(self, bot_id, subject, action, data) -> None: + await super().user_commands_callback(bot_id, subject, action, data) + if action in (commons_enums.UserCommands.RELOAD_SCRIPT.value, commons_enums.UserCommands.RELOAD_CONFIG.value): + # also reload script on RELOAD_CONFIG + await self._reload_script(bot_id) + + async def _reload_script(self, bot_id): + importlib.reload(self.__class__.EVALUATOR_SCRIPT_MODULE) + self.register_script_module(self.__class__.EVALUATOR_SCRIPT_MODULE) + # reload config + await self.reload_config(bot_id) + if self.last_call: + # todo cancel and restart live tasks + # recall script with for are_data_initialized to false to re-write initial data + run_data_db, symbol_db = self._get_run_and_symbol_dbs() + time_frames = None if self.get_is_time_frame_wildcard() else (self.time_frame.value, ) + run_data_db.set_initialized_flags(False) + symbol_db.set_initialized_flags(False, time_frames) + self._has_script_been_called_once = False + try: + await self._call_script(*self.last_call) + finally: + await run_data_db.flush() + run_data_db.set_initialized_flags(True) + await symbol_db.flush() + symbol_db.set_initialized_flags(True, time_frames) + + def _get_run_and_symbol_dbs(self): + try: + import octobot_trading.api as trading_api + exchange_manager = trading_api.get_exchange_manager_from_exchange_name_and_id( + self.exchange_name, + trading_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + ) + bot_id = trading_api.get_bot_id(exchange_manager) + provider = commons_databases.RunDatabasesProvider.instance() + return provider.get_run_db(bot_id), provider.get_symbol_db(bot_id, self.exchange_name, self.symbol) + except ImportError: + self.logger.error("required OctoBot-trading to get a trading mode writer") + raise + + async def _register_on_channels(self, exchange_id, cryptocurrency, symbol, time_frame, bot_id): + consumers = [] + try: + import octobot_trading.exchange_channel as exchanges_channel + registration_topics = self._get_channel_registration() + if channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value in registration_topics: + consumers.append( + await exchanges_channel.get_chan(channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, + exchange_id). + new_consumer(self.evaluator_ohlcv_callback, cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame, priority_level=self.priority_level) + ) + if channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value in registration_topics: + consumers.append( + await exchanges_channel.get_chan(channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value, + exchange_id). \ + new_consumer(self.evaluator_kline_callback, cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame, priority_level=self.priority_level) + ) + except ImportError: + self.logger.warning("Can't connect to trading channels") + return consumers + + def _get_channel_registration(self): + TOPIC_TO_CHANNEL_NAME = { + commons_enums.ActivationTopics.FULL_CANDLES.value: + channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, + commons_enums.ActivationTopics.IN_CONSTRUCTION_CANDLES.value: + channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value, + } + registration_channels = [] + # Activate on full candles only by default (same as technical evaluators) + topic = self.specific_config.get(commons_constants.CONFIG_ACTIVATION_TOPICS.replace(" ", "_"), + commons_enums.ActivationTopics.FULL_CANDLES.value) + try: + registration_channels.append(TOPIC_TO_CHANNEL_NAME[topic]) + except KeyError: + self.logger.error(f"Unknown registration topic: {topic}") + return registration_channels diff --git a/packages/evaluators/octobot_evaluators/evaluators/social_evaluator.py b/packages/evaluators/octobot_evaluators/evaluators/social_evaluator.py new file mode 100644 index 0000000000..7d586e111c --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/social_evaluator.py @@ -0,0 +1,95 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import typing + +import async_channel.channels as channels + +import octobot_evaluators.evaluators as evaluator + +import octobot_services.api as service_api +import octobot_trading.api as exchange_api + + +class SocialEvaluator(evaluator.AbstractEvaluator): + __metaclass__ = evaluator.AbstractEvaluator + SERVICE_FEED_CLASS = None + ALLOW_SUPER_CLASS_CONFIG = True + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.exchange_id: typing.Optional[str] = None + self.bot_id: typing.Optional[str] = None + self.feed_config = {} + + # Override if no service feed is required for a social evaluator + async def start(self, bot_id: str) -> bool: + """ + :return: success of the evaluator's start + """ + self.bot_id = bot_id + if self.SERVICE_FEED_CLASS is None: + self.logger.error("SERVICE_FEED_CLASS is required to use a service feed. Consumer can't start.") + else: + await super().start(self.bot_id) + service_feed = service_api.get_service_feed(self.SERVICE_FEED_CLASS, self.bot_id) + if service_feed is not None: + service_feed.update_feed_config(self.feed_config) + await channels.get_chan(service_feed.FEED_CHANNEL.get_name()).new_consumer(self._feed_callback) + # store exchange_id to use it later for evaluation timestamps + self.exchange_id = exchange_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + return True + return False + + def get_data_cache(self, current_time: float, key: typing.Optional[str] = None): + """ + :param current_time: the current time + :return: the data cache from the service feed + """ + try: + import octobot_services.api as service_api + return service_api.get_service_feed(self.SERVICE_FEED_CLASS, self.bot_id).get_data_cache(current_time, key) + except ImportError as e: + self.logger.exception(e, True, "Can't get data cache: requires OctoBot-Services package installed") + return None + + def get_current_exchange_time(self): + try: + import octobot_trading.api as exchange_api + if self.exchange_id is not None: + return exchange_api.get_exchange_current_time( + exchange_api.get_exchange_manager_from_exchange_name_and_id( + self.exchange_name, + self.exchange_id + ) + ) + except ImportError: + self.logger.error(f"Can't get current exchange time: requires OctoBot-Trading package installed") + return None + + def _get_tentacle_registration_topic(self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames): + currencies, _, _ = super()._get_tentacle_registration_topic(all_symbols_by_crypto_currencies, + time_frames, + real_time_time_frames) + symbols = [self.symbol] + to_handle_time_frames = [self.time_frame] + # by default no symbol registration for social evaluators + # by default no time frame re+gistration for social evaluators + return currencies, symbols, to_handle_time_frames + + @abc.abstractmethod + async def _feed_callback(self, *args): + raise NotImplementedError("_feed_callback is not implemented") diff --git a/packages/evaluators/octobot_evaluators/evaluators/strategy_evaluator.py b/packages/evaluators/octobot_evaluators/evaluators/strategy_evaluator.py new file mode 100644 index 0000000000..9d8ca963da --- /dev/null +++ b/packages/evaluators/octobot_evaluators/evaluators/strategy_evaluator.py @@ -0,0 +1,457 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import typing + +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.time_frame_manager as time_frame_manager + +import octobot_evaluators.constants as constants +import octobot_evaluators.enums as enums +import octobot_evaluators.evaluators as evaluator +import octobot_evaluators.evaluators.channel as evaluator_channels +import octobot_evaluators.matrix as matrix + +import octobot_tentacles_manager.api as api +import octobot_tentacles_manager.configuration as tm_configuration + + +class StrategyEvaluator(evaluator.AbstractEvaluator): + __metaclass__ = evaluator.AbstractEvaluator + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.consumer_instance = None + self.strategy_time_frames = [] + self.evaluations_last_updates = {} + self.allowed_time_delta = None + + # caches + self.available_evaluators_cache = {} + self.available_time_frames_cache = {} + self.available_node_paths_cache = {} + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + self.UI.user_input(constants.STRATEGIES_REQUIRED_TIME_FRAME, common_enums.UserInputTypes.MULTIPLE_OPTIONS, + [common_enums.TimeFrames.ONE_HOUR.value], + inputs, options=[tf.value for tf in common_enums.TimeFrames], + title="Time frame: The time frame to observe in order to spot changes.") + + async def start(self, bot_id: str) -> bool: + """ + Default Strategy start: to be overwritten + Subscribe to Matrix notification from self.symbols and self.time_frames + :return: success of the evaluator's start + """ + await super().start(bot_id) + self.consumer_instance = await evaluator_channels.get_chan(constants.MATRIX_CHANNEL, + self.matrix_id).new_consumer( + self.strategy_matrix_callback, + priority_level=self.priority_level, + exchange_name=self.exchange_name if self.exchange_name else common_constants.CHANNEL_WILDCARD) + self._init_exchange_allowed_time_delta(self.exchange_name, self.matrix_id) + return True + + async def strategy_completed(self, + cryptocurrency: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + time_frame: typing.Optional[str] = None, + eval_note = None, + eval_time: int = 0, + notify: bool = True) -> None: + """ + Main async method to notify that a strategy has updated its evaluation + :param cryptocurrency: evaluated cryptocurrency + :param symbol: evaluated symbol + :param time_frame: evaluated time frame + :param eval_note: if None = self.eval_note + :param eval_time: the time of the evaluation if relevant, default is 0 + :param notify: if true, will trigger matrix consumers + :return: None + """ + return await self.evaluation_completed(cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame, + eval_note=eval_note, + eval_time=eval_time, + notify=notify, + origin_consumer=self.consumer_instance) + + def is_evaluator_cycle_complete(self, matrix_id, evaluator_name, evaluator_type, exchange_name, + cryptocurrency, symbol, time_frame) -> bool: + """ + :return: True if the strategy is to be waken up by evaluators of the given type at the moment of this call. + This avoids partial time frame updates wakeup. + Override if necessary + """ + # 1. Ensure this evaluation has not already been sent + # 2. Ensure every evaluator of the given type form this time frame are valid + return not self._already_sent_this_technical_evaluation(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame) and \ + self._are_every_evaluation_valid_and_up_to_date(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame) + + def clear_cache(self): + self.available_evaluators_cache = {} + self.available_time_frames_cache = {} + self.available_node_paths_cache = {} + + async def stop(self) -> None: + await super().stop() + if self.consumer_instance: + await evaluator_channels.get_chan(constants.MATRIX_CHANNEL, + self.matrix_id).remove_consumer(self.consumer_instance) + self.consumer_instance = None + + def get_full_cycle_evaluator_types(self) -> tuple: + # returns a tuple as it is faster to create than a list + return enums.EvaluatorMatrixTypes.TA.value, + + async def strategy_matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + # if this callback is from a technical evaluator: ensure strategy should be notified at this moment + for full_cycle_evaluator in self.get_full_cycle_evaluator_types(): + if evaluator_type == full_cycle_evaluator: + # ensure this time frame is within the strategy's time frames + if time_frame is None or common_enums.TimeFrames(time_frame) not in self.strategy_time_frames or \ + not self.is_evaluator_cycle_complete(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame): + # do not call the strategy + return + await self.matrix_callback( + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame + ) + + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + # To be used to trigger an evaluation + # Do not forget to check if evaluator_name is self.name + pass + + def _are_every_evaluation_valid_and_up_to_date(self, + matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame, + can_retry=True): + to_validate_node_paths = self._get_available_node_paths(matrix_id, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + use_cache=True) + current_time = self._get_exchange_current_time(exchange_name, matrix_id) + # ensure all evaluations are valid (do not trigger on an expired evaluation) + try: + if all(matrix.is_tentacle_value_valid(self.matrix_id, evaluation_node_path, + timestamp=current_time, + delta=self.allowed_time_delta) + for evaluation_node_path in to_validate_node_paths): + self._save_last_evaluation(matrix_id, exchange_name, evaluator_type, evaluator_name, + cryptocurrency, symbol, time_frame) + return True + return False + except KeyError: + self.clear_cache() + if can_retry: + return self._are_every_evaluation_valid_and_up_to_date(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame, + can_retry=False) + raise + + def _get_available_node_paths(self, + matrix_id, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + use_cache=True): + if use_cache: + try: + return self.available_node_paths_cache[matrix_id][exchange_name][evaluator_type][cryptocurrency][symbol] + except KeyError: + # No cache usage here to be use to refresh data + node_paths = self._inner_get_available_node_paths(matrix_id, evaluator_type, exchange_name, + cryptocurrency, symbol, use_cache=False) + if matrix_id not in self.available_node_paths_cache: + self.available_node_paths_cache[matrix_id] = {} + if exchange_name not in self.available_node_paths_cache[matrix_id]: + self.available_node_paths_cache[matrix_id][exchange_name] = {} + if evaluator_type not in self.available_node_paths_cache[matrix_id][exchange_name]: + self.available_node_paths_cache[matrix_id][exchange_name][evaluator_type] = {} + if cryptocurrency not in self.available_node_paths_cache[matrix_id][exchange_name][evaluator_type]: + self.available_node_paths_cache[matrix_id][exchange_name][evaluator_type][cryptocurrency] = {} + self.available_node_paths_cache[matrix_id][exchange_name][evaluator_type][cryptocurrency][symbol] = \ + node_paths + return node_paths + return self._inner_get_available_node_paths(matrix_id, evaluator_type, exchange_name, + cryptocurrency, symbol, use_cache=use_cache) + + def _inner_get_available_node_paths(self, matrix_id, evaluator_type, exchange_name, cryptocurrency, symbol, + use_cache=True): + paths = [] + for time_frame in self.get_available_time_frames(matrix_id, exchange_name, evaluator_type, + cryptocurrency, symbol, use_cache=use_cache): + if common_enums.TimeFrames(time_frame) in self.strategy_time_frames: + for evaluator_name in self._get_available_evaluators(matrix_id, exchange_name, evaluator_type, + use_cache=use_cache): + path = matrix.get_matrix_default_value_path(tentacle_name=evaluator_name, + tentacle_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame) + if matrix.get_tentacle_node(matrix_id, path) is not None: + paths.append(path) + return paths + + def _save_last_evaluation(self, matrix_id, exchange_name, evaluator_type, tentacle_name, + cryptocurrency, symbol, time_frame): + self._set_last_evaluation_time(exchange_name, + evaluator_type, + cryptocurrency, + symbol, + time_frame, + matrix.get_tentacle_eval_time(matrix_id, + matrix.get_matrix_default_value_path( + tentacle_name=tentacle_name, + tentacle_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame) + ) + ) + + def _already_sent_this_technical_evaluation(self, matrix_id, evaluator, evaluator_type, exchange_name, + cryptocurrency, symbol, time_frame): + try: + update_time = matrix.get_tentacle_eval_time(matrix_id, + matrix.get_matrix_default_value_path( + tentacle_name=evaluator, + tentacle_type=evaluator_type, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame) + ) + return self.evaluations_last_updates[exchange_name][evaluator_type][cryptocurrency][symbol][time_frame] \ + == update_time + except KeyError: + return False + + def _set_last_evaluation_time(self, exchange_name, evaluator_type, cryptocurrency, symbol, time_frame, value): + try: + self.evaluations_last_updates[exchange_name][evaluator_type][cryptocurrency][symbol][time_frame] = value + except KeyError: + if exchange_name not in self.evaluations_last_updates: + self.evaluations_last_updates[exchange_name] = {} + if evaluator_type not in self.evaluations_last_updates[exchange_name]: + self.evaluations_last_updates[exchange_name][evaluator_type] = {} + if cryptocurrency not in self.evaluations_last_updates[exchange_name][evaluator_type]: + self.evaluations_last_updates[exchange_name][evaluator_type][cryptocurrency] = {} + if symbol not in self.evaluations_last_updates[exchange_name][evaluator_type][cryptocurrency]: + self.evaluations_last_updates[exchange_name][evaluator_type][cryptocurrency][symbol] = {} + self.evaluations_last_updates[exchange_name][evaluator_type][cryptocurrency][symbol] = { + time_frame: value + } + + def _init_exchange_allowed_time_delta(self, exchange_name, matrix_id): + try: + import octobot_trading.api as exchange_api + exchange_manager = exchange_api.get_exchange_manager_from_exchange_name_and_id( + exchange_name, + exchange_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + ) + self.allowed_time_delta = exchange_api.get_exchange_allowed_time_lag(exchange_manager) + except ImportError: + self.logger.error("Strategy requires OctoBot-Trading package installed") + + async def evaluators_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + time_frame, + data): + # Used to communicate between evaluators + if data[constants.EVALUATOR_CHANNEL_DATA_ACTION] == constants.RESET_EVALUATION: + for time_frame in data[constants.EVALUATOR_CHANNEL_DATA_TIME_FRAMES]: + self._set_last_evaluation_time(exchange_name, enums.EvaluatorMatrixTypes.TA.value, + cryptocurrency, symbol, time_frame.value, None) + + def _get_available_evaluators(self, matrix_id, exchange_name, tentacle_type, use_cache=True): + if use_cache: + try: + return self.available_evaluators_cache[matrix_id][exchange_name][tentacle_type] + except KeyError: + available_evaluators = matrix.get_node_children_by_names_at_path( + matrix_id, matrix.get_tentacle_path(exchange_name=exchange_name, tentacle_type=tentacle_type) + ).keys() + if matrix_id not in self.available_evaluators_cache: + self.available_evaluators_cache[matrix_id] = {} + if exchange_name not in self.available_evaluators_cache[matrix_id]: + self.available_evaluators_cache[matrix_id][exchange_name] = {} + self.available_evaluators_cache[matrix_id][exchange_name][tentacle_type] = available_evaluators + return available_evaluators + return matrix.get_node_children_by_names_at_path( + matrix_id, matrix.get_tentacle_path(exchange_name=exchange_name, tentacle_type=tentacle_type) + ).keys() + + def get_available_time_frames(self, + matrix_id, + exchange_name=None, + tentacle_type=None, + cryptocurrency=None, + symbol=None, + use_cache=True): + if use_cache: + try: + return self.available_time_frames_cache[matrix_id][exchange_name][tentacle_type][cryptocurrency][symbol] + except KeyError: + available_time_frames = matrix.get_available_time_frames( + matrix_id, exchange_name, tentacle_type, cryptocurrency, symbol) + if matrix_id not in self.available_time_frames_cache: + self.available_time_frames_cache[matrix_id] = {} + if exchange_name not in self.available_time_frames_cache[matrix_id]: + self.available_time_frames_cache[matrix_id][exchange_name] = {} + if tentacle_type not in self.available_time_frames_cache[matrix_id][exchange_name]: + self.available_time_frames_cache[matrix_id][exchange_name][tentacle_type] = {} + if cryptocurrency not in self.available_time_frames_cache[matrix_id][exchange_name][tentacle_type]: + self.available_time_frames_cache[matrix_id][exchange_name][tentacle_type][cryptocurrency] = {} + self.available_time_frames_cache[matrix_id][exchange_name][tentacle_type][cryptocurrency][symbol] = \ + available_time_frames + return available_time_frames + return matrix.get_available_time_frames(matrix_id, exchange_name, tentacle_type, cryptocurrency, symbol) + + def _get_tentacle_registration_topic(self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames): + strategy_currencies, symbols, self.strategy_time_frames = super()._get_tentacle_registration_topic( + all_symbols_by_crypto_currencies, + time_frames, + real_time_time_frames) + # by default no time frame registration for strategies + return strategy_currencies, symbols, [self.time_frame] + + @classmethod + def get_required_time_frames(cls, config: dict, + tentacles_setup_config: tm_configuration.TentaclesSetupConfiguration, + strategy_config=None): + if constants.CONFIG_FORCED_TIME_FRAME in config: + return time_frame_manager.parse_time_frames(config[constants.CONFIG_FORCED_TIME_FRAME]) + strategy_config: dict = strategy_config or api.get_tentacle_config(tentacles_setup_config, cls) + if constants.STRATEGIES_REQUIRED_TIME_FRAME in strategy_config: + return time_frame_manager.parse_time_frames(strategy_config[constants.STRATEGIES_REQUIRED_TIME_FRAME]) + else: + raise Exception(f"'{constants.STRATEGIES_REQUIRED_TIME_FRAME}' is missing in configuration file") + + @classmethod + def get_required_evaluators(cls, tentacles_config: tm_configuration.TentaclesSetupConfiguration, + strategy_config: dict = None) -> list: + """ + :param tentacles_config: the tentacles config to find the current strategy config from + :param strategy_config: the strategy configuration dict + :return: the list of required evaluators, [CONFIG_WILDCARD] means any evaluator + """ + strategy_config: dict = strategy_config or api.get_tentacle_config(tentacles_config, cls) + if constants.STRATEGIES_REQUIRED_EVALUATORS in strategy_config: + return strategy_config[constants.STRATEGIES_REQUIRED_EVALUATORS] + else: + raise Exception(f"'{constants.STRATEGIES_REQUIRED_EVALUATORS}' is missing in configuration file") + + @classmethod + def get_compatible_evaluators_types(cls, tentacles_config: tm_configuration.TentaclesSetupConfiguration, + strategy_config: dict = None) -> list: + """ + :param tentacles_config: the tentacles config to find the current strategy config from + :param strategy_config: the strategy configuration dict + :return: the list of compatible evaluator type, [CONFIG_WILDCARD] means any type + """ + strategy_config: dict = strategy_config or api.get_tentacle_config(tentacles_config, cls) + if constants.STRATEGIES_COMPATIBLE_EVALUATOR_TYPES in strategy_config: + return strategy_config[constants.STRATEGIES_COMPATIBLE_EVALUATOR_TYPES] + return [common_constants.CONFIG_WILDCARD] + + @classmethod + def get_default_evaluators(cls, tentacles_config: tm_configuration.TentaclesSetupConfiguration, + strategy_config: dict = None): + strategy_config: dict = strategy_config or api.get_tentacle_config(tentacles_config, cls) + if constants.TENTACLE_DEFAULT_CONFIG in strategy_config: + return strategy_config[constants.TENTACLE_DEFAULT_CONFIG] + else: + required_evaluators = cls.get_required_evaluators(tentacles_config, strategy_config) + if required_evaluators == common_constants.CONFIG_WILDCARD: + return [] + return required_evaluators diff --git a/packages/evaluators/octobot_evaluators/matrix/__init__.py b/packages/evaluators/octobot_evaluators/matrix/__init__.py new file mode 100644 index 0000000000..4de93bc58c --- /dev/null +++ b/packages/evaluators/octobot_evaluators/matrix/__init__.py @@ -0,0 +1,83 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_evaluators.matrix import matrix +from octobot_evaluators.matrix.matrix import ( + Matrix, +) + +from octobot_evaluators.matrix import matrix_manager +from octobot_evaluators.matrix import matrices +from octobot_evaluators.matrix import channel + +from octobot_evaluators.matrix.matrix_manager import ( + get_matrix, + set_tentacle_value, + get_tentacle_node, + delete_tentacle_node, + get_tentacle_value, + get_tentacle_eval_time, + get_matrix_default_value_path, + get_tentacle_nodes, + get_node_children_by_names_at_path, + get_tentacles_value_nodes, + get_latest_eval_time, + get_tentacle_path, + get_tentacle_value_path, + get_evaluations_by_evaluator, + get_available_time_frames, + get_available_symbols, + is_tentacle_value_valid, + is_evaluation_valid_in_time, + is_tentacles_values_valid, +) +from octobot_evaluators.matrix.matrices import ( + Matrices, +) +from octobot_evaluators.matrix.channel import ( + MatrixChannelConsumer, + MatrixChannelSupervisedConsumer, + MatrixChannelProducer, + MatrixChannel, +) + +__all__ = [ + "get_matrix", + "set_tentacle_value", + "get_tentacle_node", + "delete_tentacle_node", + "get_tentacle_value", + "get_tentacle_eval_time", + "get_matrix_default_value_path", + "get_tentacle_nodes", + "get_node_children_by_names_at_path", + "get_tentacles_value_nodes", + "get_latest_eval_time", + "get_tentacle_path", + "get_tentacle_value_path", + "get_evaluations_by_evaluator", + "get_available_time_frames", + "get_available_symbols", + "is_tentacle_value_valid", + "is_evaluation_valid_in_time", + "is_tentacles_values_valid", + "Matrices", + "Matrix", + "MatrixChannelConsumer", + "MatrixChannelSupervisedConsumer", + "MatrixChannelProducer", + "MatrixChannel", +] diff --git a/packages/evaluators/octobot_evaluators/matrix/channel/__init__.py b/packages/evaluators/octobot_evaluators/matrix/channel/__init__.py new file mode 100644 index 0000000000..d74c3be1ad --- /dev/null +++ b/packages/evaluators/octobot_evaluators/matrix/channel/__init__.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_evaluators.matrix.channel import matrix + +from octobot_evaluators.matrix.channel.matrix import ( + MatrixChannelConsumer, + MatrixChannelSupervisedConsumer, + MatrixChannelProducer, + MatrixChannel, +) + +__all__ = [ + "MatrixChannelConsumer", + "MatrixChannelSupervisedConsumer", + "MatrixChannelProducer", + "MatrixChannel", +] diff --git a/packages/evaluators/octobot_evaluators/matrix/channel/matrix.py b/packages/evaluators/octobot_evaluators/matrix/channel/matrix.py new file mode 100644 index 0000000000..4106aa700f --- /dev/null +++ b/packages/evaluators/octobot_evaluators/matrix/channel/matrix.py @@ -0,0 +1,215 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import async_channel.constants as channel_constants + +import octobot_commons.logging as logging + +import octobot_evaluators.evaluators.channel as evaluator_channels +import octobot_evaluators.constants as constants +import octobot_evaluators.matrix as matrix + + +class MatrixChannelConsumer(evaluator_channels.EvaluatorChannelConsumer): + """ + EvaluatorChannelConsumer adapted for MatrixChannel + """ + + +class MatrixChannelSupervisedConsumer(evaluator_channels.EvaluatorChannelSupervisedConsumer): + """ + EvaluatorChannelSupervisedConsumer adapted for MatrixChannel + """ + + +class MatrixChannelProducer(evaluator_channels.EvaluatorChannelProducer): + """ + EvaluatorChannelProducer adapted for MatrixChannel + """ + + # noinspection PyMethodOverriding + async def send(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type=constants.EVALUATOR_EVAL_DEFAULT_TYPE, + eval_note_description=None, + eval_note_metadata=None, + exchange_name=None, + cryptocurrency=channel_constants.CHANNEL_WILDCARD, + symbol=channel_constants.CHANNEL_WILDCARD, + time_frame=None, + origin_consumer=None): + for consumer in self.channel.get_filtered_consumers(matrix_id=matrix_id, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame, + evaluator_type=evaluator_type, + evaluator_name=evaluator_name, + exchange_name=exchange_name, + origin_consumer=origin_consumer): + await consumer.queue.put({ + "matrix_id": matrix_id, + "evaluator_name": evaluator_name, + "evaluator_type": evaluator_type, + "eval_note": eval_note, + "eval_note_type": eval_note_type, + "eval_note_description": eval_note_description, + "eval_note_metadata": eval_note_metadata, + "exchange_name": exchange_name, + "cryptocurrency": cryptocurrency, + "symbol": symbol, + "time_frame": time_frame + }) + + async def send_eval_note(self, + matrix_id: str, + evaluator_name: str, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description=None, + eval_note_metadata=None, + eval_time: float = 0, + exchange_name: str = None, + cryptocurrency: str = None, + symbol: str = None, + time_frame=None, + origin_consumer=None, + notify: bool = True): + matrix.set_tentacle_value( + matrix_id=matrix_id, + tentacle_type=eval_note_type, + tentacle_value=eval_note, + timestamp=eval_time, + tentacle_path=matrix.get_matrix_default_value_path( + exchange_name=exchange_name, + tentacle_type=evaluator_type, + tentacle_name=evaluator_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame + ), + description=eval_note_description, + metadata=eval_note_metadata + ) + if notify: + await self.send(matrix_id=matrix_id, + evaluator_name=evaluator_name, + evaluator_type=evaluator_type, + eval_note=eval_note, + eval_note_type=eval_note_type, + eval_note_description=eval_note_description, + eval_note_metadata=eval_note_metadata, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame, + origin_consumer=origin_consumer) + + +class MatrixChannel(evaluator_channels.EvaluatorChannel): + FILTER_SIZE = 1 + PRODUCER_CLASS = MatrixChannelProducer + CONSUMER_CLASS = MatrixChannelConsumer + + MATRIX_ID_KEY = "matrix_id" + CRYPTOCURRENCY_KEY = "cryptocurrency" + SYMBOL_KEY = "symbol" + TIME_FRAME_KEY = "time_frame" + EVALUATOR_TYPE_KEY = "evaluator_type" + EXCHANGE_NAME_KEY = "exchange_name" + EVALUATOR_NAME_KEY = "evaluator_name" + + def __init__(self, matrix_id): + super().__init__(matrix_id) + self.logger = logging.get_logger(f"{self.__class__.__name__}") + + # noinspection PyMethodOverriding + async def new_consumer(self, + callback: object, + size: int = 0, + priority_level: int = evaluator_channels.EvaluatorChannel.DEFAULT_PRIORITY_LEVEL, + matrix_id: str = channel_constants.CHANNEL_WILDCARD, + cryptocurrency: str = channel_constants.CHANNEL_WILDCARD, + symbol: str = channel_constants.CHANNEL_WILDCARD, + evaluator_name: str = channel_constants.CHANNEL_WILDCARD, + evaluator_type: object = channel_constants.CHANNEL_WILDCARD, + exchange_name: str = channel_constants.CHANNEL_WILDCARD, + time_frame=channel_constants.CHANNEL_WILDCARD, + supervised: bool = False): + consumer_class = MatrixChannelSupervisedConsumer if supervised else MatrixChannelConsumer + consumer = consumer_class(callback, size=size, priority_level=priority_level) + await self._add_new_consumer_and_run(consumer, + matrix_id=matrix_id, + cryptocurrency=cryptocurrency, + symbol=symbol, + evaluator_name=evaluator_name, + evaluator_type=evaluator_type, + exchange_name=exchange_name, + time_frame=time_frame) + return consumer + + def get_filtered_consumers(self, + matrix_id=channel_constants.CHANNEL_WILDCARD, + cryptocurrency=channel_constants.CHANNEL_WILDCARD, + symbol=channel_constants.CHANNEL_WILDCARD, + evaluator_type=channel_constants.CHANNEL_WILDCARD, + time_frame=channel_constants.CHANNEL_WILDCARD, + evaluator_name=channel_constants.CHANNEL_WILDCARD, + exchange_name=channel_constants.CHANNEL_WILDCARD, + origin_consumer=None): + return self.get_consumer_from_filters({ + self.MATRIX_ID_KEY: matrix_id, + self.CRYPTOCURRENCY_KEY: cryptocurrency, + self.SYMBOL_KEY: symbol, + self.TIME_FRAME_KEY: time_frame, + self.EVALUATOR_TYPE_KEY: evaluator_type, + self.EVALUATOR_NAME_KEY: evaluator_name, + self.EXCHANGE_NAME_KEY: exchange_name + }, + origin_consumer=origin_consumer) + + async def _add_new_consumer_and_run(self, consumer, + matrix_id=channel_constants.CHANNEL_WILDCARD, + cryptocurrency=channel_constants.CHANNEL_WILDCARD, + symbol=channel_constants.CHANNEL_WILDCARD, + evaluator_name=channel_constants.CHANNEL_WILDCARD, + evaluator_type=channel_constants.CHANNEL_WILDCARD, + exchange_name=channel_constants.CHANNEL_WILDCARD, + time_frame=None): + consumer_filters: dict = { + self.MATRIX_ID_KEY: matrix_id, + self.CRYPTOCURRENCY_KEY: cryptocurrency, + self.SYMBOL_KEY: symbol, + self.TIME_FRAME_KEY: time_frame, + self.EVALUATOR_NAME_KEY: evaluator_name, + self.EXCHANGE_NAME_KEY: exchange_name, + self.EVALUATOR_TYPE_KEY: evaluator_type + } + + self.add_new_consumer(consumer, consumer_filters) + await consumer.run() + self.logger.debug(f"Consumer started for : " + f"[matrix_id={matrix_id}," + f" cryptocurrency={cryptocurrency}," + f" symbol={symbol}," + f" time_frame={time_frame}," + f" evaluator_name={evaluator_name}," + f" exchange_name={exchange_name}," + f" evaluator_type={evaluator_type}]") diff --git a/packages/evaluators/octobot_evaluators/matrix/matrices.py b/packages/evaluators/octobot_evaluators/matrix/matrices.py new file mode 100644 index 0000000000..93b9aa2c64 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/matrix/matrices.py @@ -0,0 +1,38 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.logging as logging +import octobot_commons.singleton as singleton + +import octobot_evaluators.matrix as matrix + + +class Matrices(singleton.Singleton): + def __init__(self): + self.matrices: dict = {} + + def add_matrix(self, matrix) -> None: + if matrix.matrix_id not in self.matrices: + self.matrices[matrix.matrix_id] = matrix + + def get_matrix(self, matrix_id) -> matrix.Matrix: + return self.matrices[matrix_id] + + def del_matrix(self, matrix_id) -> None: + try: + if self.matrices[matrix_id]: + self.matrices.pop(matrix_id, None) + except KeyError: + logging.get_logger(self.__class__.__name__).warning(f"Can't del matrix with id {matrix_id}") diff --git a/packages/evaluators/octobot_evaluators/matrix/matrix.py b/packages/evaluators/octobot_evaluators/matrix/matrix.py new file mode 100644 index 0000000000..eb8151f2ef --- /dev/null +++ b/packages/evaluators/octobot_evaluators/matrix/matrix.py @@ -0,0 +1,93 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import uuid + +import octobot_commons.tree as tree + + +class Matrix: + """ + Matrix dataclass store tentacles data in a BaseTree + """ + __slots__ = ['matrix_id', 'matrix'] + + def __init__(self): + """ + Initialize the matrix as an BaseTree instance + """ + self.matrix_id = str(uuid.uuid4()) + self.matrix = tree.BaseTree() + + def set_node_value(self, value, value_type, value_path, timestamp=0, description=None, metadata=None): + """ + Set the node value at node path + :param value_path: the node path + :param value_type: the node type + :param value: the node value + :param timestamp: the value modification timestamp. + :param description: the node description + :param metadata: the node metadata + """ + self.matrix.set_node_at_path(value, value_type, value_path, timestamp=timestamp, description=description, metadata=metadata) + + def get_node_children_at_path(self, node_path, starting_node=None): + """ + Get the node children list + :param node_path: the node path + :param starting_node: the node to start the relative path + :return: the list of node children + """ + try: + return list(self.matrix.get_node(node_path, starting_node=starting_node).children.values()) + except tree.NodeExistsError: + return [] + + def get_node_children_by_names_at_path(self, node_path, starting_node=None): + """ + Get the node children dict with node name as key + :param node_path: the node path + :param starting_node: the node to start the relative path + :return: the dict of node children + """ + try: + return {key: val + for key, val in self.matrix.get_node(node_path, starting_node=starting_node).children.items()} + except tree.NodeExistsError: + return {} + + def get_node_at_path(self, node_path, starting_node=None): + """ + Get the BaseTreeNode at path + :param node_path: the node path + :param starting_node: the node to start the relative path + :return: the node instance at path + """ + try: + return self.matrix.get_node(node_path, starting_node=starting_node) + except tree.NodeExistsError: + return None + + def delete_node_at_path(self, node_path, starting_node=None): + """ + Delete the BaseTreeNode at path + :param node_path: the node path + :param starting_node: the node to start the relative path + :return: the deleted node + """ + try: + return self.matrix.delete_node(node_path, starting_node=starting_node) + except tree.NodeExistsError: + return None diff --git a/packages/evaluators/octobot_evaluators/matrix/matrix_manager.py b/packages/evaluators/octobot_evaluators/matrix/matrix_manager.py new file mode 100644 index 0000000000..6c735aaf4c --- /dev/null +++ b/packages/evaluators/octobot_evaluators/matrix/matrix_manager.py @@ -0,0 +1,409 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time +import typing + +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.evaluators_util as evaluators_util +import octobot_commons.logging as logging + +import octobot_evaluators.enums as enums +import octobot_evaluators.constants as constants +import octobot_evaluators.errors as errors +import octobot_evaluators.matrix as matrix + + +def get_matrix(matrix_id: str) -> matrix.Matrix: + """ + Get the matrix from its id + :param matrix_id: the matrix id + :return: the matrix instance + """ + return matrix.Matrices.instance().get_matrix(matrix_id) + + +def set_tentacle_value(matrix_id: str, tentacle_path, tentacle_type, tentacle_value, timestamp=0, + description=None, metadata=None): + """ + Set the node value at tentacle path + :param matrix_id: the matrix id + :param tentacle_path: the tentacle path + :param tentacle_type: the tentacle type + :param tentacle_value: the tentacle value + :param timestamp: the value modification timestamp. + :param description: the tentacle description + :param metadata: the tentacle metadata + """ + get_matrix(matrix_id).set_node_value(value=tentacle_value, value_type=tentacle_type, + value_path=tentacle_path, timestamp=timestamp, + description=description, metadata=metadata) + + +def get_tentacle_node(matrix_id: str, tentacle_path): + """ + Return the node at tentacle path + :param matrix_id: the matrix id + :param tentacle_path: the tentacle path + :return: the tentacle node + """ + return get_matrix(matrix_id).get_node_at_path(node_path=tentacle_path) + + +def delete_tentacle_node(matrix_id, tentacle_path): + """ + Delete the node at tentacle path + :param matrix_id: the matrix id + :param tentacle_path: the tentacle path + :return: the deleted node + """ + return get_matrix(matrix_id).delete_node_at_path(node_path=tentacle_path) + + +def get_tentacle_value(matrix_id: str, tentacle_path) -> typing.Any: + """ + Get the value of the node at tentacle path + :param matrix_id: the matrix id + :param tentacle_path: the tentacle path + :return: the tentacle value + """ + tentacle_node = get_tentacle_node(matrix_id, tentacle_path) + if tentacle_node: + return tentacle_node.node_value + return None + + +def get_tentacle_eval_time(matrix_id: str, tentacle_path) -> typing.Optional[float]: + """ + Get the evaluation time of the node at tentacle path + :param matrix_id: the matrix id + :param tentacle_path: the tentacle path + :return: the tentacle evaluation time + """ + tentacle_node = get_tentacle_node(matrix_id, tentacle_path) + if tentacle_node: + return tentacle_node.node_value_time + return None + + +def get_matrix_default_value_path(tentacle_name: str, + tentacle_type: str, + exchange_name: typing.Optional[str] = None, + cryptocurrency: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + time_frame: typing.Optional[str] = None) -> list[str]: + """ + Create matrix value path with default path + :param tentacle_name: + :param tentacle_type: + :param exchange_name: + :param cryptocurrency: + :param symbol: + :param time_frame: + :return: the default matrix + """ + return get_tentacle_path(exchange_name=exchange_name, + tentacle_type=tentacle_type, + tentacle_name=tentacle_name) + get_tentacle_value_path( + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame) + + +def get_tentacle_nodes(matrix_id, exchange_name=None, tentacle_type=None, tentacle_name=None): + """ + Returns the list of nodes related to the exchange_name, tentacle_type and tentacle_name, ignored if None + :param matrix_id: the matrix id + :param exchange_name: the exchange name to search for in the matrix + :param tentacle_type: the tentacle type to search for in the matrix + :param tentacle_name: the tentacle name to search for in the matrix + :return: nodes linked to the given params + """ + return get_matrix(matrix_id).get_node_children_at_path(get_tentacle_path(exchange_name=exchange_name, + tentacle_type=tentacle_type, + tentacle_name=tentacle_name)) + + +def get_node_children_by_names_at_path(matrix_id, node_path, starting_node=None) -> dict: + """ + :param matrix_id: the matrix id + :param node_path: the node's path to inspect + :param starting_node: the node to start the path from, default is the matrix root + :return: a dict of the children nodes of the given path identified by their name + """ + return get_matrix(matrix_id).get_node_children_by_names_at_path(node_path, starting_node=starting_node) + + +def get_tentacles_value_nodes(matrix_id, tentacle_nodes, cryptocurrency=None, symbol=None, time_frame=None): + """ + Returns the list of nodes related to the symbol and / or time_frame from the given tentacle_nodes list + :param matrix_id: the matrix id + :param tentacle_nodes: the exchange name to search for in the matrix + :param cryptocurrency: the cryptocurrency to search for in the given node list + :param symbol: the symbol to search for in the given node list + :param time_frame: the time frame to search for in the given nodes list + :return: nodes linked to the given params + """ + return [node_at_path for node_at_path in [ + get_matrix(matrix_id).get_node_at_path(get_tentacle_value_path(cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame), + starting_node=n) + for n in tentacle_nodes] + if node_at_path is not None] + + +def get_latest_eval_time(matrix_id, exchange_name=None, tentacle_type=None, cryptocurrency=None, + symbol=None, time_frame=None): + eval_times = [] + for value_node in matrix.get_tentacles_value_nodes( + matrix_id, + get_tentacle_nodes(matrix_id, + exchange_name=exchange_name, + tentacle_type=tentacle_type), + cryptocurrency=cryptocurrency, + symbol=symbol, + time_frame=time_frame): + + if isinstance(value_node.node_value_time, (float, int)): + eval_times.append(value_node.node_value_time) + return max(eval_times) if eval_times else None + + +def get_tentacle_path( + exchange_name: typing.Optional[str] = None, + tentacle_type: typing.Optional[str] = None, + tentacle_name: typing.Optional[str] = None +) -> list[str]: + """ + Returns the path related to the tentacle name, type and exchange name + :param tentacle_type: the tentacle type to add in the path, ignored if None + :param tentacle_name: the tentacle name to add in the path, ignored if None + :param exchange_name: the exchange name to add in the path (as the first element), ignored if None + :return: a list of string that represents the path of the given params + """ + node_path = [] + if exchange_name is not None: + node_path.append(exchange_name) + if tentacle_type is not None: + node_path.append(tentacle_type) + if tentacle_name is not None: + node_path.append(tentacle_name) + return node_path + + +def get_tentacle_value_path( + cryptocurrency: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + time_frame: typing.Optional[str] = None +) -> list[str]: + """ + Returns the path related to symbol and / or time_frame values + :param cryptocurrency: the cryptocurrency to add in the path, ignored if None + :param symbol: the symbol to add in the path, ignored if None + :param time_frame: the time frame to add in the path, ignored if None + :return: a list of string that represents the path of the given params + """ + node_path: list = [] + if cryptocurrency is not None: + node_path.append(cryptocurrency) + if symbol is not None: + node_path.append(symbol) + if time_frame is not None: + node_path.append(time_frame) + return node_path + + +def get_evaluations_by_evaluator(matrix_id: str, + exchange_name: typing.Optional[str] = None, + tentacle_type: typing.Optional[str] = None, + cryptocurrency: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + time_frame: typing.Optional[str] = None, + allow_missing: bool = True, + allowed_values: typing.Optional[list[typing.Any]] = None) -> dict[str, typing.Any]: + """ + Return a dict of evaluation nodes by evaluator name + :param matrix_id: the matrix id + :param exchange_name: the exchange name + :param tentacle_type: the tentacle type + :param cryptocurrency: the currency ticker + :param symbol: the traded pair + :param time_frame: the evaluation time frame + :param allow_missing: if False will raise UnsetTentacleEvaluation on missing or invalid evaluation + :param allowed_values: a white list of allowed values not to be taken as invalid + :return: the dict of evaluation nodes by evaluator name + """ + evaluator_nodes = get_node_children_by_names_at_path(matrix_id, + get_tentacle_path(exchange_name=exchange_name, + tentacle_type=tentacle_type)) + evaluations_by_evaluator = {} + for evaluator_name, node in evaluator_nodes.items(): + evaluation = get_tentacles_value_nodes(matrix_id, [node], cryptocurrency=cryptocurrency, + symbol=symbol, time_frame=time_frame) + if len(evaluation) > 1: + logging.get_logger("matrix_manager").warning( + "More than one evaluation corresponding to the given tentacle filter, " + "this means there is an issue in this methods given arguments") + elif evaluation: + eval_value = evaluation[0].node_value + if (allowed_values is not None and eval_value in allowed_values) or \ + evaluators_util.check_valid_eval_note(eval_value): + evaluations_by_evaluator[evaluator_name] = evaluation[0] + elif not allow_missing: + raise errors.UnsetTentacleEvaluation(f"Missing {time_frame if time_frame else 'evaluation'} " + f"for {evaluator_name} on {symbol}, evaluation is " + f"{repr(eval_value)}).") + return evaluations_by_evaluator + +def get_evaluation_descriptions_by_evaluator(matrix_id: str, + exchange_name: typing.Optional[str] = None, + tentacle_type: typing.Optional[str] = None, + cryptocurrency: typing.Optional[str] = None, + symbol: typing.Optional[str] = None, + time_frame: typing.Optional[str] = None, + allow_missing: bool = True, + allowed_values: typing.Optional[list[str]] = None +) -> dict[str, str]: + """ + Return a dict of evaluation descriptions by evaluator name + :param matrix_id: the matrix id + :param exchange_name: the exchange name + :param tentacle_type: the tentacle type + :param cryptocurrency: the currency ticker + :param symbol: the traded pair + :param time_frame: the evaluation time frame + :return: the dict of evaluation descriptions by evaluator name + """ + return None # TODO + +def get_available_time_frames(matrix_id: str, exchange_name: str, tentacle_type: str, cryptocurrency: str, symbol: str) -> list[str]: + """ + Return the list of available time frames for the given tentacle + :param matrix_id: the matrix id + :param exchange_name: the exchange name + :param tentacle_type: the tentacle type + :param cryptocurrency: the currency ticker + :param symbol: the traded pair + :return: the list of available time frames for the given tentacle + """ + try: + evaluator_nodes = get_node_children_by_names_at_path(matrix_id, + get_tentacle_path(exchange_name=exchange_name, + tentacle_type=tentacle_type)) + first_node = next(iter(evaluator_nodes.values())) + return list(get_node_children_by_names_at_path(matrix_id, + get_tentacle_value_path(cryptocurrency=cryptocurrency, + symbol=symbol), + starting_node=first_node)) + except StopIteration: + return [] + + +def get_available_symbols(matrix_id: str, + exchange_name: str, + cryptocurrency: str, + tentacle_type: typing.Optional[str] = enums.EvaluatorMatrixTypes.TA.value, + second_tentacle_type: typing.Optional[str] = enums.EvaluatorMatrixTypes.REAL_TIME.value) -> list[str]: + """ + Return the list of available symbols for the given currency + :param matrix_id: the matrix id + :param exchange_name: the exchange name + :param cryptocurrency: the cryptocurrency ticker + :param tentacle_type: the tentacle type to look into first + :param second_tentacle_type: the tentacle type to look into if no symbol is found in the first tentacle type + :return: the list of available symbols for the given currency + """ + try: + evaluator_nodes = get_node_children_by_names_at_path(matrix_id, + get_tentacle_path(exchange_name=exchange_name, + tentacle_type=tentacle_type)) + first_node = next(iter(evaluator_nodes.values())) + possible_symbols = list(get_node_children_by_names_at_path( + matrix_id, + get_tentacle_value_path(cryptocurrency=cryptocurrency), + starting_node=first_node)) + if possible_symbols: + return possible_symbols + elif tentacle_type != second_tentacle_type: + # try with second tentacle type + return get_available_symbols(matrix_id, exchange_name, + cryptocurrency, second_tentacle_type, second_tentacle_type) + except StopIteration: + return [] + + +def is_tentacle_value_valid( + matrix_id: str, tentacle_path, timestamp=0, delta=constants.EVALUATION_ALLOWED_TIME_DELTA +) -> bool: + """ + Check if the node is ready to be used + WARNING: This method only works with complete default tentacle path + :param matrix_id: the matrix id + :param tentacle_path: the tentacle node path + :param timestamp: the timestamp to use + :param delta: the authorized delta to be valid (in seconds) + :return: True if the node is valid else False + """ + if timestamp == 0: + timestamp = time.time() + try: + node = get_tentacle_node(matrix_id, tentacle_path) + if node is None: + raise KeyError(f"No node at {tentacle_path}") + return is_evaluation_valid_in_time( + timestamp, + node.node_value_time, + common_enums.TimeFrames(tentacle_path[-1]), + delta, + ) + except (IndexError, ValueError): + return False + + +def is_evaluation_valid_in_time( + current_time: float, evaluation_time: float, time_frame: common_enums.TimeFrames, + allowed_delta=constants.EVALUATION_ALLOWED_TIME_DELTA +): + """ + :param current_time: the reference current time + :param evaluation_time: the evaluation time to check + :param time_frame: the evaluation time frame + :param allowed_delta: the authorized delta to be valid (in seconds) + :return: True if the time is valid else False + """ + return current_time - ( + evaluation_time + common_enums.TimeFramesMinutes[time_frame] * common_constants.MINUTE_TO_SECONDS + + allowed_delta + ) < 0 + + +def is_tentacles_values_valid( + matrix_id: str, tentacle_path_list, timestamp=0, delta=constants.EVALUATION_ALLOWED_TIME_DELTA +) -> bool: + """ + Check if each of the tentacle path value is valid + :param matrix_id: the matrix id + :param tentacle_path_list: the tentacle node path list + :param timestamp: the timestamp to use + :param delta: the authorized delta to be valid (in seconds) + :return: True if all the node values are valid else False + """ + return all([is_tentacle_value_valid(matrix_id=matrix_id, + tentacle_path=tentacle_path, + timestamp=timestamp, + delta=delta) + for tentacle_path in tentacle_path_list]) diff --git a/packages/evaluators/octobot_evaluators/octobot_channel_consumer.py b/packages/evaluators/octobot_evaluators/octobot_channel_consumer.py new file mode 100644 index 0000000000..18a56db037 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/octobot_channel_consumer.py @@ -0,0 +1,84 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + +import async_channel.channels as channel_instances +import octobot_commons.channels_name as channels_name +import octobot_commons.logging as logging + +import octobot_commons.enums as enums + +import octobot_evaluators.api as api + +OCTOBOT_CHANNEL_EVALUATOR_CONSUMER_LOGGER_TAG = "OctoBotChannelEvaluatorConsumer" + + +class OctoBotChannelEvaluatorActions(enum.Enum): + """ + OctoBot Channel consumer supported actions + """ + + EVALUATOR = "evaluator" + + +class OctoBotChannelEvaluatorDataKeys(enum.Enum): + """ + OctoBot Channel consumer supported data keys + """ + + EXCHANGE_CONFIGURATION = "exchange_configuration" + MATRIX_ID = "matrix_id" + TENTACLES_SETUP_CONFIG = "tentacles_setup_config" + + +async def octobot_channel_callback(bot_id, subject, action, data) -> None: + """ + OctoBot channel consumer callback + :param bot_id: the callback bot id + :param subject: the callback subject + :param action: the callback action + :param data: the callback data + """ + if subject == enums.OctoBotChannelSubjects.CREATION.value: + await _handle_creation(bot_id, action, data) + + +async def _handle_creation(bot_id, action, data): + if action == OctoBotChannelEvaluatorActions.EVALUATOR.value: + try: + exchange_configuration = data[OctoBotChannelEvaluatorDataKeys.EXCHANGE_CONFIGURATION.value] + await api.create_and_start_all_type_evaluators( + tentacles_setup_config=data[OctoBotChannelEvaluatorDataKeys.TENTACLES_SETUP_CONFIG.value], + matrix_id=data[OctoBotChannelEvaluatorDataKeys.MATRIX_ID.value], + exchange_name=exchange_configuration.exchange_name, + bot_id=bot_id, + symbols_by_crypto_currencies=exchange_configuration.symbols_by_crypto_currencies, + symbols=exchange_configuration.symbols, + time_frames=exchange_configuration.available_required_time_frames, + real_time_time_frames=exchange_configuration.real_time_time_frames + ) + await channel_instances.get_chan_at_id(channels_name.OctoBotChannelsName.OCTOBOT_CHANNEL.value, + bot_id).get_internal_producer() \ + .send(bot_id=bot_id, + subject=enums.OctoBotChannelSubjects.NOTIFICATION.value, + action=action, + data={OctoBotChannelEvaluatorDataKeys.MATRIX_ID.value: + data[OctoBotChannelEvaluatorDataKeys.MATRIX_ID.value]}) + + except Exception as e: + logging.get_logger(OCTOBOT_CHANNEL_EVALUATOR_CONSUMER_LOGGER_TAG).exception( + e, True, f"Error when creating new evaluator {e}" + ) diff --git a/packages/evaluators/octobot_evaluators/util/__init__.py b/packages/evaluators/octobot_evaluators/util/__init__.py new file mode 100644 index 0000000000..67bf302531 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/util/__init__.py @@ -0,0 +1,34 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_evaluators.util import evaluation_util + +from octobot_evaluators.util.evaluation_util import ( + get_eval_time, + get_shortest_time_frame, + local_trading_context, + local_cache_client, + get_required_candles_count, +) + +__all__ = [ + "get_eval_time", + "get_shortest_time_frame", + "local_trading_context", + "local_cache_client", + "get_required_candles_count", +] + diff --git a/packages/evaluators/octobot_evaluators/util/evaluation_util.py b/packages/evaluators/octobot_evaluators/util/evaluation_util.py new file mode 100644 index 0000000000..f743683353 --- /dev/null +++ b/packages/evaluators/octobot_evaluators/util/evaluation_util.py @@ -0,0 +1,97 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as enums +import octobot_commons.constants as constants +import octobot_commons.databases as databases +import octobot_commons.time_frame_manager as time_frame_manager +import octobot_tentacles_manager.api as tentacles_manager_api + + +def get_eval_time(full_candle=None, time_frame=None, partial_candle=None, kline=None): + if full_candle is not None and time_frame is not None: + # add one full time frame seconds since a full candle is available when the next has started + return full_candle[enums.PriceIndexes.IND_PRICE_TIME.value] + \ + enums.TimeFramesMinutes[enums.TimeFrames(time_frame)] * constants.MINUTE_TO_SECONDS + if partial_candle is not None: + return partial_candle[enums.PriceIndexes.IND_PRICE_TIME.value] + if kline is not None: + return kline[enums.PriceIndexes.IND_PRICE_TIME.value] + raise ValueError("Invalid arguments") + + +def get_shortest_time_frame(ideal_time_frame, preferred_available_time_frames, others): + if ideal_time_frame in preferred_available_time_frames: + return ideal_time_frame + if preferred_available_time_frames: + return time_frame_manager.sort_time_frames(preferred_available_time_frames)[0] + else: + return time_frame_manager.sort_time_frames(others)[0] + + +def local_trading_context(evaluator, symbol, time_frame, trigger_cache_timestamp, + cryptocurrency=None, exchange=None, exchange_id=None, + trigger_source=None, trigger_value=None): + try: + import octobot_trading.api as exchange_api + import octobot_trading.modes as modes + exchange_manager = exchange_api.get_exchange_manager_from_exchange_name_and_id( + exchange or evaluator.exchange_name, + exchange_id or exchange_api.get_exchange_id_from_matrix_id(evaluator.exchange_name, evaluator.matrix_id) + ) + trading_modes = exchange_api.get_trading_modes(exchange_manager) + return modes.Context( + evaluator, + exchange_manager, + exchange_api.get_trader(exchange_manager), + exchange or evaluator.exchange_name, + symbol, + evaluator.matrix_id, + cryptocurrency, + time_frame, + evaluator.logger, + trading_modes[0].__class__, + trigger_cache_timestamp, + trigger_source, + trigger_value, + None, + None, + ) + except ImportError: + evaluator.logger.error("OctoBot-Evaluator local_trading_context requires OctoBot-Trading package installed") + raise + + +def local_cache_client(evaluator, symbol, time_frame, exchange_name=None): + try: + exchange_name = exchange_name or evaluator.exchange_name + import octobot_trading.api as exchange_api + exchange_manager = exchange_api.get_exchange_manager_from_exchange_name_and_id( + exchange_name, + exchange_api.get_exchange_id_from_matrix_id(exchange_name, evaluator.matrix_id) + ) + return databases.CacheClient(evaluator, exchange_name, symbol, time_frame, + evaluator.tentacles_setup_config, + not exchange_api.get_is_backtesting(exchange_manager)) + except ImportError: + evaluator.logger.error("OctoBot-Evaluator local_cache_client requires OctoBot-Trading package installed") + raise + + +def get_required_candles_count(trading_mode_class, tentacles_setup_config): + return tentacles_manager_api.get_tentacle_config(tentacles_setup_config, trading_mode_class).get( + constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT, + constants.DEFAULT_IGNORED_VALUE + ) diff --git a/packages/evaluators/requirements.txt b/packages/evaluators/requirements.txt new file mode 100644 index 0000000000..42ac857bf6 --- /dev/null +++ b/packages/evaluators/requirements.txt @@ -0,0 +1,5 @@ +# Setup requirements +numpy==2.4.2 + +# Drakkar-Software requirements +OctoBot-Tulipy>=0.4.11b13 diff --git a/packages/evaluators/standard.rc b/packages/evaluators/standard.rc new file mode 100644 index 0000000000..a743da1366 --- /dev/null +++ b/packages/evaluators/standard.rc @@ -0,0 +1,510 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10.0 + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,tests + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=R, + I, + C, + W, + no-member + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +#notes-rgx= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/packages/evaluators/tests/__init__.py b/packages/evaluators/tests/__init__.py new file mode 100644 index 0000000000..51f19b2bb1 --- /dev/null +++ b/packages/evaluators/tests/__init__.py @@ -0,0 +1,86 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import pytest +import pytest_asyncio +import os.path as path +import aiohttp + +import async_channel.util.channel_creator as channel_creator +import octobot_commons.asyncio_tools as asyncio_tools +import octobot_evaluators.api as evaluator_api +import octobot_evaluators.evaluators.channel as evaluator_channels +import octobot_evaluators.matrix.channel as matrix_channels +import octobot_evaluators.matrix.matrices as matrices +import octobot_tentacles_manager.api as tentacles_api +import octobot_tentacles_manager.constants as constants +import octobot_tentacles_manager.managers as managers + + +@pytest.fixture +def event_loop(): + loop = asyncio.new_event_loop() + # use ErrorContainer to catch otherwise hidden exceptions occurring in async scheduled tasks + error_container = asyncio_tools.ErrorContainer() + loop.set_exception_handler(error_container.exception_handler) + yield loop + # will fail if exceptions have been silently raised + loop.run_until_complete(error_container.check()) + loop.close() + + +@pytest_asyncio.fixture +async def matrix_id(): + created_matrix_id = evaluator_api.create_matrix() + yield created_matrix_id + matrices.Matrices.instance().del_matrix(created_matrix_id) + + +@pytest_asyncio.fixture +async def install_tentacles(): + tentacles_folder_name = constants.TENTACLES_PATH + def _cleanup(raises=True): + if path.exists(tentacles_folder_name): + managers.TentaclesSetupManager.delete_tentacles_arch( + force=True, raises=raises, tentacles_folder_name=tentacles_folder_name + ) + + def _tentacles_local_path(): + return path.join("tests", "static", "tentacles.zip") + + _cleanup(False) + async with aiohttp.ClientSession() as session: + if nb_errors := await tentacles_api.install_all_tentacles( + _tentacles_local_path(), tentacle_path=tentacles_folder_name, aiohttp_session=session + ): + raise AssertionError(f"Failed to install tentacles: {nb_errors} error(s) occurred") + yield + import tentacles + _cleanup() + + +@pytest_asyncio.fixture +async def evaluators_and_matrix_channels(matrix_id): + evaluators_channel = await channel_creator.create_channel_instance(evaluator_channels.EvaluatorsChannel, + evaluator_channels.set_chan, + matrix_id=matrix_id) + matrix_channel = await channel_creator.create_channel_instance(matrix_channels.MatrixChannel, + evaluator_channels.set_chan, + matrix_id=matrix_id) + yield matrix_id + await evaluators_channel.stop() + await matrix_channel.stop() + evaluator_api.del_evaluator_channels(matrix_id) diff --git a/packages/evaluators/tests/conftest.py b/packages/evaluators/tests/conftest.py new file mode 100644 index 0000000000..2f5d532b27 --- /dev/null +++ b/packages/evaluators/tests/conftest.py @@ -0,0 +1,12 @@ +import pytest +import mock +import octobot_tentacles_manager.constants as constants + + +@pytest.fixture(autouse=True) +def allow_unsigned_test_tentacles(request): + if "signature_verification" in request.keywords: + yield + else: + with mock.patch.object(constants, "ALLOW_UNSIGNED_TENTACLES", True): + yield diff --git a/packages/evaluators/tests/evaluators/__init__.py b/packages/evaluators/tests/evaluators/__init__.py new file mode 100644 index 0000000000..9e32cbc868 --- /dev/null +++ b/packages/evaluators/tests/evaluators/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/evaluators/tests/evaluators/channel/__init__.py b/packages/evaluators/tests/evaluators/channel/__init__.py new file mode 100644 index 0000000000..9e32cbc868 --- /dev/null +++ b/packages/evaluators/tests/evaluators/channel/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/evaluators/tests/evaluators/channel/test_evaluator_channel.py b/packages/evaluators/tests/evaluators/channel/test_evaluator_channel.py new file mode 100644 index 0000000000..cc6f2282d5 --- /dev/null +++ b/packages/evaluators/tests/evaluators/channel/test_evaluator_channel.py @@ -0,0 +1,59 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import pytest_asyncio + +from async_channel.util.channel_creator import create_channel_instance +from octobot_evaluators.api.initialization import del_evaluator_channels +from octobot_evaluators.evaluators.channel.evaluator_channel import get_chan, set_chan, EvaluatorChannel +import octobot_commons.asyncio_tools as asyncio_tools + +from tests import matrix_id + +EVALUATOR_CHANNEL_NAME = "Evaluator" + + +async def evaluator_callback(): + pass + + +@pytest_asyncio.fixture +async def evaluator_channel(matrix_id): + channel = None + try: + del_evaluator_channels(matrix_id) + channel = await create_channel_instance(EvaluatorChannel, set_chan, matrix_id=matrix_id) + yield matrix_id + finally: + if channel is not None: + # gracefully stop channel + await channel.stop() + del_evaluator_channels(matrix_id) + + +@pytest.mark.asyncio +async def test_evaluator_channel_get_consumer_from_filters(evaluator_channel): + consumer_1 = await get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel).new_consumer(evaluator_callback) + consumer_2 = await get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel).new_consumer(evaluator_callback) + consumer_3 = await get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel).new_consumer(evaluator_callback) + assert get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel) \ + .get_consumer_from_filters({}) == [consumer_1, consumer_2, consumer_3] + assert get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel) \ + .get_consumer_from_filters({}, origin_consumer=consumer_2) == [consumer_1, consumer_3] + assert get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel) \ + .get_consumer_from_filters({}, origin_consumer=consumer_1) == [consumer_2, consumer_3] + assert get_chan(EVALUATOR_CHANNEL_NAME, evaluator_channel) \ + .get_consumer_from_filters({}, origin_consumer=consumer_3) == [consumer_1, consumer_2] diff --git a/packages/evaluators/tests/evaluators/test_evaluator_factory.py b/packages/evaluators/tests/evaluators/test_evaluator_factory.py new file mode 100644 index 0000000000..9fcbd724e6 --- /dev/null +++ b/packages/evaluators/tests/evaluators/test_evaluator_factory.py @@ -0,0 +1,273 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import mock + +import octobot_tentacles_manager.api as tentacles_api +import octobot_evaluators.evaluators as evaluators +import octobot_commons.enums as enums +import octobot_commons.constants as constants +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_evaluators.evaluators.evaluator_factory as evaluator_factory +from octobot_evaluators.evaluators.evaluator_factory import _extract_traded_pairs, _filter_pairs + +from tests import event_loop, matrix_id, install_tentacles, evaluators_and_matrix_channels + +pytestmark = pytest.mark.asyncio + +exchange_name = "TEST_EXCHANGE_NAME" +bot_id = "TEST_BOT_ID" +symbols_by_crypto_currencies = { + "Bitcoin": ["BTC/USDT"], + "Ethereum": ["ETH/USD", "ETH/BTC"] +} +symbols = ["BTC/USDT", "ETH/USD", "ETH/BTC"] +time_frames = [enums.TimeFrames.ONE_HOUR, enums.TimeFrames.FOUR_HOURS] + +crypto_currency_name_by_crypto_currencies = {} +symbols_by_crypto_currency_tickers = {} +for name, symbol_list in symbols_by_crypto_currencies.items(): + ticker = symbol_util.parse_symbol(symbol_list[0]).base + crypto_currency_name_by_crypto_currencies[ticker] = name + symbols_by_crypto_currency_tickers[ticker] = symbol_list + + +@pytest.mark.usefixtures("event_loop", "install_tentacles") +async def test_create_all_type_evaluators(evaluators_and_matrix_channels): + tentacles_setup_config = tentacles_api.get_tentacles_setup_config() + with mock.patch.object(evaluator_factory, "_start_evaluators", mock.AsyncMock()) as _start_evaluators_mock: + created_evaluators = await evaluators.create_and_start_all_type_evaluators(tentacles_setup_config, + matrix_id=evaluators_and_matrix_channels, + exchange_name=exchange_name, + bot_id=bot_id, + symbols_by_crypto_currencies=symbols_by_crypto_currencies, + symbols=symbols, + time_frames=time_frames) + _start_evaluators_mock.assert_not_called() + + assert not created_evaluators # Trading package is not installed + + +@pytest.mark.usefixtures("event_loop", "install_tentacles") +async def test_create_strategy_evaluators(evaluators_and_matrix_channels): + import tentacles + await _test_evaluators_creation(evaluators.StrategyEvaluator, evaluators_and_matrix_channels, [ + tentacles.SimpleStrategyEvaluator, + tentacles.DipAnalyserStrategyEvaluator, + tentacles.MoveSignalsStrategyEvaluator + ]) + + +@pytest.mark.usefixtures("event_loop", "install_tentacles") +async def test_create_ta_evaluators(evaluators_and_matrix_channels): + import tentacles + await _test_evaluators_creation(evaluators.TAEvaluator, evaluators_and_matrix_channels, [ + tentacles.RSIMomentumEvaluator, + tentacles.ADXMomentumEvaluator, + tentacles.StochasticRSIVolatilityEvaluator + ]) + + +@pytest.mark.usefixtures("event_loop", "install_tentacles") +async def test_create_social_evaluators(evaluators_and_matrix_channels): + import tentacles + await _test_evaluators_creation(evaluators.SocialEvaluator, evaluators_and_matrix_channels, [ + tentacles.RedditForumEvaluator + ]) + + +@pytest.mark.usefixtures("event_loop", "install_tentacles") +async def test_create_rt_evaluators(evaluators_and_matrix_channels): + import tentacles + await _test_evaluators_creation(evaluators.RealTimeEvaluator, evaluators_and_matrix_channels, [ + tentacles.InstantFluctuationsEvaluator + ]) + + +async def _test_evaluators_creation(evaluator_parent_class, fixture_matrix_id, expected_evaluators): + tentacles_setup_config = tentacles_api.get_tentacles_setup_config() + + # activate all evaluators in tentacle config + for tentacle_type_key, tentacle_type_value in tentacles_setup_config.tentacles_activation.items(): + for tentacle_name in tentacle_type_value: + tentacles_setup_config.tentacles_activation[tentacle_type_key][tentacle_name] = True + + # mock start method to prevent side effects (octobot-trading imports, etc) + created_evaluators = await evaluators.create_evaluators(evaluator_parent_class=evaluator_parent_class, + tentacles_setup_config=tentacles_setup_config, + matrix_id=fixture_matrix_id, + exchange_name=exchange_name, + bot_id=bot_id, + crypto_currency_name_by_crypto_currencies=crypto_currency_name_by_crypto_currencies, + symbols_by_crypto_currency_tickers=symbols_by_crypto_currency_tickers, + symbols=symbols, + time_frames=time_frames) + assert created_evaluators + for evaluator in created_evaluators: + assert evaluator.__class__ in expected_evaluators + + +async def test_start_evaluators(): + eval_mock = mock.Mock() + eval_mock.start_evaluator = mock.AsyncMock() + with mock.patch.object(evaluator_factory, "_prioritized_evaluators", mock.Mock(return_value=[eval_mock])) \ + as prioritized_evaluators_mock: + await evaluator_factory._start_evaluators([[1, 2], [3, None]], "tentacles_setup_config", "bot_id") + prioritized_evaluators_mock.assert_called_once_with([1, 2, 3], "tentacles_setup_config") + eval_mock.start_evaluator.assert_called_once_with("bot_id") + + +async def test_prioritized_evaluators(): + eval_mock_1 = mock.Mock() + eval_mock_1.get_evaluator_priority = mock.Mock(return_value=constants.DEFAULT_EVALUATOR_PRIORITY) + eval_mock_2 = mock.Mock() + eval_mock_2.get_evaluator_priority = mock.Mock(return_value=constants.DEFAULT_EVALUATOR_PRIORITY) + eval_mock_3 = mock.Mock() + eval_mock_3.get_evaluator_priority = mock.Mock(return_value=constants.DEFAULT_EVALUATOR_PRIORITY) + assert evaluator_factory._prioritized_evaluators( + [eval_mock_1, eval_mock_2, eval_mock_3], + "tentacles_setup_config") == [eval_mock_1, eval_mock_2, eval_mock_3] + eval_mock_1.get_evaluator_priority = mock.Mock(return_value=1) + eval_mock_2.get_evaluator_priority = mock.Mock(return_value=-5.6) + assert evaluator_factory._prioritized_evaluators( + [eval_mock_1, eval_mock_2, eval_mock_3], + "tentacles_setup_config") == [eval_mock_1, eval_mock_3, eval_mock_2] + eval_mock_2 = mock.Mock() + eval_mock_2.get_evaluator_priority = mock.Mock(return_value=5.6) + assert evaluator_factory._prioritized_evaluators( + [eval_mock_1, eval_mock_2, eval_mock_3], + "tentacles_setup_config") == [eval_mock_2, eval_mock_1, eval_mock_3] + + +async def test_extract_traded_pairs(): + exchange_name = "binance" + matrix_id = "id" + exchange_api = ExchangeAPIMock() + + # no symbol config + symbols_by_crypto_currencies = None + crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers = \ + _extract_traded_pairs(symbols_by_crypto_currencies, exchange_name, matrix_id, exchange_api) + assert crypto_currency_name_by_crypto_currencies == {} + assert symbols_by_crypto_currency_tickers == {} + + # normal symbol config + symbols_by_crypto_currencies = { + 'AAVE': ['AAVE/BTC', 'AAVE/USDT'], + 'Cardano': ['ADA/BTC'] + } + crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers = \ + _extract_traded_pairs(symbols_by_crypto_currencies, exchange_name, matrix_id, exchange_api) + assert crypto_currency_name_by_crypto_currencies == { + 'AAVE': 'AAVE', + 'ADA': 'Cardano' + } + assert symbols_by_crypto_currency_tickers == { + 'AAVE': {'AAVE/BTC', 'AAVE/USDT'}, + 'ADA': {'ADA/BTC'} + } + + # AAVE/USDT in Cardano symbol config + symbols_by_crypto_currencies = { + 'AAVE': ['AAVE/BTC'], + 'Cardano': ['AAVE/USDT', 'ADA/BTC'] + } + crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers = \ + _extract_traded_pairs(symbols_by_crypto_currencies, exchange_name, matrix_id, exchange_api) + assert crypto_currency_name_by_crypto_currencies == { + 'AAVE': 'AAVE', + 'ADA': 'Cardano' + } + assert symbols_by_crypto_currency_tickers == { + 'AAVE': {'AAVE/BTC', 'AAVE/USDT'}, + 'ADA': {'ADA/BTC'} + } + + # Many symbol config by reference market + symbols_by_crypto_currencies = { + 'Bitcoin': [ + 'AAVE/BTC', 'ADA/BTC', 'ATOM/BTC', 'BAT/BTC', 'BNB/BTC', 'DASH/BTC', 'DOT/BTC', + 'EOS/BTC', 'ETC/BTC', 'ETH/BTC', 'FIL/BTC', 'LINK/BTC', 'LTC/BTC', 'NEO/BTC', + 'ONT/BTC', 'ROSE/BTC', 'SUSHI/BTC', 'SXP/BTC', 'THETA/BTC', 'TOMO/BTC', 'UNI/BTC', + 'WAN/BTC', 'XLM/BTC', 'XMR/BTC', 'XTZ/BTC', 'YFI/BTC' + ], + 'Tether': [ + 'AAVE/USDT', 'ADA/USDT', 'ATOM/USDT', 'BAT/USDT', 'BNB/USDT', 'BTC/USDT', + 'DASH/USDT', 'DOT/USDT', 'EOS/USDT', 'ETC/USDT', 'ETH/USDT', 'FIL/USDT', + 'LINK/USDT', 'LTC/USDT', 'NEO/USDT', 'ONT/USDT', 'ROSE/USDT', 'SUSHI/USDT', + 'SXP/USDT', 'THETA/USDT', 'TOMO/USDT', 'UNI/USDT', 'WAN/USDT', 'XLM/USDT', + 'XMR/USDT', 'XTZ/USDT', 'YFI/USDT' + ] + } + crypto_currency_name_by_crypto_currencies, symbols_by_crypto_currency_tickers = \ + _extract_traded_pairs(symbols_by_crypto_currencies, exchange_name, matrix_id, exchange_api) + assert crypto_currency_name_by_crypto_currencies == { + 'AAVE': 'Bitcoin', 'ADA': 'Bitcoin', 'ATOM': 'Bitcoin', + 'BAT': 'Bitcoin', 'BNB': 'Bitcoin', 'DASH': 'Bitcoin', + 'DOT': 'Bitcoin', 'EOS': 'Bitcoin', 'ETC': 'Bitcoin', + 'ETH': 'Bitcoin', 'FIL': 'Bitcoin', 'LINK': 'Bitcoin', + 'LTC': 'Bitcoin', 'NEO': 'Bitcoin', 'ONT': 'Bitcoin', + 'ROSE': 'Bitcoin', 'SUSHI': 'Bitcoin', 'SXP': 'Bitcoin', + 'THETA': 'Bitcoin', 'TOMO': 'Bitcoin', 'UNI': 'Bitcoin', + 'WAN': 'Bitcoin', 'XLM': 'Bitcoin', 'XMR': 'Bitcoin', + 'XTZ': 'Bitcoin', 'YFI': 'Bitcoin', 'BTC': 'Tether' + } + assert symbols_by_crypto_currency_tickers == { + 'AAVE': {'AAVE/BTC', 'AAVE/USDT'}, 'ADA': {'ADA/BTC', 'ADA/USDT'}, 'ATOM': {'ATOM/BTC', 'ATOM/USDT'}, + 'BAT': {'BAT/BTC', 'BAT/USDT'}, 'BNB': {'BNB/USDT', 'BNB/BTC'}, 'DASH': {'DASH/USDT', 'DASH/BTC'}, + 'DOT': {'DOT/BTC', 'DOT/USDT'}, 'EOS': {'EOS/BTC', 'EOS/USDT'}, 'ETC': {'ETC/USDT', 'ETC/BTC'}, + 'ETH': {'ETH/USDT', 'ETH/BTC'}, 'FIL': {'FIL/BTC', 'FIL/USDT'}, 'LINK': {'LINK/USDT', 'LINK/BTC'}, + 'LTC': {'LTC/USDT', 'LTC/BTC'}, 'NEO': {'NEO/USDT', 'NEO/BTC'}, 'ONT': {'ONT/BTC', 'ONT/USDT'}, + 'ROSE': {'ROSE/USDT', 'ROSE/BTC'}, 'SUSHI': {'SUSHI/USDT', 'SUSHI/BTC'}, 'SXP': {'SXP/BTC', 'SXP/USDT'}, + 'THETA': {'THETA/USDT', 'THETA/BTC'}, 'TOMO': {'TOMO/BTC', 'TOMO/USDT'}, 'UNI': {'UNI/BTC', 'UNI/USDT'}, + 'WAN': {'WAN/BTC', 'WAN/USDT'}, 'XLM': {'XLM/USDT', 'XLM/BTC'}, 'XMR': {'XMR/BTC', 'XMR/USDT'}, + 'XTZ': {'XTZ/BTC', 'XTZ/USDT'}, 'YFI': {'YFI/USDT', 'YFI/BTC'}, 'BTC': {'BTC/USDT'} + } + + +async def test_filter_pairs(): + exchange_api = ExchangeAPIMock() + exchange_manager = None + + assert _filter_pairs(['BAT/BTC', 'BAT/USDT', 'BNB/USDT', 'BNB/BTC'], 'BAT', exchange_api, exchange_manager) == \ + {'BAT/BTC', 'BAT/USDT'} + + assert _filter_pairs(['BAT/BTC', 'BAT/USDT', 'BNB/USDT', 'BNB/BTC'], 'BNB', exchange_api, exchange_manager) == \ + {'BNB/BTC', 'BNB/USDT'} + + assert _filter_pairs(['BAT/BTC', 'BAT/USDT', 'BNB/USDT', 'BNB/BTC'], 'BTC', exchange_api, exchange_manager) == \ + set() + + assert _filter_pairs(['BAT/BTC', 'BAT/USDT', 'BNB/USDT', 'BNB/BTC'], 'USDT', exchange_api, exchange_manager) == \ + set() + + assert _filter_pairs([], 'USDT', exchange_api, exchange_manager) == \ + set() + + assert _filter_pairs([], 'USDT', exchange_api, exchange_manager) == \ + set() + + +class ExchangeAPIMock: + + def get_exchange_id_from_matrix_id(self, *args): + return "1" + + def get_exchange_manager_from_exchange_name_and_id(self, *args): + return None + + def get_base_currency(self, exchange_manager, symbol): + return symbol_util.parse_symbol(symbol).base diff --git a/packages/evaluators/tests/evaluators/test_evaluator_factory_create_evaluators.py b/packages/evaluators/tests/evaluators/test_evaluator_factory_create_evaluators.py new file mode 100644 index 0000000000..32c0d8c96e --- /dev/null +++ b/packages/evaluators/tests/evaluators/test_evaluator_factory_create_evaluators.py @@ -0,0 +1,282 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +from mock import patch +import octobot_evaluators.evaluators.evaluator_factory as evaluator_factory +from octobot_commons.enums import TimeFrames +import octobot_commons.symbols +import octobot_commons.tentacles_management as tentacles_management + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def symbols_by_crypto_currencies(): + return {"Bitcoin": ["BTC/USD", "BTC/USDT"], "Ethereum": ["ETH/BTC"]} + + +@pytest.fixture +def symbols(): + return ["BTC/USD", "BTC/USDT", "ETH/BTC"] + + +@pytest.fixture +def time_frames(): + return [TimeFrames.THREE_DAYS, TimeFrames.ONE_HOUR] + + +async def test_create_evaluators_no_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoWildCard, symbols_by_crypto_currencies, symbols, time_frames) + assert len(evaluators) == 6 + + # one eval per time frame per symbol (1 symbols) + assert len([e for e in evaluators if e.cryptocurrency == "ETH"]) == len(time_frames) * 1 + # one eval per time frame per symbol (2 symbols) + assert len([e for e in evaluators if e.cryptocurrency == "BTC"]) == len(time_frames) * 2 + + # right crypto-currency name + assert all([e.cryptocurrency_name == "Ethereum" for e in evaluators if e.cryptocurrency == "ETH"]) + assert all([e.cryptocurrency_name == "Bitcoin" for e in evaluators if e.cryptocurrency == "BTC"]) + + # right crypto-currency + assert all([e.cryptocurrency == "ETH" for e in evaluators if e.symbol == "ETH/BTC"]) + assert all([e.cryptocurrency == "BTC" for e in evaluators if e.symbol in ["BTC/USD", "BTC/USDT"]]) + + # right symbol + assert all([e.symbol == "ETH/BTC" for e in evaluators if e.cryptocurrency == "ETH"]) + assert all([e.symbol in ["BTC/USD", "BTC/USDT"] for e in evaluators if e.cryptocurrency == "BTC"]) + # all symbols taken into account + assert len(set([e.symbol for e in evaluators if e.cryptocurrency == "BTC"])) == 2 + assert len(set([e.symbol for e in evaluators if e.cryptocurrency == "ETH"])) == 1 + + # valid time frames + assert all([e.time_frame in time_frames for e in evaluators]) + # all time frames taken into account + assert len(set([e.time_frame for e in evaluators if e.symbol == "BTC/USD"])) == 2 + assert len(set([e.time_frame for e in evaluators if e.symbol == "BTC/USDT"])) == 2 + assert len(set([e.time_frame for e in evaluators if e.symbol == "ETH/BTC"])) == 2 + + +async def test_create_evaluators_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorWildCard, symbols_by_crypto_currencies, symbols, time_frames) + + # only one wild card evaluator created + assert len(evaluators) == 1 + assert evaluators[0].cryptocurrency is evaluators[0].cryptocurrency_name is evaluators[0].symbol is \ + evaluators[0].time_frame is None + + +async def test_create_evaluators_no_cc_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoCCWildCard, symbols_by_crypto_currencies, symbols, time_frames) + + # only difference is the cryptocurrency attribute + assert len(evaluators) == 2 + assert evaluators[0].cryptocurrency_name is evaluators[0].symbol is evaluators[0].time_frame is None + assert sorted([e.cryptocurrency for e in evaluators]) == sorted(["BTC", "ETH"]) + + +async def test_create_evaluators_no_cc_name_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoCCNameWildCard, symbols_by_crypto_currencies, symbols, time_frames) + + # only one wild card evaluator created since EvaluatorNoCCNameWildCard is crypto-currency wild card + assert len(evaluators) == 1 + assert evaluators[0].cryptocurrency is evaluators[0].cryptocurrency_name is evaluators[0].symbol is \ + evaluators[0].time_frame is None + + +async def test_create_evaluators_no_cc_name_and_no_cc_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoCCNameNoCCWildCard, symbols_by_crypto_currencies, + symbols, time_frames) + + # only differences are the cryptocurrency and cryptocurrency_name attribute + assert len(evaluators) == 2 + assert evaluators[0].symbol is evaluators[0].time_frame is None + assert sorted([e.cryptocurrency for e in evaluators]) == sorted(["BTC", "ETH"]) + assert sorted([e.cryptocurrency_name for e in evaluators]) == sorted(["Bitcoin", "Ethereum"]) + + +async def test_create_evaluators_no_symbol_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoSymbolWildCard, symbols_by_crypto_currencies, symbols, time_frames) + + # only difference is the symbol attribute + assert len(evaluators) == 3 + assert evaluators[0].cryptocurrency is evaluators[0].cryptocurrency_name is evaluators[0].time_frame is None + assert [e.symbol for e in evaluators] == symbols + + +async def test_create_evaluators_no_time_frame_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoTimeFrameWildCard, symbols_by_crypto_currencies, + symbols, time_frames) + + # only difference is the time_frame attribute + assert len(evaluators) == 2 + assert evaluators[0].cryptocurrency is evaluators[0].cryptocurrency_name is evaluators[0].symbol is None + assert [e.time_frame for e in evaluators] == time_frames + + +async def test_create_evaluators_no_cc_name_no_symbol_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoCCNameSymbolWildCard, symbols_by_crypto_currencies, + symbols, time_frames) + + # only difference is the symbol attribute since no crypto-currency wildcard name requires also no + # cryptocurrency wildcard + assert len(evaluators) == 3 + assert evaluators[0].cryptocurrency is evaluators[0].cryptocurrency_name is evaluators[0].time_frame is None + assert [e.symbol for e in evaluators] == symbols + + +async def test_create_evaluators_no_cc_time_frame_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoCCTimeFrameWildCard, symbols_by_crypto_currencies, + symbols, time_frames) + + # only differences are on cryptocurrency and time frame attributes + assert len(evaluators) == 4 + assert evaluators[0].symbol is evaluators[0].cryptocurrency_name is None + assert sorted(list(set(e.cryptocurrency for e in evaluators))) == sorted(["ETH", "BTC"]) + tfs = set(e.time_frame for e in evaluators) + assert len(tfs) == 2 + assert all(tf in time_frames for tf in tfs) + + +async def test_create_evaluators_no_symbol_time_frame_wild_card(symbols_by_crypto_currencies, symbols, time_frames): + evaluators = await _create_evaluators(EvaluatorNoSymbolTimeFrameWildCard, symbols_by_crypto_currencies, + symbols, time_frames) + + # only differences are on symbol and time frame attributes + assert len(evaluators) == 6 + assert evaluators[0].cryptocurrency is evaluators[0].cryptocurrency_name is None + assert sorted(list(set(e.symbol for e in evaluators))) == sorted(symbols) + tfs = set(e.time_frame for e in evaluators) + assert len(tfs) == 2 + assert all(tf in time_frames for tf in tfs) + + +async def _create_evaluators(evaluator_parent_class, symbols_by_crypto_currencies, symbols, time_frames): + crypto_currency_name_by_crypto_currencies = {} + symbols_by_crypto_currency_tickers = {} + for name, symbol_list in symbols_by_crypto_currencies.items(): + ticker = octobot_commons.symbols.parse_symbol(symbol_list[0]).base + crypto_currency_name_by_crypto_currencies[ticker] = name + symbols_by_crypto_currency_tickers[ticker] = symbol_list + with patch("octobot_evaluators.evaluators.evaluator_factory.create_evaluator", new=_mocked_create_evaluator), \ + patch("octobot_commons.tentacles_management.get_all_classes_from_parent", + new=_mocked_get_all_classes_from_parent): + return await evaluator_factory.create_evaluators( + evaluator_parent_class=evaluator_parent_class, + tentacles_setup_config=None, + matrix_id="", + exchange_name="", + bot_id="", + crypto_currency_name_by_crypto_currencies=crypto_currency_name_by_crypto_currencies, + symbols_by_crypto_currency_tickers=symbols_by_crypto_currency_tickers, + symbols=symbols, + time_frames=time_frames + ) + + +def _mocked_get_all_classes_from_parent(evaluator_parent_class): + return [evaluator_parent_class] + + +async def _mocked_create_evaluator(evaluator_class, + tentacles_setup_config: object, + matrix_id: str, + exchange_name: str, + bot_id: str, + cryptocurrency: str = None, + cryptocurrency_name: str = None, + symbol: str = None, + time_frame=None, + relevant_evaluators=None, + all_symbols_by_crypto_currencies=None, + time_frames=None, + real_time_time_frames=None, + evaluator_configuration=None): + return evaluator_class(cryptocurrency, cryptocurrency_name, symbol, time_frame, all_symbols_by_crypto_currencies) + + +class EvaluatorWildCard(tentacles_management.AbstractTentacle): + def __init__(self, cryptocurrency, cryptocurrency_name, symbol, time_frame, all_symbols_by_crypto_currencies): + self.cryptocurrency = cryptocurrency + self.cryptocurrency_name = cryptocurrency_name + self.symbol = symbol + self.time_frame = time_frame + self.all_symbols_by_crypto_currencies = all_symbols_by_crypto_currencies + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls): + return True + + @classmethod + def get_is_symbol_wildcard(cls): + return True + + @classmethod + def get_is_time_frame_wildcard(cls): + return True + + + @classmethod + def get_is_cryptocurrencies_wildcard(cls): + return True + + +class EvaluatorNoCCWildCard(EvaluatorWildCard): + @classmethod + def get_is_cryptocurrencies_wildcard(cls): + return False + + +class EvaluatorNoCCNameWildCard(EvaluatorWildCard): + @classmethod + def get_is_cryptocurrency_name_wildcard(cls): + return False + + +class EvaluatorNoSymbolWildCard(EvaluatorWildCard): + @classmethod + def get_is_symbol_wildcard(cls): + return False + + +class EvaluatorNoTimeFrameWildCard(EvaluatorWildCard): + @classmethod + def get_is_time_frame_wildcard(cls): + return False + + +class EvaluatorNoCCNameNoCCWildCard(EvaluatorNoCCNameWildCard, EvaluatorNoCCWildCard): + pass + + +class EvaluatorNoCCNameSymbolWildCard(EvaluatorNoCCNameWildCard, EvaluatorNoSymbolWildCard): + pass + + +class EvaluatorNoCCTimeFrameWildCard(EvaluatorNoCCWildCard, EvaluatorNoTimeFrameWildCard): + pass + + +class EvaluatorNoSymbolTimeFrameWildCard(EvaluatorNoSymbolWildCard, EvaluatorNoTimeFrameWildCard): + pass + + +class EvaluatorNoWildCard(EvaluatorNoCCWildCard, EvaluatorNoCCNameWildCard, + EvaluatorNoSymbolWildCard, EvaluatorNoTimeFrameWildCard): + def __init__(self, *args): + super().__init__(*args) diff --git a/packages/evaluators/tests/matrix/__init__.py b/packages/evaluators/tests/matrix/__init__.py new file mode 100644 index 0000000000..9e32cbc868 --- /dev/null +++ b/packages/evaluators/tests/matrix/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/evaluators/tests/matrix/channel/__init__.py b/packages/evaluators/tests/matrix/channel/__init__.py new file mode 100644 index 0000000000..9e32cbc868 --- /dev/null +++ b/packages/evaluators/tests/matrix/channel/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/evaluators/tests/matrix/channel/test_matrix.py b/packages/evaluators/tests/matrix/channel/test_matrix.py new file mode 100644 index 0000000000..a78bac11c9 --- /dev/null +++ b/packages/evaluators/tests/matrix/channel/test_matrix.py @@ -0,0 +1,92 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +from octobot_evaluators.api.evaluators import create_matrix +from octobot_evaluators.api.initialization import create_evaluator_channels, del_evaluator_channels +from octobot_evaluators.evaluators.channel.evaluator_channel import get_chan +from octobot_evaluators.constants import MATRIX_CHANNEL +from octobot_evaluators.matrix.matrix_manager import get_tentacle_path +from octobot_evaluators.matrix.matrices import Matrices + +MATRIX_TEST_ID = "test" + + +async def matrix_callback(matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + pass + + +@pytest.mark.asyncio +async def test_evaluator_channel_creation(): + del_evaluator_channels(MATRIX_TEST_ID) + await create_evaluator_channels(MATRIX_TEST_ID) + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).new_consumer(matrix_callback) + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).stop() + + +@pytest.mark.asyncio +async def test_evaluator_channel_send(): + del_evaluator_channels(MATRIX_TEST_ID) + matrix_id = create_matrix() + await create_evaluator_channels(MATRIX_TEST_ID) + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).new_consumer(matrix_callback) + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).get_internal_producer().send(matrix_id=matrix_id, + evaluator_name="test", + evaluator_type="test2", + eval_note=1) + + # following assert should be None because send() doesn't call set_tentacle_value + assert Matrices.instance().get_matrix(matrix_id).get_node_at_path( + get_tentacle_path(tentacle_name="test", tentacle_type="test2")) is None + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).stop() + Matrices.instance().del_matrix(matrix_id) + + +@pytest.mark.asyncio +async def test_evaluator_channel_send_eval_note(): + del_evaluator_channels(MATRIX_TEST_ID) + matrix_id = create_matrix() + await create_evaluator_channels(MATRIX_TEST_ID) + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).new_consumer(matrix_callback) + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).get_internal_producer().send_eval_note(matrix_id=matrix_id, + evaluator_name="test", + evaluator_type="test2", + eval_note=1, + eval_note_type=int, + eval_note_description="test description", + eval_time=1000, + eval_note_metadata={"test": "test"}) + + assert Matrices.instance().get_matrix(matrix_id).get_node_at_path( + get_tentacle_path(tentacle_name="test", tentacle_type="test2")).node_value == 1 + assert Matrices.instance().get_matrix(matrix_id).get_node_at_path( + get_tentacle_path(tentacle_name="test", tentacle_type="test2")).node_description == "test description" + assert Matrices.instance().get_matrix(matrix_id).get_node_at_path( + get_tentacle_path(tentacle_name="test", tentacle_type="test2")).node_metadata == {"test": "test"} + assert Matrices.instance().get_matrix(matrix_id).get_node_at_path( + get_tentacle_path(tentacle_name="test", tentacle_type="test2")).node_value_time == 1000 + await get_chan(MATRIX_CHANNEL, MATRIX_TEST_ID).stop() + Matrices.instance().del_matrix(matrix_id) diff --git a/packages/evaluators/tests/matrix/test_matrices.py b/packages/evaluators/tests/matrix/test_matrices.py new file mode 100644 index 0000000000..188be1c10a --- /dev/null +++ b/packages/evaluators/tests/matrix/test_matrices.py @@ -0,0 +1,64 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +from octobot_evaluators.api.evaluators import create_matrix +from octobot_evaluators.matrix.matrix import Matrix +from octobot_evaluators.matrix.matrices import Matrices + + +def cleanup_matrices(): + matrices = Matrices.instance() + for m_id in list(matrices.matrices): + Matrices.instance().del_matrix(m_id) + + +def test_default_matrices(): + cleanup_matrices() + matrices = Matrices.instance() + assert matrices.matrices == {} + + +@pytest.mark.asyncio +async def test_add_matrix(): + matrices = Matrices.instance() + assert matrices.matrices == {} + + created_matrix: Matrix = Matrix() + Matrices.instance().add_matrix(created_matrix) + + assert matrices.matrices != {} + assert created_matrix.matrix_id in matrices.matrices + + +@pytest.mark.asyncio +async def test_get_matrix(): + matrix_id = create_matrix() + + assert Matrices.instance().get_matrix(matrix_id) is not None + + with pytest.raises(KeyError): + assert Matrices.instance().get_matrix(matrix_id + "t") is None + + +@pytest.mark.asyncio +async def test_del_matrix(): + matrices = Matrices.instance() + matrix_id = create_matrix() + + assert matrix_id in matrices.matrices + Matrices.instance().del_matrix(matrix_id) + assert matrix_id not in matrices.matrices diff --git a/packages/evaluators/tests/matrix/test_matrix.py b/packages/evaluators/tests/matrix/test_matrix.py new file mode 100644 index 0000000000..6bc322fa3e --- /dev/null +++ b/packages/evaluators/tests/matrix/test_matrix.py @@ -0,0 +1,76 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +from octobot_evaluators.matrix.matrix import Matrix + + +def test_default_matrix(): + matrix = Matrix() + assert matrix.matrix.root.children == {} + + +@pytest.mark.asyncio +async def test_get_node_at_path(): + matrix = Matrix() + test_node_path = ["test-path", "test-path-2", "test-path3", 4] + created_node = matrix.matrix.get_or_create_node(test_node_path) + assert matrix.get_node_at_path(test_node_path) is created_node + + +@pytest.mark.asyncio +async def test_set_tentacle_value(): + matrix = Matrix() + test_node_path = ["test-path", "test-path-2"] + matrix.set_node_value("test-value", str, test_node_path) + assert matrix.matrix.get_or_create_node(test_node_path).node_type == str + assert matrix.matrix.get_or_create_node(test_node_path).node_value == "test-value" + + +@pytest.mark.asyncio +async def test_get_node_children_at_path(): + matrix = Matrix() + test_node_1_path = ["test-path-parent", "test-path-child", "test-path-1"] + test_node_2_path = ["test-path-parent", "test-path-child", "test-path-2"] + test_node_3_path = ["test-path-parent", "test-path-child", "test-path-3"] + created_node_1 = matrix.matrix.get_or_create_node(test_node_1_path) + created_node_2 = matrix.matrix.get_or_create_node(test_node_2_path) + created_node_3 = matrix.matrix.get_or_create_node(test_node_3_path) + assert matrix.get_node_children_at_path(["test-path-parent", "test-path-child"]) == [created_node_1, + created_node_2, + created_node_3] + + +@pytest.mark.asyncio +async def test_get_node_children_by_names_at_path(): + matrix = Matrix() + test_node_1_path = ["test-path-parent", "test-path-child", "test-path-1"] + test_node_2_path = ["test-path-parent", "test-path-child", "test-path-2"] + test_node_3_path = ["test-path-parent", "test-path-child", "test-path-3"] + test_node_4_path = ["test-path-parent", "test-path-4"] + test_node_5_path = ["test-path-parent", "test-path-4", "test-path-5"] + test_node_6_path = ["test-path-parent", "test-path-child", "test-path-2", "test-path-6"] + created_node_1 = matrix.matrix.get_or_create_node(test_node_1_path) + created_node_2 = matrix.matrix.get_or_create_node(test_node_2_path) + created_node_3 = matrix.matrix.get_or_create_node(test_node_3_path) + created_node_4 = matrix.matrix.get_or_create_node(test_node_4_path) + created_node_5 = matrix.matrix.get_or_create_node(test_node_5_path) + created_node_6 = matrix.matrix.get_or_create_node(test_node_6_path) + assert matrix.get_node_children_by_names_at_path(["test-path-parent", "test-path-child"]) == { + "test-path-1": created_node_1, + "test-path-2": created_node_2, + "test-path-3": created_node_3 + } diff --git a/packages/evaluators/tests/matrix/test_matrix_manager.py b/packages/evaluators/tests/matrix/test_matrix_manager.py new file mode 100644 index 0000000000..7b91e06d82 --- /dev/null +++ b/packages/evaluators/tests/matrix/test_matrix_manager.py @@ -0,0 +1,662 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time + +import pytest +from octobot_commons.constants import MINUTE_TO_SECONDS, START_PENDING_EVAL_NOTE + +from octobot_commons.enums import TimeFramesMinutes, TimeFrames + +from octobot_evaluators.matrix.matrix import Matrix +from octobot_evaluators.matrix.matrix_manager import get_tentacle_path, get_tentacle_value_path, \ + get_tentacle_nodes, get_tentacles_value_nodes, get_matrix_default_value_path, set_tentacle_value, \ + get_tentacle_value, get_tentacle_node, get_available_symbols, \ + is_tentacle_value_valid, is_tentacles_values_valid, get_evaluations_by_evaluator, get_available_time_frames, \ + delete_tentacle_node +from octobot_evaluators.errors import UnsetTentacleEvaluation +from octobot_evaluators.matrix.matrices import Matrices + + +@pytest.mark.asyncio +async def test_set_tentacle_value(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + set_tentacle_value(matrix.matrix_id, tentacle_type="TA", tentacle_value=0, tentacle_path=["test-path"]) + assert matrix.get_node_at_path(["test-path"]).node_value == 0 + + set_tentacle_value(matrix.matrix_id, tentacle_type="TA", tentacle_value="value", tentacle_path= + get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA", exchange_name="binance")) + assert matrix.get_node_at_path( + get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA", exchange_name="binance")).node_value == "value" + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_value(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + assert not get_tentacle_value(matrix.matrix_id, tentacle_path=["Test-TA"]) + + matrix.matrix.get_or_create_node(path=["Test-TA"]) + matrix.set_node_value(value_type="TA", value_path=["Test-TA"], value=0) + assert get_tentacle_value(matrix.matrix_id, tentacle_path=["Test-TA"]) == 0 + + matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA")) + matrix.set_node_value(value_type="TA", value_path=get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA"), + value="test") + assert get_tentacle_value(matrix.matrix_id, tentacle_path=get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA")) == "test" + Matrices.instance().del_matrix(matrix.matrix_id) + + +def test_get_matrix_default_value_path(): + assert get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", exchange_name="binance") == \ + get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA", exchange_name="binance") + assert get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + symbol="ETH", time_frame="1h") == \ + get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA") + \ + get_tentacle_value_path(symbol="ETH", time_frame="1h") + assert get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + symbol="ETH", exchange_name="bitmex") == \ + get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA", exchange_name="bitmex") + \ + get_tentacle_value_path(symbol="ETH") + + +def test_get_tentacle_path(): + assert get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA", exchange_name="binance") == ["binance", "TA", + "Test-TA"] + assert get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA") == ["TA", "Test-TA"] + assert get_tentacle_path(exchange_name="binance", tentacle_name="Test-TA") == ["binance", "Test-TA"] + assert get_tentacle_path(tentacle_name="Test-TA") == ["Test-TA"] + + +def test_get_tentacle_value_path(): + assert get_tentacle_value_path() == [] + assert get_tentacle_value_path(symbol="BTC") == ["BTC"] + assert get_tentacle_value_path(time_frame="1m") == ["1m"] + assert get_tentacle_value_path(symbol="ETH", time_frame="1h") == ["ETH", "1h"] + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_on_root(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node( + get_tentacle_path(tentacle_type="NO_TYPE", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node( + get_tentacle_path(exchange_name="binance", tentacle_name="Test-TA-2")) + created_node_3 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_name="Test-TA-3")) + assert get_tentacle_nodes(matrix.matrix_id) == [matrix.get_node_at_path(get_tentacle_path(tentacle_type="NO_TYPE")), + matrix.get_node_at_path(get_tentacle_path(exchange_name="binance")), + created_node_3] + + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="TA") == [] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="bitfinex") == [] + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="NO_TYPE") == [created_node_1] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance") == [created_node_2] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_on_tentacle_type(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node( + get_tentacle_path(tentacle_type="NO_TYPE", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node( + get_tentacle_path(tentacle_type="TEST_TYPE", tentacle_name="Test-TA-2")) + + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="TA") == [] + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="NO_TYPE") == [created_node_1] + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="TEST_TYPE") == [created_node_2] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_on_exchange_name_and_tentacle_type(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="NO_TYPE", + tentacle_name="Test-TA", + exchange_name="binance")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TEST_TYPE", + tentacle_name="Test-TA-2", + exchange_name="binance")) + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance") == [ + matrix.get_node_at_path(get_tentacle_path(exchange_name="binance", + tentacle_type="NO_TYPE")), + matrix.get_node_at_path(get_tentacle_path(exchange_name="binance", + tentacle_type="TEST_TYPE"))] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance", tentacle_type="NO_TYPE") == [created_node_1] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance", tentacle_type="TEST_TYPE") == [created_node_2] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="bitfinex") == [] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_on_exchange_name(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_name="Test-TA", + exchange_name="binance")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_name="Test-TA-2", + exchange_name="binance")) + + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="bitfinex") == [] + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="NO_TYPE") == [] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance") == [created_node_1, created_node_2] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_on_multiple_tentacle_type(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-2")) + created_node_3 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-3")) + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="TA") == [created_node_1, created_node_2, created_node_3] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_on_multiple_tentacle_type_and_exchange_name(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA", + exchange_name="binance")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA-2", + exchange_name="binance")) + created_node_3 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA-3", + exchange_name="binance")) + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance", tentacle_type="TA") == [created_node_1, + created_node_2, + created_node_3] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacle_nodes_mixed(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node_1 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA", + exchange_name="binance")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA-2", + exchange_name="bitfinex")) + created_node_3 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", + tentacle_name="Test-TA-3", + exchange_name="binance")) + created_node_4 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TEST-TYPE", + tentacle_name="Test-TA-4", + exchange_name="binance")) + created_node_5 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TEST-TYPE", + tentacle_name="Test-TA-5", + exchange_name="bitfinex")) + created_node_6 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TEST-TYPE", + tentacle_name="Test-TA-6")) + created_node_7 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_name="Test-TA-7", + exchange_name="bitfinex")) + created_node_8 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_name="Test-TA-8")) + created_node_9 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_name="Test-TA-9", + exchange_name="binance")) + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance", tentacle_type="TA") == [created_node_1, + created_node_3] + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance", tentacle_type="TEST-TYPE") == [created_node_4] + + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="bitfinex", tentacle_type="TEST-TYPE") == [created_node_5] + + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="bitfinex") == [ + matrix.get_node_at_path(get_tentacle_path(exchange_name="bitfinex", tentacle_type="TA")), + matrix.get_node_at_path(get_tentacle_path(exchange_name="bitfinex", tentacle_type="TEST-TYPE")), + created_node_7] + + assert get_tentacle_nodes(matrix.matrix_id, tentacle_type="TEST-TYPE") == [created_node_6] + + assert get_tentacle_nodes(matrix.matrix_id, exchange_name="binance") == [ + matrix.get_node_at_path(get_tentacle_path(exchange_name="binance", tentacle_type="TA")), + matrix.get_node_at_path(get_tentacle_path(exchange_name="binance", tentacle_type="TEST-TYPE")), + created_node_9] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacles_value_nodes_with_symbol(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + created_node = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-2")) + btc_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="BTC"), starting_node=created_node) + eth_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH"), starting_node=created_node) + btc_node_2 = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="BTC"), starting_node=created_node_2) + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2], + symbol="BTC") == [btc_node, btc_node_2] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2], + symbol="ETH") == [eth_node] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacles_value_nodes_with_time_frame(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + assert get_tentacle_value_path(time_frame="1m") == ["1m"] + created_node = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-2")) + m_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(time_frame="1m"), starting_node=created_node) + h_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(time_frame="1h"), starting_node=created_node) + h_node_2 = matrix.matrix.get_or_create_node(get_tentacle_value_path(time_frame="1h"), starting_node=created_node_2) + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2], + time_frame="1h") == [h_node, h_node_2] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2], + symbol="1m") == [m_node] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacles_value_nodes_with_symbol_and_time_frame(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + assert get_tentacle_value_path(symbol="ETH", time_frame="1h") == ["ETH", "1h"] + + created_node = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-2")) + created_node_3 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-3")) + btc_h_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="BTC", time_frame="1h"), + starting_node=created_node) + btc_m_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="BTC", time_frame="1m"), + starting_node=created_node_2) + eth_h_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH", time_frame="1h"), + starting_node=created_node_3) + eth_m_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH", time_frame="1m"), + starting_node=created_node_2) + eth_d_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH", time_frame="1d"), + starting_node=created_node) + ltc_h_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="LTC", time_frame="1h"), + starting_node=created_node_2) + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2, created_node_3], + symbol="BTC", time_frame="1h") == [btc_h_node] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2, created_node_3], + symbol="BTC") == [ + matrix.get_node_at_path(get_tentacle_value_path(symbol="BTC"), starting_node=created_node), + matrix.get_node_at_path(get_tentacle_value_path(symbol="BTC"), starting_node=created_node_2)] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_3], + symbol="BTC") == [ + matrix.get_node_at_path(get_tentacle_value_path(symbol="BTC"), starting_node=created_node)] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_3], + symbol="BTC", time_frame="1m") == [] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node_3], + symbol="BTC") == [] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2, created_node_3], + symbol="ETH") == [ + matrix.get_node_at_path(get_tentacle_value_path(symbol="ETH"), starting_node=created_node), + matrix.get_node_at_path(get_tentacle_value_path(symbol="ETH"), starting_node=created_node_2), + matrix.get_node_at_path(get_tentacle_value_path(symbol="ETH"), starting_node=created_node_3)] + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_tentacles_value_nodes_mixed(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + created_node = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA")) + created_node_2 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-2")) + created_node_3 = matrix.matrix.get_or_create_node(get_tentacle_path(tentacle_type="TA", tentacle_name="Test-TA-3")) + btc_h_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="BTC", time_frame="1h"), + starting_node=created_node) + btc_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="BTC"), starting_node=created_node_2) + eth_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH"), starting_node=created_node_3) + eth_m_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH", time_frame="1m"), + starting_node=created_node_2) + eth_d_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="ETH", time_frame="1d"), + starting_node=created_node) + ltc_h_node = matrix.matrix.get_or_create_node(get_tentacle_value_path(symbol="LTC", time_frame="1h"), + starting_node=created_node_2) + + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2, created_node_3], + symbol="BTC", time_frame="1h") == [btc_h_node] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2, created_node_3], + symbol="BTC") == [ + matrix.get_node_at_path(get_tentacle_value_path(symbol="BTC"), starting_node=created_node), + matrix.get_node_at_path(get_tentacle_value_path(symbol="BTC"), starting_node=created_node_2)] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_3], + symbol="BTC") == [ + matrix.get_node_at_path(get_tentacle_value_path(symbol="BTC"), starting_node=created_node)] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_3], + symbol="BTC", time_frame="1m") == [] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node_3], + symbol="BTC") == [] + assert get_tentacles_value_nodes(matrix.matrix_id, tentacle_nodes=[created_node, created_node_2, created_node_3], + symbol="ETH") == [ + matrix.get_node_at_path(get_tentacle_value_path(symbol="ETH"), starting_node=created_node), + matrix.get_node_at_path(get_tentacle_value_path(symbol="ETH"), starting_node=created_node_2), + matrix.get_node_at_path(get_tentacle_value_path(symbol="ETH"), starting_node=created_node_3)] + Matrices.instance().del_matrix(matrix.matrix_id) + + +def test_delete_tentacle_node(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + evaluator_1_path = get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_2_path = get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + + # simulate AbstractEvaluator.initialize() + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", None) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None) + + assert delete_tentacle_node(matrix.matrix_id, ["non_existing"]) is None + + # deleted: returned the deleted node + assert delete_tentacle_node(matrix.matrix_id, evaluator_2_path) is not None + + # already deleted + assert delete_tentacle_node(matrix.matrix_id, evaluator_2_path) is None + + + +@pytest.mark.asyncio +async def test_is_tentacle_value_valid(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + evaluator_1_path = get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_2_path = get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + + # simulate AbstractEvaluator.initialize() + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", None) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None) + + get_tentacle_node(matrix.matrix_id, evaluator_1_path).node_value_time = time.time() + assert is_tentacle_value_valid(matrix.matrix_id, evaluator_1_path) + assert not is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path) + + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, timestamp=100) + assert not is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path) + + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * 2 * MINUTE_TO_SECONDS) + assert not is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path) + + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * MINUTE_TO_SECONDS) + assert is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path) + + # test non existing node + with pytest.raises(KeyError): + is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path + ["other"]) + + # test delta + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * MINUTE_TO_SECONDS - 10) + assert not is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path) + + # test delta + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * MINUTE_TO_SECONDS - 9) + assert is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path) + + # test modified delta + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * MINUTE_TO_SECONDS - 29) + assert is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path, delta=30) + + # test modified delta + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * MINUTE_TO_SECONDS - 31) + assert not is_tentacle_value_valid(matrix.matrix_id, evaluator_2_path, delta=30) + + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_is_tentacles_values_valid(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + evaluator_1_path = get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_2_path = get_matrix_default_value_path(tentacle_type="TA", tentacle_name="Test-TA", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + + # simulate AbstractEvaluator.initialize() + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", None) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None) + + assert not is_tentacles_values_valid(matrix.matrix_id, [evaluator_1_path, evaluator_2_path]) + + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_MINUTE] * 2 * MINUTE_TO_SECONDS) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * 2 * MINUTE_TO_SECONDS) + assert not is_tentacles_values_valid(matrix.matrix_id, [evaluator_1_path, evaluator_2_path]) + + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_MINUTE] * MINUTE_TO_SECONDS) + assert not is_tentacles_values_valid(matrix.matrix_id, [evaluator_1_path, evaluator_2_path]) + + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", None, + timestamp=time.time() - TimeFramesMinutes[TimeFrames.ONE_HOUR] * MINUTE_TO_SECONDS) + assert is_tentacles_values_valid(matrix.matrix_id, [evaluator_1_path, evaluator_2_path]) + Matrices.instance().del_matrix(matrix.matrix_id) + + +@pytest.mark.asyncio +async def test_get_evaluations_by_evaluator(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + evaluator_1_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_2_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="ADX", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_3_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="ADX", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + + # simulate AbstractEvaluator.initialize() + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", 1) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", -0.5) + set_tentacle_value(matrix.matrix_id, evaluator_3_path, "TA", 0) + + assert get_evaluations_by_evaluator(matrix.matrix_id, + tentacle_type="TA", + exchange_name="kraken", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") == { + "RSI": get_tentacle_node(matrix.matrix_id, evaluator_1_path), + "ADX": get_tentacle_node(matrix.matrix_id, evaluator_2_path) + } + + # set invalid value to not to add this value in result dict + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", START_PENDING_EVAL_NOTE) + with pytest.raises(UnsetTentacleEvaluation): + get_evaluations_by_evaluator(matrix.matrix_id, + tentacle_type="TA", + exchange_name="kraken", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m", + allow_missing=False) + assert get_evaluations_by_evaluator(matrix.matrix_id, + tentacle_type="TA", + exchange_name="kraken", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m", + allow_missing=True) == { + "RSI": get_tentacle_node(matrix.matrix_id, evaluator_1_path) + } + assert get_evaluations_by_evaluator(matrix.matrix_id, + tentacle_type="TA", + exchange_name="kraken", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m", + allow_missing=True, + allowed_values=[START_PENDING_EVAL_NOTE]) == { + "RSI": get_tentacle_node(matrix.matrix_id, evaluator_1_path), + "ADX": get_tentacle_node(matrix.matrix_id, evaluator_2_path) + } + + # invalid path + assert get_evaluations_by_evaluator(matrix.matrix_id, + tentacle_type="TA_invalid", + exchange_name="kraken", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m", + allow_missing=True, + allowed_values=[START_PENDING_EVAL_NOTE]) == {} + + +@pytest.mark.asyncio +async def test_get_available_time_frames(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + evaluator_1_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_2_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="ADX", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_3_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="ADX", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + evaluator_4_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + + # simulate AbstractEvaluator.initialize() + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", 1) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", -0.5) + set_tentacle_value(matrix.matrix_id, evaluator_3_path, "TA", 0) + set_tentacle_value(matrix.matrix_id, evaluator_4_path, "TA", -1) + + assert get_available_time_frames(matrix.matrix_id, + exchange_name="kraken", + tentacle_type="TA", + cryptocurrency="BTC", + symbol="BTC/USD") == ["1m", "1h"] + + # invalid path + assert get_available_time_frames(matrix.matrix_id, + exchange_name="kraken", + tentacle_type="TA_invalid", + cryptocurrency="BTC", + symbol="BTC/USD") == [] + + +@pytest.mark.asyncio +async def test_get_available_symbols(): + matrix = Matrix() + Matrices.instance().add_matrix(matrix) + + evaluator_1_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1m") + evaluator_2_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTCX/USDC", + time_frame="1m") + evaluator_3_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTCX/USDC", + time_frame="1h") + evaluator_4_path = get_matrix_default_value_path(tentacle_type="TA", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTC", + symbol="BTC/USD", + time_frame="1h") + + # simulate AbstractEvaluator.initialize() + set_tentacle_value(matrix.matrix_id, evaluator_1_path, "TA", 1) + set_tentacle_value(matrix.matrix_id, evaluator_2_path, "TA", -0.5) + set_tentacle_value(matrix.matrix_id, evaluator_3_path, "TA", 0) + set_tentacle_value(matrix.matrix_id, evaluator_4_path, "TA", -1) + + assert get_available_symbols(matrix.matrix_id, + exchange_name="kraken", + cryptocurrency="BTC") == ["BTC/USD", "BTCX/USDC"] + + # invalid path + assert get_available_symbols(matrix.matrix_id, exchange_name="invalid_exchange", cryptocurrency="BTC") == [] + assert get_available_symbols(matrix.matrix_id, exchange_name="kraken", cryptocurrency="BTCX") == [] + assert get_available_symbols(matrix.matrix_id, exchange_name="invalid_exchange", cryptocurrency="BTCX") == [] + + # now valid using real-time evaluation + evaluator_5_path = get_matrix_default_value_path(tentacle_type="REAL_TIME", + exchange_name="kraken", + tentacle_name="RSI", + cryptocurrency="BTCX", + symbol="BTCX/USD", + time_frame="1h") + set_tentacle_value(matrix.matrix_id, evaluator_5_path, "REAL_TIME", -1) + assert get_available_symbols(matrix.matrix_id, exchange_name="kraken", cryptocurrency="BTCX") == ["BTCX/USD"] diff --git a/packages/evaluators/tests/requirements/test_tulipy.py b/packages/evaluators/tests/requirements/test_tulipy.py new file mode 100644 index 0000000000..c3214dbb12 --- /dev/null +++ b/packages/evaluators/tests/requirements/test_tulipy.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +import numpy +import tulipy + + +DATA = numpy.array([81.59, 81.06, 82.87, 83, 83.61, + 83.15, 82.84, 83.99, 84.55, 84.36, + 85.53, 86.54, 86.89, 87.77, 87.29]) + +""" +The goal of these tests is to ensure the Tulipy technical evaluators lib is properly working within pure python +and cythonized environments. +""" + + +def test_TA_basics(): + rsi_result = tulipy.rsi(DATA, period=9) + assert all(74 < v < 86 + for v in rsi_result) + assert len(rsi_result) == 6 + sma_result = tulipy.sma(DATA, period=9) + assert all(74 < v < 86 + for v in sma_result) + assert len(sma_result) == 7 + bands = tulipy.bbands(DATA, period=9, stddev=2) + assert all(80 < v < 89 + for band in bands + for v in band) + assert len(bands) == 3 diff --git a/packages/evaluators/tests/static/config.json b/packages/evaluators/tests/static/config.json new file mode 100644 index 0000000000..fc357d2255 --- /dev/null +++ b/packages/evaluators/tests/static/config.json @@ -0,0 +1,32 @@ +{ + "time_frame": ["1h", "4h"], + "crypto-currencies":{ + "Bitcoin": { + "pairs" : ["BTC/USDT"] + }, + "Ethereum": { + "pairs" : ["ETH/USDT", "ETH/BTC"] + } + }, + "exchanges": { + "binance": { + "api-key": "", + "api-secret": "", + "web-socket": false + } + }, + "trading":{ + "risk": 1, + "reference-market": "BTC" + }, + "trader":{ + "enabled": false + }, + "trader-simulator":{ + "enabled": true, + "starting-portfolio": { + "BTC": 10, + "USDT": 1000 + } + } +} \ No newline at end of file diff --git a/packages/evaluators/tests/static/tentacles.zip b/packages/evaluators/tests/static/tentacles.zip new file mode 100644 index 0000000000..0998816e20 Binary files /dev/null and b/packages/evaluators/tests/static/tentacles.zip differ diff --git a/packages/evaluators/tests/util/__init__.py b/packages/evaluators/tests/util/__init__.py new file mode 100644 index 0000000000..9e32cbc868 --- /dev/null +++ b/packages/evaluators/tests/util/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/evaluators/tests/util/test_evaluation_util.py b/packages/evaluators/tests/util/test_evaluation_util.py new file mode 100644 index 0000000000..974244e57d --- /dev/null +++ b/packages/evaluators/tests/util/test_evaluation_util.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Evaluators +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +from octobot_commons.enums import PriceIndexes, TimeFrames +from octobot_evaluators.util.evaluation_util import get_eval_time, get_shortest_time_frame + + +def test_get_eval_time(): + candle = [] + candle.insert(PriceIndexes.IND_PRICE_TIME.value, 123456) + partial_candle = [] + partial_candle.insert(PriceIndexes.IND_PRICE_TIME.value, 1234567) + kline = [] + kline.insert(PriceIndexes.IND_PRICE_TIME.value, 12345678) + assert get_eval_time(full_candle=candle, time_frame=TimeFrames.ONE_HOUR) == 123456 + 60 * 60 + assert get_eval_time(partial_candle=partial_candle) == 1234567 + assert get_eval_time(kline=kline) == 12345678 + + +def test_get_shortest_time_frame(): + pref_time_frames = [TimeFrames.ONE_HOUR, TimeFrames.ONE_DAY, TimeFrames.ONE_MONTH] + assert get_shortest_time_frame(TimeFrames.ONE_HOUR, pref_time_frames, []) == TimeFrames.ONE_HOUR + assert get_shortest_time_frame(TimeFrames.ONE_MINUTE, pref_time_frames, [TimeFrames.ONE_WEEK]) == TimeFrames.ONE_HOUR + assert get_shortest_time_frame(TimeFrames.ONE_MINUTE, [], [TimeFrames.ONE_MONTH]) == TimeFrames.ONE_MONTH + assert get_shortest_time_frame(TimeFrames.ONE_MINUTE, [], [TimeFrames.ONE_MONTH, TimeFrames.ONE_DAY]) == TimeFrames.ONE_DAY + assert get_shortest_time_frame(TimeFrames.ONE_MINUTE, [], [TimeFrames.ONE_HOUR, TimeFrames.ONE_MONTH]) == TimeFrames.ONE_HOUR diff --git a/packages/flow/BUILD b/packages/flow/BUILD new file mode 100644 index 0000000000..692c6c9db8 --- /dev/null +++ b/packages/flow/BUILD @@ -0,0 +1,9 @@ +python_sources(name="octobot_flow", sources=["octobot_flow/**/*.py"]) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_flow", + ], +) \ No newline at end of file diff --git a/packages/flow/README.md b/packages/flow/README.md new file mode 100644 index 0000000000..8929b22faa --- /dev/null +++ b/packages/flow/README.md @@ -0,0 +1,3 @@ +# Mini OctoBot + +OctoBot automations runner \ No newline at end of file diff --git a/packages/flow/octobot_flow/__init__.py b/packages/flow/octobot_flow/__init__.py new file mode 100644 index 0000000000..928bcfaf6c --- /dev/null +++ b/packages/flow/octobot_flow/__init__.py @@ -0,0 +1,30 @@ +import octobot_commons.logging + +_import_tentacles = False +try: + import tentacles + _import_tentacles = True +except ImportError: + octobot_commons.logging.get_logger("octobot_flow").info( + "tentacles is not installed, tentacles operators will not be available" + ) + +if _import_tentacles: + from octobot_flow.jobs.automation_job import AutomationJob + from octobot_flow.entities import ( + AbstractActionDetails, + parse_action_details, + AutomationState, + ActionsDAG, + TradingSignal, + ) + + + __all__ = [ + "AutomationJob", + "AbstractActionDetails", + "parse_action_details", + "ActionsDAG", + "AutomationState", + "TradingSignal", + ] diff --git a/packages/flow/octobot_flow/constants.py b/packages/flow/octobot_flow/constants.py new file mode 100644 index 0000000000..2751efbab4 --- /dev/null +++ b/packages/flow/octobot_flow/constants.py @@ -0,0 +1,22 @@ +import os + +import octobot_commons.os_util as os_util +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants + +import octobot_copy.constants as copy_constants + + +SAVE_STATE_AFTER_EVERY_ACTION = os_util.parse_boolean_environment_var("SAVE_STATE_AFTER_EVERY_ACTION", "false") + +DEFAULT_EXTERNAL_TRIGGER_ONLY_NO_ORDER_TIMEFRAME = commons_enums.TimeFrames.ONE_DAY + +# Copy-trading mirrored open-order grace (aligned with octobot_copy fill timeout by default) +DEFAULT_COPY_TRADING_ORPHAN_CANCEL_GRACE_SECONDS = float(copy_constants.FILL_ORDER_TIMEOUT) +DEFAULT_COPY_TRADING_ORPHAN_GRACE_ABORT_THRESHOLD = 2 +DEFAULT_COPY_TRADING_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD = ( + copy_constants.DEFAULT_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD +) +DEFAULT_COPY_TRADING_ORPHAN_GRACE_PAIR_RATIO_MAX_DELTA = ( + copy_constants.DEFAULT_MIRRORED_ORPHAN_GRACE_PAIR_RATIO_MAX_DELTA +) diff --git a/packages/flow/octobot_flow/encryption/__init__.py b/packages/flow/octobot_flow/encryption/__init__.py new file mode 100644 index 0000000000..ec2ff1a0ed --- /dev/null +++ b/packages/flow/octobot_flow/encryption/__init__.py @@ -0,0 +1,8 @@ +from octobot_flow.encryption.decrypt import ( + decrypted_bots_configurations, +) + + +__all__ = [ + "decrypted_bots_configurations", +] diff --git a/packages/flow/octobot_flow/encryption/decrypt.py b/packages/flow/octobot_flow/encryption/decrypt.py new file mode 100644 index 0000000000..4cb1c523aa --- /dev/null +++ b/packages/flow/octobot_flow/encryption/decrypt.py @@ -0,0 +1,40 @@ +import contextlib + +import octobot_flow.entities +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + + +@contextlib.contextmanager +def decrypted_bots_configurations( + automation_state: octobot_flow.entities.AutomationState +): + try: + if automation_state.exchange_account_details: + _decrypt_exchange_credentials(automation_state.exchange_account_details.auth_details) + yield automation_state + finally: + if automation_state.exchange_account_details: + _clear_decrypted_exchange_credentials(automation_state.exchange_account_details.exchange_details) + + +def _decrypt_exchange_credentials( + auth_details: exchange_data_import.ExchangeAuthDetails +): # pylint: disable=undefined-variable + if auth_details.encrypted: + raise NotImplementedError("_decrypt_exchange_credentials not implemented") + # todo + message = pgpy.PGPMessage.from_blob(base64.b64decode(auth_details.encrypted)) + decrypted = json.loads(message.decrypt(api_key).message) + auth_details.api_key = decrypted.get("apiKey", "") + auth_details.api_secret = decrypted.get("apiSecret", "") + auth_details.api_password = decrypted.get("password", "") + auth_details.access_token = decrypted.get("accessToken", "") + + +def _clear_decrypted_exchange_credentials( + auth_details: exchange_data_import.ExchangeAuthDetails +): + auth_details.api_key = "" + auth_details.api_secret = "" + auth_details.api_password = "" + auth_details.access_token = "" diff --git a/packages/flow/octobot_flow/entities/__init__.py b/packages/flow/octobot_flow/entities/__init__.py new file mode 100644 index 0000000000..43840f82cf --- /dev/null +++ b/packages/flow/octobot_flow/entities/__init__.py @@ -0,0 +1,66 @@ +from octobot_flow.entities.accounts import ( + PortfolioAssetHolding, + ExchangeAccountElements, + ExchangeAccountDetails, + ExchangeAccountPortfolio, + AccountElements, +) +from octobot_flow.entities.actions import ( + ActionDependency, + AbstractActionDetails, + DSLScriptActionDetails, + ConfiguredActionDetails, + parse_action_details, + ActionsDAG, +) +from octobot_flow.entities.automations import ( + FetchedExchangeAccountElements, + FetchedExchangePublicData, + FetchedExchangeData, + FetchedCopyTradingData, + AutomationMetadata, + AutomationDetails, + AutomationState, + FetchedDependencies, + TriggerDetails, + DegradedStateDetails, + ExecutionDetails, + AdditionalActions, + RefreshExchangeBotsAuthenticatedDataDetails, + NextIterationDetails, + PostIterationActionsDetails, +) +from octobot_flow.entities.community import ( + UserAuthentication, + TradingSignal, +) +__all__ = [ + "AccountElements", + "ExchangeAccountElements", + "PortfolioAssetHolding", + "ExchangeAccountDetails", + "ExchangeAccountPortfolio", + "ActionDependency", + "AbstractActionDetails", + "DSLScriptActionDetails", + "ConfiguredActionDetails", + "parse_action_details", + "ActionsDAG", + "RefreshExchangeBotsAuthenticatedDataDetails", + "NextIterationDetails", + "PostIterationActionsDetails", + "FetchedExchangeAccountElements", + "FetchedExchangePublicData", + "FetchedExchangeData", + "FetchedCopyTradingData", + "AutomationMetadata", + "AutomationDetails", + "AutomationState", + "FetchedDependencies", + "TriggerDetails", + "DegradedStateDetails", + "ExecutionDetails", + "AdditionalActions", + "UserAuthentication", + "TradingSignal", +] diff --git a/packages/flow/octobot_flow/entities/accounts/__init__.py b/packages/flow/octobot_flow/entities/accounts/__init__.py new file mode 100644 index 0000000000..3cb525c719 --- /dev/null +++ b/packages/flow/octobot_flow/entities/accounts/__init__.py @@ -0,0 +1,12 @@ +from octobot_flow.entities.accounts.portfolio_asset_holdings import PortfolioAssetHolding +from octobot_flow.entities.accounts.exchange_account_elements import ExchangeAccountElements +from octobot_flow.entities.accounts.exchange_account_details import ExchangeAccountDetails, ExchangeAccountPortfolio +from octobot_flow.entities.accounts.account_elements import AccountElements + +__all__ = [ + "PortfolioAssetHolding", + "ExchangeAccountElements", + "ExchangeAccountDetails", + "ExchangeAccountPortfolio", + "AccountElements", +] diff --git a/packages/flow/octobot_flow/entities/accounts/account_elements.py b/packages/flow/octobot_flow/entities/accounts/account_elements.py new file mode 100644 index 0000000000..78755cf396 --- /dev/null +++ b/packages/flow/octobot_flow/entities/accounts/account_elements.py @@ -0,0 +1,41 @@ +import dataclasses +import typing + +import octobot_commons.dataclasses +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.enums +import octobot_flow.enums + + + +@dataclasses.dataclass +class AccountElements(octobot_commons.dataclasses.MinimizableDataclass, octobot_commons.dataclasses.UpdatableDataclass): + """ + Defines the ideal exchange account state of an automation. Only contains sharable data + """ + name: typing.Optional[str] = None + portfolio: exchange_data_import.PortfolioDetails = dataclasses.field(default_factory=exchange_data_import.PortfolioDetails) + transactions: list[dict] = dataclasses.field(default_factory=list) + + def __post_init__(self): + if self.portfolio and isinstance(self.portfolio, dict): + self.portfolio = exchange_data_import.PortfolioDetails.from_dict(self.portfolio) + + def sync_from_transactions(self, transactions: list[dict]) -> list[octobot_flow.enums.ChangedElements]: + changed_elements = [] + if self._sync_transactions(transactions): + changed_elements.append(octobot_flow.enums.ChangedElements.TRANSACTIONS) + return changed_elements + + def _sync_transactions(self, transactions: list[dict]) -> bool: + previous_transactions_ids = { + transaction[octobot_trading.enums.ExchangeConstantsTransactionColumns.TXID.value] + for transaction in self.transactions + } + added_transactions = [ + transaction + for transaction in transactions + if transaction[octobot_trading.enums.ExchangeConstantsTransactionColumns.TXID.value] not in previous_transactions_ids + ] + self.transactions.extend(added_transactions) + return bool(added_transactions) diff --git a/packages/flow/octobot_flow/entities/accounts/exchange_account_details.py b/packages/flow/octobot_flow/entities/accounts/exchange_account_details.py new file mode 100644 index 0000000000..4dc9bd2aab --- /dev/null +++ b/packages/flow/octobot_flow/entities/accounts/exchange_account_details.py @@ -0,0 +1,52 @@ +import dataclasses +import typing +import decimal + +import octobot_commons.dataclasses +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_flow.entities.accounts.portfolio_asset_holdings as portfolio_asset_holdings_import + + +@dataclasses.dataclass +class ExchangeAccountPortfolio(octobot_commons.dataclasses.MinimizableDataclass, octobot_commons.dataclasses.UpdatableDataclass): + content: list[portfolio_asset_holdings_import.PortfolioAssetHolding] = dataclasses.field(default_factory=list) + unit: str = "" + + +@dataclasses.dataclass +class ExchangeAccountDetails(octobot_commons.dataclasses.MinimizableDataclass): + exchange_details: profile_data_import.ExchangeData = dataclasses.field( + default_factory=profile_data_import.ExchangeData, repr=True + ) + auth_details: exchange_data_import.ExchangeAuthDetails = dataclasses.field(default_factory=exchange_data_import.ExchangeAuthDetails, repr=False) + portfolio: ExchangeAccountPortfolio = dataclasses.field(default_factory=ExchangeAccountPortfolio, repr=True) + + def to_minimal_exchange_data(self, portfolio: typing.Optional[dict[str, dict[str, decimal.Decimal]]]) -> exchange_data_import.ExchangeData: + exchange_data = exchange_data_import.ExchangeData( + exchange_details=exchange_data_import.ExchangeDetails( + name=self.exchange_details.internal_name, # type: ignore + ), + auth_details=self.auth_details, + ) + if portfolio: + exchange_data.portfolio_details.content = portfolio # type: ignore + return exchange_data + + def is_simulated(self) -> bool: + # is simulated if no auth details are provided + return not ( + self.auth_details.api_key + or self.auth_details.api_secret + or self.auth_details.api_password + or self.auth_details.access_token + or self.auth_details.encrypted + ) + + def __post_init__(self): + if self.portfolio and isinstance(self.portfolio, dict): + self.portfolio = ExchangeAccountPortfolio.from_dict(self.portfolio) + if self.exchange_details and isinstance(self.exchange_details, dict): + self.exchange_details = profile_data_import.ExchangeData.from_dict(self.exchange_details) + if self.auth_details and isinstance(self.auth_details, dict): + self.auth_details = exchange_data_import.ExchangeAuthDetails.from_dict(self.auth_details) diff --git a/packages/flow/octobot_flow/entities/accounts/exchange_account_elements.py b/packages/flow/octobot_flow/entities/accounts/exchange_account_elements.py new file mode 100644 index 0000000000..3f189e0f02 --- /dev/null +++ b/packages/flow/octobot_flow/entities/accounts/exchange_account_elements.py @@ -0,0 +1,121 @@ +import dataclasses +import typing + +import octobot_commons.logging +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.exchanges +import octobot_trading.storage.orders_storage +import octobot_trading.api +import octobot_trading.enums +import octobot_trading.constants +import octobot_trading.personal_data + +import octobot_flow.enums +import octobot_flow.entities.accounts.account_elements as account_elements_import + + +@dataclasses.dataclass +class ExchangeAccountElements(account_elements_import.AccountElements): + """ + Defines the ideal exchange account state of an automation. Only contains sharable data + """ + orders: exchange_data_import.OrdersDetails = dataclasses.field(default_factory=exchange_data_import.OrdersDetails) + positions: list[exchange_data_import.PositionDetails] = dataclasses.field(default_factory=list) + trades: list[dict] = dataclasses.field(default_factory=list) + + def __post_init__(self): + super().__post_init__() + if self.orders and isinstance(self.orders, dict): + self.orders = exchange_data_import.OrdersDetails.from_dict(self.orders) + if self.positions and isinstance(self.positions[0], dict): + self.positions = [ + exchange_data_import.PositionDetails.from_dict(position) for position in self.positions # type: ignore + ] + if self.trades and isinstance(self.trades[0], dict): + self.trades = [ + dict(trade) for trade in self.trades # type: ignore + ] + + def has_pending_chained_orders(self) -> bool: + for order in self.orders.missing_orders: + if order.get(octobot_trading.constants.STORAGE_ORIGIN_VALUE, {}).get(octobot_trading.enums.StoredOrdersAttr.CHAINED_ORDERS.value): + return True + return False + + def has_pending_groups(self) -> bool: + # TODO + return False + + def sync_from_exchange_manager( + self, + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], + transactions: list[dict] + ) -> list[octobot_flow.enums.ChangedElements]: + changed_elements = self.sync_from_transactions(transactions) + if exchange_manager: + if self.sync_orders_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.ORDERS) + if self.sync_portfolio_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.PORTFOLIO) + if self.sync_positions_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.POSITIONS) + if self._sync_trades_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.TRADES) + return changed_elements + + def sync_orders_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_orders = self.orders + updated_open_orders_exchange_ids = set() + updated_open_orders = [] + updated_missing_orders = [] + for order in octobot_trading.api.get_open_orders(exchange_manager): + if order.is_self_managed(): + octobot_commons.logging.get_logger(self.__class__.__name__).error( + f"Self managed order created. This type of [{exchange_manager.exchange_name}] " + f"order is not supported, order is ignored. Order: {order}" + ) + continue + updated_open_orders_exchange_ids.add(order.exchange_order_id) + updated_open_orders.append( + octobot_trading.storage.orders_storage._format_order(order, exchange_manager) + ) + updated_missing_orders = [ + order + for exchange_id, order in octobot_trading.personal_data.get_enriched_orders_by_exchange_id(previous_orders.open_orders).items() + if exchange_id not in updated_open_orders_exchange_ids + ] + self.orders.open_orders = updated_open_orders + self.orders.missing_orders = updated_missing_orders + return previous_orders != self.orders + + def sync_portfolio_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_portfolio = self.portfolio.content + self.portfolio.content = { + key: values + for key, values in octobot_trading.api.get_portfolio(exchange_manager, as_decimal=False).items() + if any(value for value in values.values()) # skip 0 value assets + } + return previous_portfolio != self.portfolio.content + + def sync_positions_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_positions = self.positions + self.positions = [ + exchange_data_import.PositionDetails(position.to_dict(), position.symbol_contract.to_dict()) + for position in octobot_trading.api.get_positions(exchange_manager) + ] + return previous_positions != self.positions + + def _sync_trades_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_trades_count = len(self.trades) + if update_trades := octobot_trading.api.get_trade_history(exchange_manager, as_dict=True): + current_trade_ids = { + trade[octobot_trading.enums.ExchangeConstantsOrderColumns.EXCHANGE_TRADE_ID.value] + for trade in self.trades + } + if new_trades := tuple( + trade + for trade in update_trades + if trade[octobot_trading.enums.ExchangeConstantsOrderColumns.EXCHANGE_TRADE_ID.value] not in current_trade_ids + ): + self.trades.extend(new_trades) + return previous_trades_count != len(self.trades) diff --git a/packages/flow/octobot_flow/entities/accounts/portfolio_asset_holdings.py b/packages/flow/octobot_flow/entities/accounts/portfolio_asset_holdings.py new file mode 100644 index 0000000000..1bdc09b957 --- /dev/null +++ b/packages/flow/octobot_flow/entities/accounts/portfolio_asset_holdings.py @@ -0,0 +1,31 @@ +import dataclasses +import typing +import decimal + +import octobot_commons.dataclasses +import octobot_commons.constants +import octobot_trading.constants + +@dataclasses.dataclass +class PortfolioAssetHolding(octobot_commons.dataclasses.FlexibleDataclass): + asset: str + available: float + total: float + value: float = 0 + unlocked_available: typing.Optional[float] = None + unlocked_total: typing.Optional[float] = None + unlocked_value: typing.Optional[float] = None + + def to_portfolio_asset_dict(self, zeroize_negative_values: bool) -> dict[str, decimal.Decimal]: + formatted = { + octobot_commons.constants.PORTFOLIO_AVAILABLE: decimal.Decimal(str(self.available)), + octobot_commons.constants.PORTFOLIO_TOTAL: decimal.Decimal(str(self.total)), + } + if zeroize_negative_values: + if formatted[octobot_commons.constants.PORTFOLIO_TOTAL] < octobot_trading.constants.ZERO: + # total can't be negative + formatted[octobot_commons.constants.PORTFOLIO_TOTAL] = octobot_trading.constants.ZERO + if formatted[octobot_commons.constants.PORTFOLIO_AVAILABLE] > formatted[octobot_commons.constants.PORTFOLIO_TOTAL]: + # available can't be greater than total + formatted[octobot_commons.constants.PORTFOLIO_AVAILABLE] = formatted[octobot_commons.constants.PORTFOLIO_TOTAL] + return formatted diff --git a/packages/flow/octobot_flow/entities/actions/__init__.py b/packages/flow/octobot_flow/entities/actions/__init__.py new file mode 100644 index 0000000000..567fcf3724 --- /dev/null +++ b/packages/flow/octobot_flow/entities/actions/__init__.py @@ -0,0 +1,17 @@ +from octobot_flow.entities.actions.action_details import ( + ActionDependency, + AbstractActionDetails, + DSLScriptActionDetails, + ConfiguredActionDetails, + parse_action_details, +) +from octobot_flow.entities.actions.actions_dag import ActionsDAG + +__all__ = [ + "ActionDependency", + "AbstractActionDetails", + "DSLScriptActionDetails", + "ConfiguredActionDetails", + "parse_action_details", + "ActionsDAG", +] diff --git a/packages/flow/octobot_flow/entities/actions/action_details.py b/packages/flow/octobot_flow/entities/actions/action_details.py new file mode 100644 index 0000000000..6835b5dbcb --- /dev/null +++ b/packages/flow/octobot_flow/entities/actions/action_details.py @@ -0,0 +1,161 @@ +import dataclasses +import typing +import time + +import octobot_commons.dsl_interpreter +import octobot_commons.dataclasses +import octobot_commons.constants +import octobot_commons.logging +import octobot_flow.enums +import octobot_flow.errors + +@dataclasses.dataclass +class ActionDependency(octobot_commons.dataclasses.FlexibleDataclass): + # id of the action this dependency is on + action_id: str = dataclasses.field(repr=True) + # value of the dependency result. Used by an action to resolve its own DSL script when it has dependencies + parameter: typing.Optional[str] = dataclasses.field(default=None, repr=False) + # keys into the dependency action's dict result, e.g. ["exchange_specific_order_values", "address_from"] + result_path: typing.Optional[list[str]] = dataclasses.field(default=None, repr=False) + + +@dataclasses.dataclass +class AbstractActionDetails(octobot_commons.dataclasses.FlexibleDataclass): + # unique id of the action + id: str = dataclasses.field(repr=True) + # result of the action. Set after the action is executed + result: typing.Optional[ + octobot_commons.dsl_interpreter.ComputedOperatorParameterType + ] = dataclasses.field(default=None, repr=octobot_commons.constants.ALLOW_PRIVATE_DATA_LOGS) + # error status of the action. Set after the action is executed, in case an error occured + error_status: typing.Optional[str] = dataclasses.field(default=None, repr=True) # ActionErrorStatus + # time at which the action was executed + executed_at: typing.Optional[float] = dataclasses.field(default=None, repr=True) + # dependencies of this action. If an action has dependencies, it will not be executed until all its dependencies are completed + dependencies: list["ActionDependency"] = dataclasses.field(default_factory=list, repr=True) + # id of the action to reset the DAG to. If set, will reset the DAG to this action after this action is completed. + reset_target_action_id: typing.Optional[str] = dataclasses.field(default=None, repr=False) + # result of the previous execution of this action. Used when the action is reset + previous_execution_result: typing.Optional[dict] = dataclasses.field(default=None, repr=False) + + def __post_init__(self): + if self.dependencies: + self.dependencies = [ + ActionDependency.from_dict(dependency) if + isinstance(dependency, dict) else dependency + for dependency in self.dependencies + ] + + def complete( + self, + result: typing.Optional[dict] = None, + error_status: typing.Optional[str] = None, + ): + self.executed_at = time.time() + if result: + self.result = result + if error_status: + self.error_status = error_status + + def is_completed(self) -> bool: + return self.executed_at is not None + + def update_execution_details(self, action: "AbstractActionDetails"): + self.result = action.result + self.executed_at = action.executed_at + self.error_status = action.error_status + + def should_be_historised_in_database(self) -> bool: + return False + + def add_dependency( + self, + action_id: str, + parameter: typing.Optional[str] = None, + result_path: typing.Optional[list[str]] = None, + ): + self.dependencies.append(ActionDependency(action_id, parameter, result_path)) + + def get_summary(self, minimal: bool = False) -> str: + raise NotImplementedError("get_summary is not implemented for this bot action type") + + def get_rescheduled_parameters(self) -> dict: + rescheduled_parameters = {} + if self.previous_execution_result: + if octobot_commons.dsl_interpreter.ReCallingOperatorResult.is_re_calling_operator_result( + self.previous_execution_result + ): + rescheduled_parameters[ + octobot_commons.dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY + ] = self.previous_execution_result + return rescheduled_parameters + + def reset(self): + self.previous_execution_result = self.result # type: ignore + self.result = None + self.error_status = None + self.executed_at = None + + def update_configuration(self, action: "AbstractActionDetails"): + raise NotImplementedError("update_configuration is not implemented") + + +@dataclasses.dataclass +class DSLScriptActionDetails(AbstractActionDetails): + # DSL script to execute + dsl_script: typing.Optional[str] = dataclasses.field(default=None, repr=octobot_commons.constants.ALLOW_PRIVATE_DATA_LOGS) # should be set to the DSL script + # resolved DSL script. self.dsl_script where all the dependencies have been replaced by their actual values + resolved_dsl_script: typing.Optional[str] = dataclasses.field(default=None, repr=False) # should be set to the resolved DSL script + + def get_summary(self, minimal: bool = False) -> str: + if minimal: + # only return the first operator name + return str(self.dsl_script).split("(")[0] + return str(self.dsl_script) + + def get_resolved_dsl_script(self) -> str: + if not self.resolved_dsl_script: + raise octobot_flow.errors.UnresolvedDSLScriptError( + f"Resolved DSL script is not set: {self.resolved_dsl_script}") + if octobot_commons.dsl_interpreter.has_unresolved_parameters(self.resolved_dsl_script): + raise octobot_flow.errors.UnresolvedDSLScriptError( + f"Resolved DSL script has unresolved parameters: {octobot_commons.logging.get_private_minimized_message_if_necessary(self.resolved_dsl_script)}" + ) + return self.resolved_dsl_script + + def clear_resolved_dsl_script(self): + self.resolved_dsl_script = None + + def update_configuration(self, action: "AbstractActionDetails"): + if not isinstance(action, DSLScriptActionDetails): + raise TypeError( + f"Expected DSLScriptActionDetails, got {type(action).__name__}" + ) + self.dsl_script = action.dsl_script + + +@dataclasses.dataclass +class ConfiguredActionDetails(AbstractActionDetails): + # type of the action. Must be an ActionType + action: str = dataclasses.field(default=octobot_flow.enums.ActionType.UNKNOWN.value, repr=True) + # configuration of the action. A dict specific to the action type + config: typing.Optional[dict] = dataclasses.field(default=None, repr=False) + + def get_summary(self, minimal: bool = False) -> str: + return self.action + + def update_configuration(self, action: "AbstractActionDetails"): + if not isinstance(action, ConfiguredActionDetails): + raise TypeError( + f"Expected ConfiguredActionDetails, got {type(action).__name__}" + ) + self.action = action.action + self.config = dict(action.config) if action.config is not None else None + + +def parse_action_details(action_details: dict) -> AbstractActionDetails: + if "dsl_script" in action_details: + return DSLScriptActionDetails.from_dict(action_details) + elif "action" in action_details: + return ConfiguredActionDetails.from_dict(action_details) + raise ValueError(f"Invalid action details: {action_details}") diff --git a/packages/flow/octobot_flow/entities/actions/actions_dag.py b/packages/flow/octobot_flow/entities/actions/actions_dag.py new file mode 100644 index 0000000000..fdb8303bc4 --- /dev/null +++ b/packages/flow/octobot_flow/entities/actions/actions_dag.py @@ -0,0 +1,188 @@ +import dataclasses +import typing + +import octobot_commons.dsl_interpreter +import octobot_commons.dataclasses + +import octobot_flow.entities.actions.action_details as action_details +import octobot_flow.enums +import octobot_flow.errors + + +def _navigate_dict_path(root: typing.Any, path: list[str]) -> typing.Any: + cursor = root + for i, key in enumerate(path): + if isinstance(cursor, dict): + try: + cursor = cursor[key] + except KeyError as err: + raise octobot_flow.errors.ActionDependencyError( + f"Dependency result has no path {path[: i + 1]!r} (missing key: {key} in {list(cursor)})" + ) from err + elif isinstance(cursor, list) and key.isdigit(): + idx = int(key) + if idx >= len(cursor): + raise octobot_flow.errors.ActionDependencyError( + f"Dependency result path {path[:i]!r} list index {idx!r} out of range (len={len(cursor)})" + ) + cursor = cursor[idx] + else: + raise octobot_flow.errors.ActionDependencyError( + f"Dependency result path {path[:i]!r} is not a dict or list (got {type(cursor).__name__}), " + f"cannot apply segment {key!r}" + ) + return cursor + + +@dataclasses.dataclass +class ActionsDAG(octobot_commons.dataclasses.FlexibleDataclass): + actions: list[action_details.AbstractActionDetails] = dataclasses.field(default_factory=list) + + def __post_init__(self): + self.actions = [ + action_details.parse_action_details(action) if isinstance(action, dict) else action + for action in self.actions + ] + + def add_action(self, action: action_details.AbstractActionDetails): + self.actions.append(action) + + def __bool__(self) -> bool: + return bool(self.actions) + + def get_actions_by_id(self) -> dict[str, action_details.AbstractActionDetails]: + return { + action.id: action for action in self.actions + } + + def update_actions_results(self, actions: list[action_details.AbstractActionDetails]): + actions_by_id = self.get_actions_by_id() + for action in actions: + actions_by_id[action.id].update_execution_details(action) + + def get_executable_actions(self) -> list[action_details.AbstractActionDetails]: + """Return actions that can be executed: not yet executed, and either have no + dependencies or all their dependencies have results (executed_at is set). + """ + return [ + action + for action in self.actions + if not action.is_completed() and self.filled_all_dependencies(action) + ] + + def completed_all_actions(self) -> bool: + return all(action.is_completed() for action in self.actions) + + def get_pending_actions(self) -> list[action_details.AbstractActionDetails]: + return [ + action + for action in self.actions + if not action.is_completed() + ] + + def _get_dependents_map(self) -> dict[str, set[str]]: + """Return a map: action_id -> set of action_ids that directly depend on it.""" + dependents: dict[str, set[str]] = {action.id: set() for action in self.actions} + for action in self.actions: + for dep in action.dependencies: + dependents.setdefault(dep.action_id, set()).add(action.id) + return dependents + + def _get_transitive_dependents(self, action_id: str, dependents_map: dict[str, set[str]]) -> set[str]: + """Return all action_ids that depend on the given action_id (directly or indirectly).""" + result: set[str] = set() + to_visit = [action_id] + visited: set[str] = set() + while to_visit: + current = to_visit.pop() + if current in visited: + continue + visited.add(current) + for dependent_id in dependents_map.get(current, set()): + if dependent_id not in visited: + result.add(dependent_id) + to_visit.append(dependent_id) + return result + + def reset_to(self, action_id: str): + """ + Reset the action identified by action_id and all DAG actions that depend + directly or indirectly from this action. + """ + actions_by_id = self.get_actions_by_id() + if action_id not in actions_by_id: + raise octobot_flow.errors.ActionDependencyNotFoundError( + f"Action {action_id} not found in DAG" + ) + dependents_map = self._get_dependents_map() + to_reset = self._get_transitive_dependents(action_id, dependents_map) | {action_id} + for aid in to_reset: + actions_by_id[aid].reset() + + def filled_all_dependencies(self, action: action_details.AbstractActionDetails) -> bool: + try: + actions_by_id = self.get_actions_by_id() + return all( + actions_by_id[dep.action_id].is_completed() + for dep in action.dependencies + ) + except KeyError as err: + raise octobot_flow.errors.ActionDependencyNotFoundError( + f"Action {action.id} has dependencies with unknown action IDs: {err}" + ) from err + + def resolve_dsl_scripts( + self, actions: list[action_details.AbstractActionDetails] + ): + """ + Return the resolved DSL script, with all the dependencies resolved. + If the DSL script is not set, return None. + """ + actions_by_id = self.get_actions_by_id() + for action in actions: + if isinstance(action, action_details.DSLScriptActionDetails): + self._resolve_dsl_script(action, actions_by_id) + + def _resolve_dsl_script( + self, + action: action_details.DSLScriptActionDetails, + actions_by_id: dict[str, action_details.AbstractActionDetails] + ): + resolved_dsl_script = str(action.dsl_script) + for dependency in action.dependencies: + dependency_action = actions_by_id[dependency.action_id] + if dependency_action.error_status != octobot_flow.enums.ActionErrorStatus.NO_ERROR.value: + raise octobot_flow.errors.ActionDependencyError( + f"Dependency {dependency.parameter} returned an error: {dependency_action.error_status}" + ) + if not dependency.parameter: + # no parameter name: this dependency is not a parameter: it just needs to have been executed + continue + value = dependency_action.result + if dependency.result_path: + value = _navigate_dict_path(value, dependency.result_path) + resolved_dsl_script = octobot_commons.dsl_interpreter.apply_resolved_parameter_value( + resolved_dsl_script, dependency.parameter, value + ) + reschedule_params = action.get_rescheduled_parameters() + for rescheduled_parameter, rescheduled_value in reschedule_params.items(): + if script_override := octobot_commons.dsl_interpreter.ReCallingOperatorResult.get_script_override(rescheduled_value): + # the script override is the new DSL script to execute for this action call + resolved_dsl_script = script_override + for rescheduled_parameter, rescheduled_value in reschedule_params.items(): + operator = octobot_commons.dsl_interpreter.ReCallingOperatorResult.get_keyword( + rescheduled_value + ) + if not operator: + raise octobot_flow.errors.ActionDependencyError( + f"Dependency {rescheduled_parameter} returned a re-calling operator result with no keyword value: {rescheduled_value}" + ) + resolved_dsl_script = octobot_commons.dsl_interpreter.add_resolved_parameter_value( + resolved_dsl_script, operator, rescheduled_parameter, rescheduled_value + ) + action.resolved_dsl_script = resolved_dsl_script + + def __repr__(self) -> str: + return ( + f"ActionsDAG([{len(self.actions)}]: {', '.join([str(action) for action in self.actions])})" + ) diff --git a/packages/flow/octobot_flow/entities/automations/__init__.py b/packages/flow/octobot_flow/entities/automations/__init__.py new file mode 100644 index 0000000000..8d1b60bf57 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/__init__.py @@ -0,0 +1,42 @@ +from octobot_flow.entities.automations.fetched_exchange_data import ( + FetchedExchangeAccountElements, + FetchedExchangePublicData, + FetchedExchangeData, +) +from octobot_flow.entities.automations.fetched_copy_trading_data import ( + FetchedCopyTradingData, +) +from octobot_flow.entities.automations.automation_details import ( + AutomationMetadata, + AutomationDetails, +) +from octobot_flow.entities.automations.automation_state import AutomationState +from octobot_flow.entities.automations.fetched_dependencies import FetchedDependencies +from octobot_flow.entities.automations.execution_details import ( + TriggerDetails, + DegradedStateDetails, + ExecutionDetails, +) +from octobot_flow.entities.automations.additional_actions import AdditionalActions +from octobot_flow.entities.automations.post_iteration_actions_details import ( + RefreshExchangeBotsAuthenticatedDataDetails, + NextIterationDetails, + PostIterationActionsDetails, +) +__all__ = [ + "FetchedExchangeAccountElements", + "FetchedExchangePublicData", + "FetchedExchangeData", + "FetchedCopyTradingData", + "AutomationMetadata", + "AutomationDetails", + "AutomationState", + "FetchedDependencies", + "TriggerDetails", + "DegradedStateDetails", + "ExecutionDetails", + "AdditionalActions", + "RefreshExchangeBotsAuthenticatedDataDetails", + "NextIterationDetails", + "PostIterationActionsDetails", +] diff --git a/packages/flow/octobot_flow/entities/automations/additional_actions.py b/packages/flow/octobot_flow/entities/automations/additional_actions.py new file mode 100644 index 0000000000..94f9cf8749 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/additional_actions.py @@ -0,0 +1,22 @@ +import dataclasses +import octobot_commons.dataclasses + + +@dataclasses.dataclass +class AdditionalActions(octobot_commons.dataclasses.MinimizableDataclass): + # todo implement this when necessary + check_min_portfolio: bool = False + optimize_portfolio: bool = False + optimize_portfolio_for_restart: bool = False + trigger_initial_orders: bool = False + minimum_wait_time_before_next_iteration: float = 0 + + @classmethod + def default_iteration(cls): + return cls( + check_min_portfolio=False, optimize_portfolio=False, + optimize_portfolio_for_restart=False, trigger_initial_orders=False + ) + + def has_trading_actions(self) -> bool: + return self.optimize_portfolio or self.optimize_portfolio_for_restart diff --git a/packages/flow/octobot_flow/entities/automations/automation_details.py b/packages/flow/octobot_flow/entities/automations/automation_details.py new file mode 100644 index 0000000000..b13f7a5828 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/automation_details.py @@ -0,0 +1,55 @@ +import dataclasses +import typing + +import octobot_commons.dataclasses + +import octobot_flow.entities.automations.execution_details as execution_details_import +import octobot_flow.entities.accounts.exchange_account_elements as exchange_account_elements_import +import octobot_flow.entities.actions.actions_dag as actions_dag_import +import octobot_flow.entities.accounts.account_elements as account_elements_import +import octobot_flow.entities.automations.post_iteration_actions_details as post_iteration_actions_details_import + + +@dataclasses.dataclass +class AutomationMetadata( + octobot_commons.dataclasses.MinimizableDataclass, + octobot_commons.dataclasses.UpdatableDataclass +): + automation_id: str = dataclasses.field(default="", repr=True) + emit_signals: bool = dataclasses.field(default=False, repr=True) + strategy_id: str = dataclasses.field(default="", repr=True) + + +@dataclasses.dataclass +class AutomationDetails(octobot_commons.dataclasses.MinimizableDataclass, octobot_commons.dataclasses.UpdatableDataclass): + """ + Defines an automation made of: + - An actions DAG defining the actions to be executed as DSL or configured actions + This actions DAG also defines bot strategies in the form of a keyword with parameters + - Exchange account elements for this workflow ((sub)portfolio, orders, positions, trades, ...) + - Extra accounts elements if any (blockchain wallets, etc.) + - Current and previous execution details + - Post actions if any (local to an iteration) + """ + + metadata: AutomationMetadata = dataclasses.field(default_factory=AutomationMetadata, repr=True) + actions_dag: actions_dag_import.ActionsDAG = dataclasses.field(default_factory=actions_dag_import.ActionsDAG, repr=True) + exchange_account_elements: typing.Optional[exchange_account_elements_import.ExchangeAccountElements] = dataclasses.field(default=None, repr=True) + extra_accounts: list[account_elements_import.AccountElements] = dataclasses.field(default_factory=list, repr=True) + execution: execution_details_import.ExecutionDetails = dataclasses.field(default_factory=execution_details_import.ExecutionDetails, repr=False) + post_actions: post_iteration_actions_details_import.PostIterationActionsDetails = dataclasses.field(default_factory=post_iteration_actions_details_import.PostIterationActionsDetails, repr=False) + + def __post_init__(self): + if self.metadata and isinstance(self.metadata, dict): + self.metadata = AutomationMetadata.from_dict(self.metadata) + if self.execution and isinstance(self.execution, dict): + self.execution = execution_details_import.ExecutionDetails.from_dict(self.execution) + if self.exchange_account_elements and isinstance(self.exchange_account_elements, dict): + self.exchange_account_elements = exchange_account_elements_import.ExchangeAccountElements.from_dict(self.exchange_account_elements) + if self.extra_accounts and isinstance(self.extra_accounts[0], dict): + self.extra_accounts = [ + account_elements_import.AccountElements.from_dict(account) + for account in self.extra_accounts + ] + if self.post_actions and isinstance(self.post_actions, dict): + self.post_actions = post_iteration_actions_details_import.PostIterationActionsDetails.from_dict(self.post_actions) diff --git a/packages/flow/octobot_flow/entities/automations/automation_state.py b/packages/flow/octobot_flow/entities/automations/automation_state.py new file mode 100644 index 0000000000..b86fd2f00c --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/automation_state.py @@ -0,0 +1,94 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import dataclasses +import typing +import decimal + +import octobot_commons.dataclasses +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import octobot_flow.entities.accounts.exchange_account_details as exchange_account_details_import +import octobot_flow.entities.automations.automation_details as automation_details_import +import octobot_flow.errors +import octobot_flow.entities.actions.action_details as action_details_import + + +def required_exchange_account_details(func: typing.Callable) -> typing.Callable: + def required_exchange_account_details_wrapper(self, *args, **kwargs): + if not self.exchange_account_details: + raise octobot_flow.errors.NoExchangeAccountDetailsError("Exchange account details are required") + return func(self, *args, **kwargs) + return required_exchange_account_details_wrapper + + +@dataclasses.dataclass +class AutomationState(octobot_commons.dataclasses.MinimizableDataclass): + """ + Defines the state of a single automation which is potentially associated to an exchange account. + """ + # Description of the automation + automation: automation_details_import.AutomationDetails = dataclasses.field(default_factory=automation_details_import.AutomationDetails, repr=True) + # Global info of the exchange account of this automation. + # Equal to automation account when simulated, conttains the full (not sub) exchange portfolio otherwise + exchange_account_details: typing.Optional[exchange_account_details_import.ExchangeAccountDetails] = dataclasses.field(default=None, repr=True) + # Priority actions to be executed before the automation DAG when they are not already executed + priority_actions: list[action_details_import.AbstractActionDetails] = dataclasses.field(default_factory=list, repr=True) + + def upsert_automation_actions(self, actions: list[action_details_import.AbstractActionDetails]): + existing_actions = self.automation.actions_dag.get_actions_by_id() + for action in actions: + if action.id not in existing_actions: + self.automation.actions_dag.add_action(action) + else: + existing_actions[action.id].update_configuration(action) + + def has_exchange(self) -> bool: + return bool( + self.exchange_account_details is not None + and self.exchange_account_details.exchange_details.internal_name + ) + + @required_exchange_account_details + def to_minimal_exchange_data(self, _automation_id: typing.Optional[str] = None) -> exchange_data_import.ExchangeData: + return self.exchange_account_details.to_minimal_exchange_data( + self._get_automation_portfolio() + ) + + def _get_automation_portfolio(self) -> dict[str, dict[str, decimal.Decimal]]: + elements = self.automation.exchange_account_elements + return elements.portfolio.content if elements else {} # type: ignore + + def update_priority_actions(self, added_actions: list[action_details_import.AbstractActionDetails]): + included_action_ids = set( + action.id for action in self.priority_actions + ) + self.priority_actions.extend( + action + for action in added_actions + if action.id not in included_action_ids + ) + + def get_pending_priority_actions(self) -> list[action_details_import.AbstractActionDetails]: + return [ + action for action in self.priority_actions if not action.is_completed() + ] + + def __post_init__(self): + if self.automation and isinstance(self.automation, dict): + self.automation = automation_details_import.AutomationDetails.from_dict(self.automation) + if self.exchange_account_details and isinstance(self.exchange_account_details, dict): + self.exchange_account_details = exchange_account_details_import.ExchangeAccountDetails.from_dict(self.exchange_account_details) diff --git a/packages/flow/octobot_flow/entities/automations/execution_details.py b/packages/flow/octobot_flow/entities/automations/execution_details.py new file mode 100644 index 0000000000..06f5ded478 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/execution_details.py @@ -0,0 +1,74 @@ +import dataclasses +import time +import typing + +import octobot_commons.dataclasses +import octobot_flow.enums +import octobot_flow.entities.automations.additional_actions as additional_actions_import + + +@dataclasses.dataclass +class TriggerDetails(octobot_commons.dataclasses.MinimizableDataclass): + scheduled_to: float = 0 + triggered_at: float = 0 + trigger_reason: typing.Optional[str] = None + additional_actions: additional_actions_import.AdditionalActions = dataclasses.field(default_factory=additional_actions_import.AdditionalActions) + custom_action_ids: list[str] = dataclasses.field(default_factory=list) + signal_ids: list[str] = dataclasses.field(default_factory=list) + strategy_executed_at: float = 0 + was_completed: bool = False + + def __post_init__(self): + if self.additional_actions and isinstance(self.additional_actions, dict): + self.additional_actions = additional_actions_import.AdditionalActions.from_dict(self.additional_actions) + + def has_custom_actions_or_signals_to_fetch(self) -> bool: + return bool( + self.trigger_reason == octobot_flow.enums.LastTriggerReason.CUSTOM_ACTION.value + or self.custom_action_ids + or self.trigger_reason == octobot_flow.enums.LastTriggerReason.SIGNAL.value + or self.signal_ids + ) + + def was_interrupted(self) -> bool: + return not self.was_completed + + +@dataclasses.dataclass +class DegradedStateDetails(octobot_commons.dataclasses.MinimizableDataclass): + since: float = 0 + reason: typing.Optional[str] = None + + +@dataclasses.dataclass +class ExecutionDetails(octobot_commons.dataclasses.MinimizableDataclass): + previous_execution: TriggerDetails = dataclasses.field(default_factory=TriggerDetails) + current_execution: TriggerDetails = dataclasses.field(default_factory=TriggerDetails) + degraded_state: DegradedStateDetails = dataclasses.field(default_factory=DegradedStateDetails) + execution_error: typing.Optional[str] = None + + def __post_init__(self): + if self.previous_execution and isinstance(self.previous_execution, dict): + self.previous_execution = TriggerDetails.from_dict(self.previous_execution) + if self.current_execution and isinstance(self.current_execution, dict): + self.current_execution = TriggerDetails.from_dict(self.current_execution) + if self.degraded_state and isinstance(self.degraded_state, dict): + self.degraded_state = DegradedStateDetails.from_dict(self.degraded_state) + + def should_fetch_custom_actions_or_signals(self) -> bool: + return ( + self.current_execution.has_custom_actions_or_signals_to_fetch() + or (self.previous_execution.was_interrupted() and self.previous_execution.has_custom_actions_or_signals_to_fetch()) + ) + + def start_execution(self): + self.current_execution.triggered_at = time.time() + + def complete_execution(self, next_execution_scheduled_to: float): + self.current_execution.was_completed = True + self.previous_execution = self.current_execution + self.current_execution = TriggerDetails( + scheduled_to=next_execution_scheduled_to, + trigger_reason=octobot_flow.enums.LastTriggerReason.SCHEDULED.value, + additional_actions=additional_actions_import.AdditionalActions.default_iteration(), + ) diff --git a/packages/flow/octobot_flow/entities/automations/fetched_copy_trading_data.py b/packages/flow/octobot_flow/entities/automations/fetched_copy_trading_data.py new file mode 100644 index 0000000000..b198946520 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/fetched_copy_trading_data.py @@ -0,0 +1,18 @@ +import dataclasses +import octobot_commons.dataclasses +import octobot_flow.entities.community.trading_signal as trading_signal_import + + +@dataclasses.dataclass +class FetchedCopyTradingData(octobot_commons.dataclasses.MinimizableDataclass): + trading_signals: list[trading_signal_import.TradingSignal] = dataclasses.field(default_factory=list, repr=True) + + def __post_init__(self): + if self.trading_signals and isinstance(self.trading_signals[0], dict): + self.trading_signals = [ + trading_signal_import.TradingSignal.from_dict(trading_signal) + for trading_signal in self.trading_signals + ] + + def __bool__(self) -> bool: + return bool(self.trading_signals) diff --git a/packages/flow/octobot_flow/entities/automations/fetched_dependencies.py b/packages/flow/octobot_flow/entities/automations/fetched_dependencies.py new file mode 100644 index 0000000000..3559ee477d --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/fetched_dependencies.py @@ -0,0 +1,12 @@ +import dataclasses +import typing + +import octobot_commons.dataclasses + +import octobot_flow.entities.automations.fetched_exchange_data as fetched_exchange_data_import +import octobot_flow.entities.automations.fetched_copy_trading_data as fetched_copy_trading_data_import + +@dataclasses.dataclass +class FetchedDependencies(octobot_commons.dataclasses.MinimizableDataclass): + fetched_exchange_data: typing.Optional[fetched_exchange_data_import.FetchedExchangeData] = None + fetched_copy_trading_data: typing.Optional[fetched_copy_trading_data_import.FetchedCopyTradingData] = None diff --git a/packages/flow/octobot_flow/entities/automations/fetched_exchange_data.py b/packages/flow/octobot_flow/entities/automations/fetched_exchange_data.py new file mode 100644 index 0000000000..57e1039c22 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/fetched_exchange_data.py @@ -0,0 +1,124 @@ +import dataclasses +import typing +import decimal + +import octobot_commons.dataclasses +import octobot_commons.logging +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.exchanges +import octobot_trading.storage.orders_storage +import octobot_trading.api +import octobot_trading.enums +import octobot_trading.constants +import octobot_trading.personal_data + +import octobot_flow.enums + + + +@dataclasses.dataclass +class FetchedExchangeAccountElements(octobot_commons.dataclasses.MinimizableDataclass, octobot_commons.dataclasses.UpdatableDataclass): + portfolio: exchange_data_import.PortfolioDetails = dataclasses.field(default_factory=exchange_data_import.PortfolioDetails) + orders: exchange_data_import.OrdersDetails = dataclasses.field(default_factory=exchange_data_import.OrdersDetails) + positions: list[exchange_data_import.PositionDetails] = dataclasses.field(default_factory=list) + trades: list[dict] = dataclasses.field(default_factory=list) + + def __post_init__(self): + if self.portfolio and isinstance(self.portfolio, dict): + self.portfolio = exchange_data_import.PortfolioDetails.from_dict(self.portfolio) + if self.orders and isinstance(self.orders, dict): + self.orders = exchange_data_import.OrdersDetails.from_dict(self.orders) + if self.positions and isinstance(self.positions[0], dict): + self.positions = [ + exchange_data_import.PositionDetails.from_dict(position) for position in self.positions + ] + + def sync_from_exchange_manager( + self, exchange_manager: octobot_trading.exchanges.ExchangeManager + ) -> list[octobot_flow.enums.ChangedElements]: + changed_elements = [] + if self.sync_orders_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.ORDERS) + if self._sync_trades_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.TRADES) + if self.sync_portfolio_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.PORTFOLIO) + if self.sync_positions_from_exchange_manager(exchange_manager): + changed_elements.append(octobot_flow.enums.ChangedElements.POSITIONS) + return changed_elements + + def sync_orders_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_orders = self.orders + updated_open_orders_exchange_ids = set() + updated_open_orders = [] + updated_missing_orders = [] + for order in octobot_trading.api.get_open_orders(exchange_manager): + if order.is_self_managed(): + octobot_commons.logging.get_logger(self.__class__.__name__).error( + f"Self managed order created. This type of [{exchange_manager.exchange_name}] " + f"order is not supported, order is ignored. Order: {order}" + ) + continue + updated_open_orders_exchange_ids.add(order.exchange_order_id) + updated_open_orders.append( + octobot_trading.storage.orders_storage._format_order(order, exchange_manager) + ) + updated_missing_orders = [ + order + for exchange_id, order in octobot_trading.personal_data.get_enriched_orders_by_exchange_id(previous_orders.open_orders).items() + if exchange_id not in updated_open_orders_exchange_ids + ] + self.orders.open_orders = updated_open_orders + self.orders.missing_orders = updated_missing_orders + return previous_orders != self.orders + + def _sync_trades_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_trades = self.trades + self.trades = octobot_trading.api.get_trade_history(exchange_manager, as_dict=True) + return previous_trades != self.trades + + def sync_portfolio_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_portfolio = self.portfolio.content + self.portfolio.content = { + key: values + for key, values in octobot_trading.api.get_portfolio(exchange_manager, as_decimal=False).items() + if any(value for value in values.values()) # skip 0 value assets + } + return previous_portfolio != self.portfolio.content + + def sync_positions_from_exchange_manager(self, exchange_manager: octobot_trading.exchanges.ExchangeManager) -> bool: + previous_positions = self.positions + self.positions = [ + exchange_data_import.PositionDetails(position.to_dict(), position.symbol_contract.to_dict()) + for position in octobot_trading.api.get_positions(exchange_manager) + ] + return previous_positions != self.positions + + +@dataclasses.dataclass +class FetchedExchangePublicData(octobot_commons.dataclasses.MinimizableDataclass): + markets: list[exchange_data_import.MarketDetails] = dataclasses.field(default_factory=list) + tickers: dict[str, dict[str, typing.Any]] = dataclasses.field(default_factory=dict) + + +@dataclasses.dataclass +class FetchedExchangeData(octobot_commons.dataclasses.MinimizableDataclass): + public_data: FetchedExchangePublicData = dataclasses.field(default_factory=FetchedExchangePublicData) + + def __post_init__(self): + if self.public_data and isinstance(self.public_data, dict): + self.public_data = FetchedExchangePublicData.from_dict(self.public_data) + + def get_last_price(self, symbol: str) -> decimal.Decimal: + # use if as in most cases, tickers are not available for all symbols + if symbol in self.public_data.tickers: + try: + return decimal.Decimal(str( + self.public_data.tickers[symbol][ + octobot_trading.enums.ExchangeConstantsTickersColumns.CLOSE.value + ] + )) + except (KeyError, decimal.DecimalException): + return octobot_trading.constants.ZERO + else: + return octobot_trading.constants.ZERO diff --git a/packages/flow/octobot_flow/entities/automations/post_iteration_actions_details.py b/packages/flow/octobot_flow/entities/automations/post_iteration_actions_details.py new file mode 100644 index 0000000000..6e02669101 --- /dev/null +++ b/packages/flow/octobot_flow/entities/automations/post_iteration_actions_details.py @@ -0,0 +1,40 @@ +import dataclasses +import typing + +import octobot_commons.dataclasses + + +@dataclasses.dataclass +class RefreshExchangeBotsAuthenticatedDataDetails: + # todo update this when global view refresh trigger is implemented + exchange_community_internal_name: str + exchange_id: str + exchange_account_id: typing.Optional[str] + to_recall_bot_id: typing.Optional[str] = None + update_account_status: bool = False + ignored_exchange_account_ids: typing.Optional[set[str]] = None + + +@dataclasses.dataclass +class NextIterationDetails(octobot_commons.dataclasses.FlexibleDataclass): + instant_trigger: bool = False + unclearable_trade_exchange_order_ids: list[str] = dataclasses.field(default_factory=list) + + +@dataclasses.dataclass +class PostIterationActionsDetails(octobot_commons.dataclasses.MinimizableDataclass): + stop_automation: bool = False + postpone_execution: bool = False + postpone_reason: typing.Optional[str] = None + raisable_error: typing.Optional[str] = None + trigger_global_view_refresh: bool = False + trigger_global_view_refresh_args: typing.Optional[RefreshExchangeBotsAuthenticatedDataDetails] = None + next_iteration_details: typing.Optional[NextIterationDetails] = None + + def has_automation_actions(self) -> bool: + return bool(self.stop_automation) + + def should_cancel_iteration(self) -> bool: + # cancelled if global view refresh is triggered, otherwise proceed + # with next iteration required steps + return self.trigger_global_view_refresh \ No newline at end of file diff --git a/packages/flow/octobot_flow/entities/community/__init__.py b/packages/flow/octobot_flow/entities/community/__init__.py new file mode 100644 index 0000000000..083a40e029 --- /dev/null +++ b/packages/flow/octobot_flow/entities/community/__init__.py @@ -0,0 +1,7 @@ +from octobot_flow.entities.community.user_authentication import UserAuthentication +from octobot_flow.entities.community.trading_signal import TradingSignal + +__all__ = [ + "UserAuthentication", + "TradingSignal", +] diff --git a/packages/flow/octobot_flow/entities/community/trading_signal.py b/packages/flow/octobot_flow/entities/community/trading_signal.py new file mode 100644 index 0000000000..e3640b637a --- /dev/null +++ b/packages/flow/octobot_flow/entities/community/trading_signal.py @@ -0,0 +1,9 @@ +import dataclasses +import octobot_commons.dataclasses +import octobot_copy.entities + + +@dataclasses.dataclass +class TradingSignal(octobot_commons.dataclasses.MinimizableDataclass): + strategy_id: str = dataclasses.field(repr=True) + account: octobot_copy.entities.Account = dataclasses.field(repr=True) diff --git a/packages/flow/octobot_flow/entities/community/user_authentication.py b/packages/flow/octobot_flow/entities/community/user_authentication.py new file mode 100644 index 0000000000..cd5fb84c48 --- /dev/null +++ b/packages/flow/octobot_flow/entities/community/user_authentication.py @@ -0,0 +1,16 @@ +import dataclasses +import typing +import octobot_commons.dataclasses + + +@dataclasses.dataclass +class UserAuthentication(octobot_commons.dataclasses.FlexibleDataclass): + email: typing.Optional[str] = None + password: typing.Optional[str] = None + hidden: bool = False + user_id: typing.Optional[str] = None + auth_key: typing.Optional[str] = None + encrypted_keys_by_exchange: dict[str, str] = dataclasses.field(default_factory=dict) + + def has_auth_details(self) -> bool: + return bool(self.password or self.auth_key) \ No newline at end of file diff --git a/packages/flow/octobot_flow/enums.py b/packages/flow/octobot_flow/enums.py new file mode 100644 index 0000000000..adf9629bfb --- /dev/null +++ b/packages/flow/octobot_flow/enums.py @@ -0,0 +1,55 @@ +import enum + + +class LastTriggerReason(enum.Enum): + SCHEDULED = "scheduled" + CUSTOM_ACTION = "custom_action" + SIGNAL = "signal" + CONFIGURATION_UPDATE = "configuration_update" + UNDEFINED = None + + +class DegradedStateReasons(enum.Enum): + INVALID_EXCHANGE_CREDENTIALS = "invalid_exchange_credentials" + MISSING_API_KEY_TRADING_RIGHTS = "missing_api_key_trading_rights" + MISSING_STRATEGY_MINIMAL_FUNDS = "missing_strategy_minimal_funds" + WORKFLOW_INIT_ERROR = "workflow_init_error" + UNDEFINED = None + + +class ChangedElements(enum.Enum): + ORDERS = "orders" + TRADES = "trades" + PORTFOLIO = "portfolio" + POSITIONS = "positions" + TRANSACTIONS = "transactions" + + +class ActionType(enum.Enum): + APPLY_CONFIGURATION = "apply_configuration" + UNKNOWN = "unknown" + + +class ActionErrorStatus(enum.Enum): + NO_ERROR = None + NOT_ENOUGH_FUNDS = "not_enough_funds" + MISSING_SYMBOL = "missing_symbol" + SYMBOL_INCOMPATIBLE_WITH_ACCOUNT = "symbol_incompatible_with_account" + ORDER_NOT_FOUND = "order_not_found" + INVALID_ORDER = "invalid_order" + INVALID_CONFIG = "invalid_config" + INVALID_SIGNAL_FORMAT = "invalid_signal_format" + UNSUPPORTED_STOP_ORDER = "unsupported_stop_order" + INCOMPATIBLE_TRADING_TYPE = "incompatible_trading_type" + UNSUPPORTED_HEDGE_POSITION = "unsupported_hedge_position" + INTERNAL_ERROR = "internal_error" + BLOCKCHAIN_WALLET_ERROR = "blockchain_wallet_error" + DISABLED_FUNDS_TRANSFER_ERROR = "disabled_funds_transfer_error" + UNSUPPORTED_ACTION_TYPE = "unsupported_action_type" + MAX_ATTEMPTS_EXCEEDED = "max_attempts_exceeded" + DSL_EXECUTION_ERROR = "dsl_execution_error" + + +class AutomationWorkflowErrorStatus(enum.Enum): + EXCEPTION_DURING_ITERATION = "exception_during_iteration" + INVALID_ACTION_CONFIGURATION = "invalid_action_configuration" diff --git a/packages/flow/octobot_flow/environment.py b/packages/flow/octobot_flow/environment.py new file mode 100644 index 0000000000..e32bacb11d --- /dev/null +++ b/packages/flow/octobot_flow/environment.py @@ -0,0 +1,10 @@ +import octobot.constants # will load .env file and init constants + +import octobot_flow.repositories.community +import octobot_trading.constants + + +def initialize_environment(allow_funds_transfer: bool = False) -> None: + octobot_flow.repositories.community.initialize_community_authentication() + if allow_funds_transfer: + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True diff --git a/packages/flow/octobot_flow/errors.py b/packages/flow/octobot_flow/errors.py new file mode 100644 index 0000000000..e616ea9e6d --- /dev/null +++ b/packages/flow/octobot_flow/errors.py @@ -0,0 +1,85 @@ +class OctobotFlowError(Exception): + """parent class for all octobot flow errors""" + +class ConfigurationError(OctobotFlowError): + """an error related to the configuration of the bot""" + +class ExchangeError(OctobotFlowError): + """an error related to the bot's communication with the exchange""" + +class AutomationActionError(OctobotFlowError): + """an error related to an automation action execution""" + +class DSLExecutorError(OctobotFlowError): + """raise when a DSL executor error occurs""" + +class ExchangeAccountInitializationError(ExchangeError): + """raise when an exchange account initialization fails""" + +class InitializationRunFailedError(ConfigurationError): + """raise when an initialization run fails""" + + +class NoExchangeAccountDetailsError(ConfigurationError): + """raise when no exchange account details are available""" + + +class AutomationValidationError(ConfigurationError): + """raise when an automation configuration or state is invalid""" + + +class UnsupportedActionTypeError(AutomationActionError): + """raise when an unsupported action type is encountered""" + + +class UnsupportedConfiguredActionTypeError(UnsupportedActionTypeError): + """raise when an unsupported configured action type is encountered""" + + + +class InvalidAutomationActionError(ConfigurationError): + """raise when an automation action is invalid""" + + +class InvalidConfigurationActionError(ConfigurationError): + """raise when a configuration action is invalid""" + + +class NoProfileDataError(ConfigurationError): + """raise when no profile data is available""" + + +class NoAutomationError(ConfigurationError): + """raise when a automations state does not contain any automation""" + + +class CommunityError(ConfigurationError): + """an error related to the community authentication of the bot""" + + +class CommunityAuthenticationRequiredError(CommunityError): + """raise when community authentication is required""" + + +class CommunityTradingSignalError(CommunityError): + """an error related to the trading signal of the bot""" + + +class UnresolvedDSLScriptError(AutomationActionError): + """raise when a DSL script is not resolved""" + + +class ActionDependencyError(AutomationActionError): + """raise when an action dependency is invalid""" + + +class AutomationDAGResetError(AutomationActionError): + """raise when a DAG reset fails""" + + +class ActionDependencyNotFoundError(ActionDependencyError): + """raise when an action dependency is not found""" + + +class MissingDSLExecutorDependencyError(DSLExecutorError): + """raise when a DSL executor dependency is missing""" diff --git a/packages/flow/octobot_flow/jobs/__init__.py b/packages/flow/octobot_flow/jobs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/flow/octobot_flow/jobs/automation_job.py b/packages/flow/octobot_flow/jobs/automation_job.py new file mode 100644 index 0000000000..97bee6adbf --- /dev/null +++ b/packages/flow/octobot_flow/jobs/automation_job.py @@ -0,0 +1,430 @@ +import contextlib +import time +import typing + +import octobot_commons.json_util as json_util +import octobot_commons.logging as common_logging +import octobot.community +import octobot_commons.profiles.profile_data as profile_data_import + +import octobot_flow.entities +import octobot_flow.constants +import octobot_flow.enums +import octobot_flow.errors +import octobot_flow.logic.actions +import octobot_flow.logic.configuration +import octobot_flow.logic.dsl +import octobot_flow.repositories.community +import octobot_flow.encryption +import octobot_flow.jobs.exchange_account_job as exchange_account_job_import +import octobot_flow.jobs.automation_runner_job as automation_runner_job_import + + +class AutomationJob: + """ + Configures the automation environment and runs it: + 1. Parse the automation, initialize if necessary, resolve dependencies and DAG actions to prepare the automation environment. + 2. Use the AutomationRunner to run the automation itself. + 3. Execute pending priority actions if any, otherwise execute the DAG's executable actions. + """ + def __init__( + self, + automation_state: dict[str, typing.Any], + added_priority_actions: list[octobot_flow.entities.AbstractActionDetails], + updated_trading_signals: list[octobot_flow.entities.TradingSignal], + auth_details: typing.Union[octobot_flow.entities.UserAuthentication, dict], + ): + self.automation_state: octobot_flow.entities.AutomationState = ( + octobot_flow.entities.AutomationState.from_dict(automation_state) + ) + if added_priority_actions: + # Include added priority actions in the automation state. + # All pending priority actions will be executed before any other actions. + self.automation_state.update_priority_actions(added_priority_actions) + if updated_trading_signals: + default_reference_market = octobot_flow.logic.configuration.infer_reference_market( + self.automation_state.exchange_account_details, [], + ) + octobot_flow.logic.actions.update_trading_signals( + self.automation_state.automation.actions_dag.actions, + updated_trading_signals, + default_reference_market, + ) + self._validate_input() + self.auth_details: octobot_flow.entities.UserAuthentication = octobot_flow.entities.UserAuthentication.from_dict(auth_details) if isinstance(auth_details, dict) else auth_details + self.is_initialization_run = self._requires_initialization_run() + self.fetched_actions: list[octobot_flow.entities.AbstractActionDetails] = [] + self._logger: common_logging.BotLogger = common_logging.get_logger(self.__class__.__name__) + + async def run(self) -> list[octobot_flow.entities.AbstractActionDetails]: + if self.is_initialization_run: + # Configure the automation + return await self.execute_initialization_run() + t0 = time.time() + executed_actions = [] + async with self._maybe_authenticator() as maybe_authenticator: + maybe_community_repository = ( + octobot_flow.repositories.community.CommunityRepository(maybe_authenticator) + if maybe_authenticator else None + ) + with octobot_flow.encryption.decrypted_bots_configurations(self.automation_state): + to_execute_actions, are_priority_actions = self._get_actions_to_execute() + if are_priority_actions: + self._logger.info(f"Running {len(to_execute_actions)} priority actions: {to_execute_actions}") + self._resolve_dsl_scripts(to_execute_actions, True) + else: + # fetch the actions and signals if any + await self._fetch_actions(maybe_authenticator) + # resolve the DSL scripts in case it has dependencies on other actions + self._resolve_dsl_scripts( + self.automation_state.automation.actions_dag.get_executable_actions(), + True + ) + # fetch the dependencies of the automation environment + fetched_dependencies = await self._fetch_dependencies( + maybe_community_repository, to_execute_actions + ) + # Align on the previous scheduled time when possible when running priority actions + # to keep sleep cycles consistency when a priority action is processed. + default_next_execution_scheduled_to = ( + self.automation_state.automation.execution.current_execution.scheduled_to + if are_priority_actions else 0 + ) + # execute the automation + executed_actions = await self._execute_automation_actions( + maybe_community_repository, fetched_dependencies, to_execute_actions, + default_next_execution_scheduled_to + ) + # don't keep resolved DSL scripts after execution to avoid side effects + self._clear_resolved_dsl_scripts(executed_actions) + self._logger.info(f"Automation updated successfully in {round(time.time() - t0, 2)} seconds") + return executed_actions + + def update_actions_from_copy_trading_data( + self, + actions: list[octobot_flow.entities.AbstractActionDetails], + copy_trading_data: octobot_flow.entities.FetchedCopyTradingData, + default_reference_market: str, + ): + # adapt actions to reflect the new trading signals + for trading_signal in copy_trading_data.trading_signals: + for action in actions: + try: + octobot_flow.logic.actions.update_action_trading_signal_if_relevant( + action, trading_signal, default_reference_market + ) + except octobot_flow.errors.CommunityTradingSignalError: + # Signal applies to a different strategy than this copy_exchange_account action. + continue + + @contextlib.asynccontextmanager + async def _maybe_authenticator(self) -> typing.AsyncGenerator[typing.Optional[octobot.community.CommunityAuthentication], None]: + authenticator_factory = octobot_flow.repositories.community.CommunityAuthenticatorFactory( + self.auth_details + ) + if authenticator_factory.enable_community_authentication(): + if self.auth_details.has_auth_details(): + async with authenticator_factory.local_authenticator() as authenticator: + yield authenticator + else: + async with authenticator_factory.local_anon_authenticator() as authenticator: + yield authenticator + else: + yield None + + async def execute_initialization_run(self) -> list[octobot_flow.entities.AbstractActionDetails]: + executed_actions = [] + async with self._maybe_authenticator() as maybe_authenticator: + await self._fetch_actions(maybe_authenticator) + executed_actions = await self._initialize_exchange_account_details_from_actions() + if self._requires_initialization_run(): + raise octobot_flow.errors.InitializationRunFailedError( + "Initialization run is still required after running the initialization run" + ) + self._logger.info( + f"Initialization run completed, automation initialized on " + f"{self.automation_state.exchange_account_details.exchange_details.internal_name}" + ) + return executed_actions + + async def _initialize_exchange_account_details_from_actions(self) -> list[octobot_flow.entities.AbstractActionDetails]: + already_applied_config = False + actions, _ = self._get_actions_to_execute() + for action in actions: + if isinstance(action, octobot_flow.entities.ConfiguredActionDetails) and action.action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value: + if already_applied_config: + raise octobot_flow.errors.InitializationRunFailedError( + "Only one configuration action is allowed" + ) + await self._apply_configuration_from_action(action) + already_applied_config = True + else: + self._logger.info(f"Ignoring non configuration action before initialization: {action}") + return actions + + async def _apply_configuration_from_action( + self, action: octobot_flow.entities.ConfiguredActionDetails + ): + if self.automation_state.exchange_account_details is None: + self.automation_state.exchange_account_details = octobot_flow.entities.ExchangeAccountDetails() + action_configuration_updater = octobot_flow.logic.configuration.AutomationConfigurationUpdater( + self.automation_state, action + ) + await action_configuration_updater.update() + + async def _fetch_actions( + self, maybe_authenticator: typing.Optional[octobot.community.CommunityAuthentication] + ): + automation = self.automation_state.automation + if automation.execution.should_fetch_custom_actions_or_signals(): + user_actions_to_fetch = automation.execution.current_execution.custom_action_ids + signals_to_fetch = automation.execution.current_execution.signal_ids + if user_actions_to_fetch or signals_to_fetch: + authenticator = octobot_flow.repositories.community.ensure_is_authenticated(maybe_authenticator) + t0 = time.time() + all_actions: list[octobot_flow.entities.AbstractActionDetails] = [] + repository = octobot_flow.repositories.community.CustomActionsRepository(authenticator) + if user_actions_to_fetch: + all_actions.extend(await repository.fetch_custom_actions( + user_actions_to_fetch, select_pending_user_actions_only=True + )) + if signals_to_fetch: + all_actions.extend(await repository.fetch_signals( + signals_to_fetch, select_pending_signals_only=True + )) + self._logger.info( + f"Fetched {len(all_actions)} custom actions/signals for automation " + f"{automation.metadata.automation_id} in {round(time.time() - t0, 2)} seconds" + ) + self.fetched_actions.extend(all_actions) + + def _requires_initialization_run(self) -> bool: + return ( + self.automation_state.automation.execution.previous_execution.triggered_at == 0 + and ( + not self.automation_state.exchange_account_details + or not self.automation_state.exchange_account_details.exchange_details.internal_name + ) + ) + + async def _fetch_dependencies( + self, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + to_execute_actions: list[octobot_flow.entities.AbstractActionDetails], + ) -> octobot_flow.entities.FetchedDependencies: + self._logger.info("Fetching automation dependencies.") + minimal_profile_data = octobot_flow.logic.configuration.create_profile_data( + self.automation_state.exchange_account_details, + self.automation_state.automation.metadata.automation_id, + set() + ) + if fetched_copy_trading_data := await self._init_all_required_copy_trading_data( + maybe_community_repository, to_execute_actions, minimal_profile_data, + ): + default_reference_market = octobot_flow.logic.configuration.infer_reference_market( + self.automation_state.exchange_account_details, [], + ) + self.update_actions_from_copy_trading_data( + to_execute_actions, fetched_copy_trading_data, default_reference_market + ) + fetched_exchange_data = ( + await self._init_all_required_exchange_data( + self.automation_state.exchange_account_details, + maybe_community_repository, to_execute_actions, + minimal_profile_data, + ) + if self.automation_state.has_exchange() else None + ) + return octobot_flow.entities.FetchedDependencies( + fetched_exchange_data=fetched_exchange_data, + fetched_copy_trading_data=fetched_copy_trading_data, + ) + + async def _init_all_required_exchange_data( + self, + exchange_account_details: octobot_flow.entities.ExchangeAccountDetails, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + to_execute_actions: list[octobot_flow.entities.AbstractActionDetails], + minimal_profile_data: profile_data_import.ProfileData, + ) -> octobot_flow.entities.FetchedExchangeData: + t0 = time.time() + exchange_summary = ( + f"[{exchange_account_details.exchange_details.internal_name}] " + f"account with id: {exchange_account_details.exchange_details.exchange_account_id}" + ) + self._logger.info(f"Initializing all required data for {exchange_summary}.") + exchange_account_job = exchange_account_job_import.ExchangeAccountJob( + self.automation_state, self.fetched_actions + ) + symbols = set( + exchange_account_job.get_all_actions_symbols(minimal_profile_data) + + octobot_flow.logic.dsl.get_actions_symbol_dependencies( + to_execute_actions, minimal_profile_data + ) + ) + async with exchange_account_job.account_exchange_context( + octobot_flow.logic.configuration.create_profile_data( + self.automation_state.exchange_account_details, + self.automation_state.automation.metadata.automation_id, + symbols, + as_simulator=None, + ) + ): + await exchange_account_job.update_public_data() + self._logger.info( + f"Public data updated for {exchange_account_details.exchange_details.internal_name} in {round(time.time() - t0, 2)} seconds" + ) + t1 = time.time() + await exchange_account_job.update_authenticated_data() + self._logger.info( + f"Authenticated data updated for {exchange_account_details.exchange_details.internal_name} in {round(time.time() - t1, 2)} seconds" + ) + self._logger.info( + f"Initialized all required data for {exchange_summary} in {round(time.time() - t0, 2)} seconds." + ) + return exchange_account_job.fetched_dependencies.fetched_exchange_data # type: ignore + + async def _init_all_required_copy_trading_data( + self, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + to_execute_actions: list[octobot_flow.entities.AbstractActionDetails], + minimal_profile_data: profile_data_import.ProfileData, + ) -> typing.Optional[octobot_flow.entities.FetchedCopyTradingData]: + copy_trading_data = None + if to_fetch_signals := [ + copy_trading_dependency.strategy_id + for copy_trading_dependency in octobot_flow.logic.dsl.get_copy_trading_dependencies( + to_execute_actions, minimal_profile_data + ) + if copy_trading_dependency.refresh_required + ]: + if maybe_community_repository is None: + raise octobot_flow.errors.CommunityTradingSignalError( + "Community authentication is required to fetch copy trading signals" + ) + trading_signals_repository = octobot_flow.repositories.community.TradingSignalsRepository( + maybe_community_repository.authenticator + ) + self._logger.info(f"Fetching copy trading signals for {to_fetch_signals} strategies") + trading_signals = await trading_signals_repository.fetch_trading_signals( + to_fetch_signals, + octobot_flow.constants.DEFAULT_COPY_TRADING_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD + ) + copy_trading_data = octobot_flow.entities.FetchedCopyTradingData( + trading_signals=trading_signals + ) + return copy_trading_data + + async def _execute_automation_actions( + self, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + fetched_dependencies: octobot_flow.entities.FetchedDependencies, + to_execute_actions: list[octobot_flow.entities.AbstractActionDetails], + default_next_execution_scheduled_to: float + ) -> list[octobot_flow.entities.AbstractActionDetails]: + automation_runner_job = automation_runner_job_import.AutomationRunnerJob( + self.automation_state, fetched_dependencies, maybe_community_repository, + default_next_execution_scheduled_to + ) + automation = self.automation_state.automation + exchange_account_desc = ( + 'simulated exchange account' if self.automation_state.exchange_account_details.is_simulated() + else 'real exchange account' + ) + automation_signature = f"{exchange_account_desc} automation {automation.metadata.automation_id}" + try: + self._logger.info(f"Updating {automation_signature}") + automation_runner_job.validate(automation) + start_time = time.time() + async with automation_runner_job.actions_context( + to_execute_actions, + update_execution_details=True, + ): + await automation_runner_job.run() + self._logger.info( + f"{automation_signature} successfully updated in {round(time.time() - start_time, 2)} seconds" + ) + if automation.metadata.emit_signals: + await self._emit_trading_signals( + maybe_community_repository, automation, fetched_dependencies + ) + except octobot_flow.errors.AutomationValidationError as err: + self._logger.error( + f"{automation_signature} automation configuration is invalid: {err}" + ) + raise + except Exception as err: + self._logger.error( + f"Unexpected error when updating {automation_signature}: {err.__class__.__name__}: {err}" + ) + raise + return to_execute_actions + + async def _emit_trading_signals( + self, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + automation: octobot_flow.entities.AutomationDetails, + fetched_dependencies: octobot_flow.entities.FetchedDependencies, + ): + if not maybe_community_repository: + raise octobot_flow.errors.CommunityTradingSignalError( + "Community authentication is required to emit trading signals" + ) + reference_market = octobot_flow.logic.configuration.infer_reference_market( + self.automation_state.exchange_account_details, [], + ) + account = octobot_flow.logic.actions.reference_exchange_elements_to_account( + automation.exchange_account_elements, + fetched_dependencies.fetched_exchange_data, + reference_market + ) + trading_signals_repository = octobot_flow.repositories.community.TradingSignalsRepository( + maybe_community_repository.authenticator + ) + await trading_signals_repository.insert_trading_signal( + octobot_flow.entities.TradingSignal( + strategy_id=automation.metadata.strategy_id, account=account + ) + ) + + def _get_actions_to_execute(self) -> tuple[list[octobot_flow.entities.AbstractActionDetails], bool]: + if pending_priority_actions := self._get_pending_priority_actions(): + return pending_priority_actions, True + executable_actions = self.automation_state.automation.actions_dag.get_executable_actions() + return executable_actions + self.fetched_actions, False + + def _get_pending_priority_actions(self) -> list[octobot_flow.entities.AbstractActionDetails]: + return self.automation_state.get_pending_priority_actions() + + def _resolve_dsl_scripts( + self, actions: list[octobot_flow.entities.AbstractActionDetails], + from_actions_dag: bool + ): + if from_actions_dag: + self.automation_state.automation.actions_dag.resolve_dsl_scripts( + actions + ) + else: + local_dag = octobot_flow.entities.ActionsDAG(actions=actions) + local_dag.resolve_dsl_scripts(actions) + + def _clear_resolved_dsl_scripts(self, actions: list[octobot_flow.entities.AbstractActionDetails]): + for action in actions: + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails): + action.clear_resolved_dsl_script() + + def dump(self) -> dict: + return json_util.sanitize( + self.automation_state.to_dict(include_default_values=False) + ) # type: ignore + + async def __aenter__(self) -> "AutomationJob": + return self + + async def __aexit__(self, exc_type, exc_value, traceback) -> None: + # nothing to do for now + pass + + def _validate_input(self): + if not self.automation_state.automation.metadata.automation_id: + raise octobot_flow.errors.NoAutomationError("Automation is required") diff --git a/packages/flow/octobot_flow/jobs/automation_runner_job.py b/packages/flow/octobot_flow/jobs/automation_runner_job.py new file mode 100644 index 0000000000..a27273e72c --- /dev/null +++ b/packages/flow/octobot_flow/jobs/automation_runner_job.py @@ -0,0 +1,160 @@ +import contextlib +import typing + +import octobot_commons.profiles as commons_profiles +import octobot_commons.context_util as context_util +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import octobot_flow.entities +import octobot_flow.enums +import octobot_flow.errors +import octobot_flow.logic.configuration +import octobot_flow.logic.dsl +import octobot_flow.repositories.exchange +import octobot_flow.repositories.community +import octobot_flow.logic.actions + + +class AutomationRunnerJob(octobot_flow.repositories.exchange.ExchangeContextMixin): + """ + Runs the automation from the configured environment. + Sequentially executes the automation pre-actions, actions and post-actions. + Finally, completes the current execution and register the next execution scheduled time. + """ + USE_PREDICTIVE_ORDERS_SYNC: bool = True + + def __init__( + self, + automation_state: octobot_flow.entities.AutomationState, + fetched_dependencies: octobot_flow.entities.FetchedDependencies, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + default_next_execution_scheduled_to: float, + ): + super().__init__(automation_state, fetched_dependencies) + + self._maybe_community_repository: typing.Optional[ + octobot_flow.repositories.community.CommunityRepository + ] = maybe_community_repository + self._to_execute_actions: list[octobot_flow.entities.AbstractActionDetails] = None # type: ignore + self._default_next_execution_scheduled_to: float = default_next_execution_scheduled_to + self._update_execution_details: bool = True + + def validate(self, automation: octobot_flow.entities.AutomationDetails): + if not automation.metadata.automation_id: + raise octobot_flow.errors.AutomationValidationError( + f"automation_id is required. Found: {automation.metadata.automation_id}" + ) + + async def run(self): + if self._update_execution_details: + self.automation_state.automation.execution.start_execution() + # TODO implement to remove after POC 4 + # # 1. for each automation, process additional actions if necessary (ex: portfolio optimization) + # if self.automation_state.automation.execution.current_execution.additional_actions.has_trading_actions(): + # await self._process_additional_actions() + # TODO implement to remove after POC 4 + # # 2. process on filled and cancelled orders actions if necessary + # await self._process_on_filled_and_cancelled_orders_actions() + # # 3. update strategy if necessary + changed_elements, next_execution_scheduled_to = await self._execute_actions() + # if octobot_flow.enums.ChangedElements.ORDERS in changed_elements: + # TODO implement to remove after POC 4 + # # 4. process on filled and cancelled orders actions again if necessary + # await self._process_on_filled_and_cancelled_orders_actions() + # 5. execute post actions if necessary + if self.automation_state.automation.post_actions.has_automation_actions(): + await self._execute_post_actions() + # 6. register execution completion + if self._update_execution_details: + self.automation_state.automation.execution.complete_execution(next_execution_scheduled_to) + + async def _execute_actions(self) -> tuple[list[octobot_flow.enums.ChangedElements], float]: + actions_executor = octobot_flow.logic.actions.ActionsExecutor( + self._maybe_community_repository, self._exchange_manager, + self.profile_data_provider.get_profile_data(), + self.automation_state.automation, self._to_execute_actions, + self._update_execution_details, + ) + await actions_executor.execute() + return actions_executor.changed_elements, ( + # use self._default_next_execution_scheduled_to if set when no next_execution_scheduled_to + # is configured + actions_executor.next_execution_scheduled_to or self._default_next_execution_scheduled_to + ) + + async def _process_on_filled_and_cancelled_orders_actions(self): + # update chained orders, groups and other mechanics if necessary + if not self.automation_state.has_exchange(): + return + exchange_account_elements = self.automation_state.automation.exchange_account_elements + if exchange_account_elements is None: + return + if exchange_account_elements.has_pending_chained_orders(): + await self._update_chained_orders() + if exchange_account_elements.has_pending_groups(): + await self._update_groups() + + async def _update_chained_orders(self): + raise NotImplementedError("_update_chained_orders not implemented") + + async def _update_groups(self): + raise NotImplementedError("_update_groups not implemented") + + async def _process_additional_actions(self): + raise NotImplementedError("_process_additional_actions not implemented") + + async def _update_stopped_automation_sub_portfolio_if_necessary(self): + # TODO implement when supporting sub portfolios: unregister automation sub portfolio + pass + + async def _execute_post_actions(self): + if self.automation_state.automation.post_actions.stop_automation: + await self._update_stopped_automation_sub_portfolio_if_necessary() + + def init_predictive_orders_exchange_data(self, exchange_data: exchange_data_import.ExchangeData): + exchange_account_elements = self.automation_state.automation.exchange_account_elements + if exchange_account_elements is None: + return + exchange_data.markets = self.fetched_dependencies.fetched_exchange_data.public_data.markets + exchange_data.portfolio_details.content = exchange_account_elements.portfolio.content + exchange_data.orders_details.open_orders = exchange_account_elements.orders.open_orders + exchange_data.trades = exchange_account_elements.trades + + def _get_profile_data(self) -> commons_profiles.ProfileData: + minimal_profile_data = octobot_flow.logic.configuration.create_profile_data( + self.automation_state.exchange_account_details, + self.automation_state.automation.metadata.automation_id, + set() + ) + return octobot_flow.logic.configuration.create_profile_data( + self.automation_state.exchange_account_details, + self.automation_state.automation.metadata.automation_id, + set(octobot_flow.logic.dsl.get_actions_symbol_dependencies( + self._to_execute_actions, minimal_profile_data + )), + as_simulator=None, + ) + + @contextlib.asynccontextmanager + async def actions_context( + self, + actions: list[octobot_flow.entities.AbstractActionDetails], + update_execution_details: bool, + ): + try: + self._to_execute_actions = actions + self._update_execution_details = update_execution_details + with ( + self._maybe_community_repository.automation_context( + self.automation_state.automation + ) if self._maybe_community_repository else context_util.EmptyContextManager(), + self.profile_data_provider.profile_data_context(self._get_profile_data()) + ): + if not self.profile_data_provider.get_profile_data().profile_details.bot_id: + raise octobot_flow.errors.AutomationValidationError( + f"A bot_id is required to run a bot. Found: {self.profile_data_provider.get_profile_data().profile_details.bot_id}" + ) + async with self.exchange_manager_context(): + yield self + finally: + self._to_execute_actions = None # type: ignore diff --git a/packages/flow/octobot_flow/jobs/exchange_account_job.py b/packages/flow/octobot_flow/jobs/exchange_account_job.py new file mode 100644 index 0000000000..6bb882004f --- /dev/null +++ b/packages/flow/octobot_flow/jobs/exchange_account_job.py @@ -0,0 +1,220 @@ +import asyncio +import contextlib +import typing + +import octobot_commons.profiles as commons_profiles +import octobot_commons.constants as common_constants +import octobot_commons.symbols as symbol_util +import octobot_commons.list_util as list_util +import octobot_commons.logging as common_logging +import octobot_trading.constants as trading_constants +import octobot_trading.enums +import octobot_trading.errors +import octobot_trading.personal_data as personal_data +import octobot_trading.exchanges +import octobot_trading.exchange_data +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import tentacles.Meta.Keywords.scripting_library as scripting_library +import octobot_flow.repositories.exchange +import octobot_flow.entities +import octobot_flow.errors + +import octobot_flow.logic.exchange +import octobot_flow.logic.dsl + + +class ExchangeAccountJob(octobot_flow.repositories.exchange.ExchangeContextMixin): + def __init__( + self, + automation_state: octobot_flow.entities.AutomationState, + actions: list[octobot_flow.entities.AbstractActionDetails], + ): + super().__init__(automation_state, octobot_flow.entities.FetchedDependencies()) + self.actions: list[octobot_flow.entities.AbstractActionDetails] = actions + + self._logger: common_logging.BotLogger = common_logging.get_logger(self.__class__.__name__) + + async def update_public_data(self): + """ + Fetches all public data that might be required for any bot from the exchange + """ + self._ensure_exchange_dependencies() + await self._fetch_tickers() + await self._fetch_ohlcvs() + + async def update_authenticated_data(self): + fetched_authenticated_data = octobot_flow.entities.FetchedExchangeAccountElements() + self._ensure_exchange_dependencies() + await self._fetch_authenticated_data(fetched_authenticated_data) + await self._update_bot_authenticated_data(fetched_authenticated_data) + + async def _fetch_authenticated_data(self, fetched_authenticated_data: octobot_flow.entities.FetchedExchangeAccountElements): + coros = [ + self._fetch_open_orders(fetched_authenticated_data), + self._fetch_portfolio(fetched_authenticated_data), + ] + if self._exchange_manager.is_future: + coros.append(self._fetch_positions(fetched_authenticated_data)) + await asyncio.gather(*coros) + + async def _update_bot_authenticated_data( + self, + fetched_authenticated_data: octobot_flow.entities.FetchedExchangeAccountElements, + ): + # bind fetched data to the relevant automation account + is_simulated = self.automation_state.exchange_account_details.is_simulated() + if is_simulated: + simulated_exchange_account_resolver = octobot_flow.logic.exchange.SimulatedExchangeAccountResolver( + self.automation_state, + self.fetched_dependencies, + self.actions, + ) + await simulated_exchange_account_resolver.resolve() + else: + # updating account with real trading data: + target_account = self.automation_state.automation.exchange_account_elements + if target_account is None: + raise octobot_flow.errors.ExchangeAccountInitializationError( + "Exchange account elements are required to update the account" + ) + target_account.orders = fetched_authenticated_data.orders + target_account.positions = fetched_authenticated_data.positions + sub_portfolio_resolver = octobot_flow.logic.exchange.SubPortfolioResolver( + self.automation_state + ) + await sub_portfolio_resolver.resolve() + + @contextlib.asynccontextmanager + async def account_exchange_context(self, global_profile_data: commons_profiles.ProfileData): + with self.profile_data_provider.profile_data_context(global_profile_data): + async with self.exchange_manager_context() as exchange_manager: + await octobot_trading.exchanges.create_temporary_exchange_channels_and_producers( + exchange_manager, + create_authenticated_producers=not self.profile_data_provider.get_profile_data().trader_simulator.enabled, + ) + yield + + async def _fetch_and_save_ohlcv( + self, repository: octobot_flow.repositories.exchange.OhlcvRepository, + symbol: str, time_frame: str, limit: int, tickers: dict[str, dict[str, typing.Any]] + ): + market = await repository.fetch_ohlcv(symbol, time_frame, limit, tickers) + self._logger.info( + f"Fetched [{self._exchange_manager.exchange_name}] OHLCV for {symbol} {time_frame}: ({len(market.close)} candles)" + ) + self.fetched_dependencies.fetched_exchange_data.public_data.markets.append(market) + + async def _fetch_ohlcvs(self): + repository = self.get_exchange_repository_factory().get_ohlcv_repository() + history_size = scripting_library.get_required_candles_count( + self.profile_data_provider.get_profile_data(), trading_constants.MIN_CANDLES_HISTORY_SIZE + ) + symbols = self._get_traded_symbols() + time_frames = self._get_time_frames() + await asyncio.gather(*[ + self._fetch_and_save_ohlcv( + repository, symbol, time_frame, history_size, + self.fetched_dependencies.fetched_exchange_data.public_data.tickers + ) + for symbol in symbols + for time_frame in time_frames + ]) + + + async def _fetch_tickers(self): + repository = self.get_exchange_repository_factory().get_tickers_repository() + self.fetched_dependencies.fetched_exchange_data.public_data.tickers = await repository.fetch_tickers( + self._get_traded_symbols() + ) + ticker_close_by_symbols = { + symbol: ticker[octobot_trading.enums.ExchangeConstantsTickersColumns.CLOSE.value] + for symbol, ticker in self.fetched_dependencies.fetched_exchange_data.public_data.tickers.items() + } + logged_tickers = f" tickers: {ticker_close_by_symbols}" if len(ticker_close_by_symbols) < 10 else "" + self._logger.info( + f"Fetched [{self._exchange_manager.exchange_name}] {len(self.fetched_dependencies.fetched_exchange_data.public_data.tickers)}{logged_tickers}" + ) + + async def _fetch_positions(self, fetched_authenticated_data: octobot_flow.entities.FetchedExchangeAccountElements): + repository = self.get_exchange_repository_factory().get_positions_repository() + fetched_authenticated_data.positions = await repository.fetch_positions(self._get_traded_symbols()) + self._logger.info( + f"Fetched [{self._exchange_manager.exchange_name}] {len(fetched_authenticated_data.positions)} positions: " + f"{[position.position for position in fetched_authenticated_data.positions]}" + ) + + async def _fetch_open_orders(self, fetched_authenticated_data: octobot_flow.entities.FetchedExchangeAccountElements): + repository = self.get_exchange_repository_factory().get_orders_repository() + symbols = self._get_traded_symbols() + try: + open_orders = await repository.fetch_open_orders(symbols) + except octobot_trading.errors.NotSupported as err: + self._logger.info(f"Fetching open orders is not supported: {err}.") + open_orders = [] + account_elements = self.automation_state.automation.exchange_account_elements + previous_open_orders = ( + account_elements.orders.open_orders if account_elements is not None else [] + ) + fetched_authenticated_data.orders.open_orders = repository.update_enriched_orders( + open_orders, + previous_open_orders + ) + self._logger.info( + f"Fetched [{self._exchange_manager.exchange_name}] " + f"{personal_data.get_symbol_count(open_orders) or "0"} open orders for {symbols}" + ) + + async def _fetch_portfolio(self, fetched_authenticated_data: octobot_flow.entities.FetchedExchangeAccountElements): + repository_factory = self.get_exchange_repository_factory() + repository = repository_factory.get_portfolio_repository() + try: + fetched_authenticated_data.portfolio.full_content = await repository.fetch_portfolio() # type: ignore + except octobot_trading.errors.NotSupported as err: + self._logger.info(f"Fetching portfolio is not supported: {err}. Diabling portfolio validations.") + fetched_authenticated_data.portfolio.full_content = {} + balance_summary = common_logging.get_private_placeholder_if_necessary( + personal_data.get_balance_summary(fetched_authenticated_data.portfolio.full_content, use_exchange_format=False) + ) + self._logger.info( + f"Fetched [{self._exchange_manager.exchange_name}] full " + f"[{'simulated' if repository_factory.is_simulated else 'real'}] portfolio: " + f"{balance_summary}" + ) + self._update_exchange_account_portfolio(fetched_authenticated_data.portfolio) + + def _update_exchange_account_portfolio(self, portfolio: exchange_data_import.PortfolioDetails): + unit = scripting_library.get_default_exchange_reference_market(self._exchange_manager.exchange_name) + self.automation_state.exchange_account_details.portfolio.content = [ + octobot_flow.entities.PortfolioAssetHolding( + asset, + float(values[common_constants.PORTFOLIO_AVAILABLE]), + float(values[common_constants.PORTFOLIO_TOTAL]), + value=float( + ( + self.fetched_dependencies.fetched_exchange_data.get_last_price( + symbol_util.merge_currencies(asset, unit) + ) if asset != unit else trading_constants.ONE + ) * values[common_constants.PORTFOLIO_TOTAL] # type: ignore + ), + ) + for asset, values in portfolio.full_content.items() + ] + + def _get_traded_symbols(self) -> list[str]: + profile_data = self.profile_data_provider.get_profile_data() + config_symbols = profile_data.get_traded_symbols() + return list_util.deduplicate( + config_symbols + self.get_all_actions_symbols(profile_data) + ) + + def get_all_actions_symbols(self, profile_data: commons_profiles.ProfileData) -> list[str]: + return octobot_flow.logic.dsl.get_actions_symbol_dependencies( + self.actions, profile_data + ) + + def _get_time_frames(self) -> list[str]: + return scripting_library.get_time_frames(self.profile_data_provider.get_profile_data()) + + def _ensure_exchange_dependencies(self): + if not self.fetched_dependencies.fetched_exchange_data: + self.fetched_dependencies.fetched_exchange_data = octobot_flow.entities.FetchedExchangeData() diff --git a/packages/flow/octobot_flow/logic/__init__.py b/packages/flow/octobot_flow/logic/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/flow/octobot_flow/logic/actions/__init__.py b/packages/flow/octobot_flow/logic/actions/__init__.py new file mode 100644 index 0000000000..85b1537b04 --- /dev/null +++ b/packages/flow/octobot_flow/logic/actions/__init__.py @@ -0,0 +1,17 @@ +from octobot_flow.logic.actions.actions_executor import ActionsExecutor +from octobot_flow.logic.actions.actions_factory import create_copy_exchange_account_action +from octobot_flow.logic.actions.account_copy_util import ( + update_action_trading_signal_if_relevant, + update_trading_signals, + reference_exchange_elements_to_account, + create_account_copy_settings, +) + +__all__ = [ + "ActionsExecutor", + "create_copy_exchange_account_action", + "update_action_trading_signal_if_relevant", + "update_trading_signals", + "reference_exchange_elements_to_account", + "create_account_copy_settings", +] diff --git a/packages/flow/octobot_flow/logic/actions/abstract_action_executor.py b/packages/flow/octobot_flow/logic/actions/abstract_action_executor.py new file mode 100644 index 0000000000..8097c1cb16 --- /dev/null +++ b/packages/flow/octobot_flow/logic/actions/abstract_action_executor.py @@ -0,0 +1,20 @@ +import typing + +import octobot_commons.logging +import octobot.community + +import octobot_flow.entities + + +class AbstractActionExecutor: + def __init__( + self, + ): + self.pending_bot_logs: list[octobot.community.BotLogData] = [] + + async def execute_action(self, action: octobot_flow.entities.AbstractActionDetails) -> typing.Any: + raise NotImplementedError("execute_action is not implemented for this action type") + + + def get_logger(self) -> octobot_commons.logging.BotLogger: + return octobot_commons.logging.get_logger(self.__class__.__name__) diff --git a/packages/flow/octobot_flow/logic/actions/account_copy_util.py b/packages/flow/octobot_flow/logic/actions/account_copy_util.py new file mode 100644 index 0000000000..a90fb858e0 --- /dev/null +++ b/packages/flow/octobot_flow/logic/actions/account_copy_util.py @@ -0,0 +1,146 @@ +import decimal +import time + +import octobot_commons.constants as common_constants +import octobot_commons.dsl_interpreter +import octobot_commons.profiles as commons_profiles +import octobot_commons.symbols as symbol_util +import octobot_commons.logging as logging + +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities + +import octobot_flow.constants +import octobot_flow.entities +import octobot_flow.errors +import octobot_flow.logic.actions.actions_factory as actions_factory +import octobot_flow.logic.dsl as dsl_logic + +import tentacles.Meta.DSL_operators.exchange_operators as exchange_operators + + +def update_action_trading_signal_if_relevant( + action: octobot_flow.entities.AbstractActionDetails, + trading_signal: octobot_flow.entities.TradingSignal, + default_reference_market: str, +) -> None: + if not isinstance(action, octobot_flow.entities.DSLScriptActionDetails): + return + dsl_script = action.dsl_script + if dsl_script is None or not str(dsl_script).strip(): + raise octobot_flow.errors.InvalidAutomationActionError( + "DSL script is required to update trading signal on a DSL action" + ) + dsl_script_str = str(dsl_script) + if common_constants.UNRESOLVED_PARAMETER_PLACEHOLDER in dsl_script_str: + raise octobot_flow.errors.UnresolvedDSLScriptError( + "DSL script has unresolved parameters; resolve dependencies before applying trading signals" + ) + dsl_executor = dsl_logic.DSLExecutor( + commons_profiles.ProfileData(), + None, + dsl_script_str, + ) + top = dsl_executor.get_top_operator() + if not isinstance(top, octobot_commons.dsl_interpreter.Operator): + return + if top.get_name() not in ( + exchange_operators.CopyExchangeAccountOperatorNames.COPY_EXCHANGE_ACCOUNT.value, + ): + return + params = top.get_computed_value_by_parameter() + dsl_strategy_id = str(params["strategy_id"]) + if dsl_strategy_id != str(trading_signal.strategy_id): + raise octobot_flow.errors.CommunityTradingSignalError( + f"Trading signal strategy_id {trading_signal.strategy_id!r} does not match " + f"copy_exchange_account strategy_id {dsl_strategy_id!r}" + ) + account_copy_settings = copy_entities.parse_account_copy_settings( + params.get("account_copy_settings") + ) + reference_market = str(params["reference_market"]) or default_reference_market + new_details = actions_factory.create_copy_exchange_account_action( + params["strategy_id"], # type: ignore + reference_market, + trading_signal.account, + account_copy_settings, + ) + action.dsl_script = new_details.dsl_script + action.resolved_dsl_script = new_details.resolved_dsl_script + + +def update_trading_signals( + actions: list[octobot_flow.entities.AbstractActionDetails], + trading_signals: list[octobot_flow.entities.TradingSignal], + default_reference_market: str, +) -> None: + for trading_signal in trading_signals: + for action in actions: + try: + update_action_trading_signal_if_relevant( + action, trading_signal, default_reference_market + ) + except octobot_flow.errors.CommunityTradingSignalError: + # Signal applies to a different strategy than this copy_exchange_account action. + continue + + +def reference_exchange_elements_to_account( + elements: octobot_flow.entities.ExchangeAccountElements, + fetched_exchange_data: octobot_flow.entities.FetchedExchangeData, + reference_market: str, +) -> copy_entities.Account: + content: dict[str, dict[str, decimal.Decimal]] = {} + value_by_asset = {} + zero_value = decimal.Decimal("0") + for asset, values in elements.portfolio.content.items(): + content[asset] = { + key: decimal.Decimal(str(amount)) for key, amount in values.items() + } + if asset == reference_market: + value_by_asset[asset] = decimal.Decimal(str(values[common_constants.PORTFOLIO_TOTAL])) + else: + asset_value = zero_value + try: + if price := fetched_exchange_data.get_last_price( + symbol_util.merge_currencies(asset, reference_market) + ): + asset_value = decimal.Decimal(str(values[common_constants.PORTFOLIO_TOTAL])) * price + else: + logging.get_logger("account_copy_util").error( + f"No ticker price found for {symbol_util.merge_currencies(asset, reference_market)}. " + f"Portfolio ratios will be inaccurate." + ) + except KeyError as err: + logging.get_logger("account_copy_util").error( + f"Impossible to evaluate {symbol_util.merge_currencies(asset, reference_market)} price: " + f"no fetched ticker price ({err})" + ) + value_by_asset[asset] = asset_value + total_value = sum(value_by_asset.values()) + for asset, values in elements.portfolio.content.items(): + if total_value == zero_value: + content[asset][copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = zero_value + else: + content[asset][copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = value_by_asset[asset] / total_value + return copy_entities.Account( + updated_at=time.time(), + content=content, + orders=elements.orders.open_orders + ) + + +def create_account_copy_settings( + automation: octobot_flow.entities.AutomationDetails, +) -> copy_entities.AccountCopySettings: + grace_seconds = octobot_flow.constants.DEFAULT_COPY_TRADING_ORPHAN_CANCEL_GRACE_SECONDS + threshold = octobot_flow.constants.DEFAULT_COPY_TRADING_ORPHAN_GRACE_ABORT_THRESHOLD + missed_signals_threshold = octobot_flow.constants.DEFAULT_COPY_TRADING_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD + return copy_entities.AccountCopySettings( + mirrored_orphan_cancel_grace_seconds=grace_seconds, + mirrored_orphan_grace_abort_threshold=threshold, + missed_signals_grace_abort_threshold=missed_signals_threshold, + mirrored_orphan_grace_pair_ratio_max_delta=( + octobot_flow.constants.DEFAULT_COPY_TRADING_ORPHAN_GRACE_PAIR_RATIO_MAX_DELTA + ), + ) diff --git a/packages/flow/octobot_flow/logic/actions/actions_executor.py b/packages/flow/octobot_flow/logic/actions/actions_executor.py new file mode 100644 index 0000000000..4be98beebd --- /dev/null +++ b/packages/flow/octobot_flow/logic/actions/actions_executor.py @@ -0,0 +1,184 @@ +import typing + +import octobot_commons.logging +import octobot_commons.dsl_interpreter +import octobot_commons.profiles +import octobot_trading.exchanges + +import octobot.community + +import octobot_flow.entities +import octobot_flow.repositories.community +import octobot_flow.logic.dsl +import octobot_flow.enums +import octobot_flow.errors + +import tentacles.Meta.DSL_operators.exchange_operators as exchange_operators +import tentacles.Meta.DSL_operators.blockchain_wallet_operators as blockchain_wallet_operators + + +class ActionsExecutor: + def __init__( + self, + maybe_community_repository: typing.Optional[octobot_flow.repositories.community.CommunityRepository], + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], + profile_data: octobot_commons.profiles.ProfileData, + automation: octobot_flow.entities.AutomationDetails, + actions: list[octobot_flow.entities.AbstractActionDetails], + update_execution_details: bool, + ): + self.changed_elements: list[octobot_flow.enums.ChangedElements] = [] + self.next_execution_scheduled_to: float = 0 + + self._maybe_community_repository: typing.Optional[ + octobot_flow.repositories.community.CommunityRepository + ] = maybe_community_repository + self._exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager] = exchange_manager + self._profile_data: octobot_commons.profiles.ProfileData = profile_data + self._automation: octobot_flow.entities.AutomationDetails = automation + self._actions: list[octobot_flow.entities.AbstractActionDetails] = actions + self._update_execution_details: bool = update_execution_details + + async def execute(self): + dsl_executor = octobot_flow.logic.dsl.DSLExecutor( + self._profile_data, + self._exchange_manager, + None, + ) + if self._exchange_manager: + await octobot_trading.exchanges.create_exchange_channels(self._exchange_manager) + recall_dag_details: typing.Optional[octobot_commons.dsl_interpreter.ReCallingOperatorResult] = None + async with dsl_executor.dependencies_context(self._actions): + for index, action in enumerate(self._actions): + await self._execute_action(dsl_executor, action) + if self._update_execution_details: + recall_dag_details, should_stop_processing = self._handle_execution_result(action, index) + if should_stop_processing: + break + self._sync_after_execution() + if self._update_execution_details: + await self._update_actions_history() + await self._insert_execution_bot_logs(dsl_executor.pending_bot_logs) + if recall_dag_details: + self._reset_dag_to(recall_dag_details) + # next execution is scheduled to the time configured by the reset operator + self.next_execution_scheduled_to = self._compute_next_execution_scheduled_to( + recall_dag_details + ) + elif self._update_execution_details: + # no reset: schedule immediately + self.next_execution_scheduled_to = 0 + + def _handle_execution_result( + self, action: octobot_flow.entities.AbstractActionDetails, index: int + ) -> tuple[typing.Optional[octobot_commons.dsl_interpreter.ReCallingOperatorResult], bool]: + if not isinstance(action.result, dict): + return None, False + if octobot_flow.entities.PostIterationActionsDetails.__name__ in action.result: + post_iteration_actions_details = octobot_flow.entities.PostIterationActionsDetails.from_dict( + action.result[octobot_flow.entities.PostIterationActionsDetails.__name__] + ) + if post_iteration_actions_details.stop_automation: + self._get_logger().info(f"Stopping automation: {self._automation.metadata.automation_id}") + self._automation.post_actions.stop_automation = True + # todo cancel open orders and sell assets if required in action config + return None, True + return None, False + if octobot_commons.dsl_interpreter.ReCallingOperatorResult.is_re_calling_operator_result(action.result): + recall_dag_details = octobot_commons.dsl_interpreter.ReCallingOperatorResult.from_dict( + action.result[octobot_commons.dsl_interpreter.ReCallingOperatorResult.__name__] + ) + if not recall_dag_details.reset_to_id: + # reset to the current action if no specific id is provided (loop on this action) + recall_dag_details.reset_to_id = action.id + if recall_dag_details.reset_to_id == action.id: + # Keep executing other selected actions if any: those are not affected by the reset + # as they don't depend on the reset action + return recall_dag_details, False + # Reset to a past action: interrupt execution of the following actions + # as they might depend on the reset action + if index < len(self._actions) - 1: + interrupted_action = self._actions[index + 1: ] + self._get_logger().info( + f"DAG reset required. Interrupting execution of " + f"{len(interrupted_action)} actions: " + f"{', '.join([action.id for action in interrupted_action])}" + ) + return recall_dag_details, True + return None, False + + async def _execute_action( + self, + dsl_executor: "octobot_flow.logic.dsl.DSLExecutor", + action: octobot_flow.entities.AbstractActionDetails + ): + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails): + return await dsl_executor.execute_action(action) + raise octobot_flow.errors.UnsupportedActionTypeError( + f"{self.__class__.__name__} does not support action type: {type(action)}" + ) from None + + def _reset_dag_to( + self, recall_dag_details: octobot_commons.dsl_interpreter.ReCallingOperatorResult + ): + if not recall_dag_details.reset_to_id: + raise octobot_flow.errors.AutomationDAGResetError( + f"Reset to id is required to reset the DAG. got: {recall_dag_details}" + ) + self._automation.actions_dag.reset_to(recall_dag_details.reset_to_id) + + def _compute_next_execution_scheduled_to( + self, recall_dag_details: octobot_commons.dsl_interpreter.ReCallingOperatorResult + ) -> float: + return recall_dag_details.get_next_call_time() or 0 + + async def _update_actions_history(self): + if to_update_actions := [ + action + for action in self._actions + if action.should_be_historised_in_database() + ]: + raise NotImplementedError("_update_actions_history is not implemented yet") + + async def _insert_execution_bot_logs(self, log_data: list[octobot.community.BotLogData]): + try: + community_repository = octobot_flow.repositories.community.ensure_authenticated_community_repository( + self._maybe_community_repository + ) + await community_repository.insert_bot_logs(log_data) + except octobot_flow.errors.CommunityAuthenticationRequiredError: + # no available community repository: skip bot logs to insert + self._get_logger().info( + "No available community repository: bot logs upload is skipped" + ) + + def _sync_after_execution(self): + if exchange_account_elements := self._automation.exchange_account_elements: + new_transactions = self._get_new_transactions_from_actions_results(exchange_account_elements) + self._sync_exchange_account_elements(exchange_account_elements, new_transactions) + + def _get_new_transactions_from_actions_results( + self, + exchange_account_elements: octobot_flow.entities.ExchangeAccountElements, + ): + new_transactions = [] + for action in self._actions: + if not action.is_completed() or not isinstance(action.result, dict): + continue + if created_transactions := ( + action.result.get(exchange_operators.CREATED_WITHDRAWALS_KEY, []) + + action.result.get(blockchain_wallet_operators.CREATED_TRANSACTIONS_KEY, []) + ): + new_transactions.extend(created_transactions) + return new_transactions + + def _sync_exchange_account_elements( + self, + exchange_account_elements: octobot_flow.entities.ExchangeAccountElements, + new_transactions: list[dict], + ): + if self._exchange_manager or new_transactions: + self.changed_elements = exchange_account_elements.sync_from_exchange_manager(self._exchange_manager, new_transactions) + + def _get_logger(self) -> octobot_commons.logging.BotLogger: + return octobot_commons.logging.get_logger(self.__class__.__name__) diff --git a/packages/flow/octobot_flow/logic/actions/actions_factory.py b/packages/flow/octobot_flow/logic/actions/actions_factory.py new file mode 100644 index 0000000000..f33738f68a --- /dev/null +++ b/packages/flow/octobot_flow/logic/actions/actions_factory.py @@ -0,0 +1,36 @@ +import decimal +import enum +import json +import typing + +import octobot_copy.entities as copy_entities +import octobot_flow.entities + + +def _json_serialize_for_dsl(obj: typing.Any) -> typing.Any: + if isinstance(obj, enum.Enum): + return obj.value + if isinstance(obj, decimal.Decimal): + return str(obj) + raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable") + + +def create_copy_exchange_account_action( + strategy_id: str, + reference_market: str, + reference_account: copy_entities.Account, + account_copy_settings: typing.Optional[copy_entities.AccountCopySettings] = None, +) -> octobot_flow.entities.DSLScriptActionDetails: + reference_dict = reference_account.to_dict(include_default_values=False) + settings_dict = account_copy_settings.to_dict(include_default_values=False) if account_copy_settings else {} + ref_json = json.dumps(reference_dict, default=_json_serialize_for_dsl) + settings_json = json.dumps(settings_dict, default=_json_serialize_for_dsl) + dsl_script = ( + f"copy_exchange_account(strategy_id={json.dumps(strategy_id)}, reference_market='{reference_market}', " + f"reference_account='{ref_json}', account_copy_settings='{settings_json}')" + ) + return octobot_flow.entities.DSLScriptActionDetails( + id="copy_exchange_account", + dsl_script=dsl_script, + resolved_dsl_script=dsl_script, + ) diff --git a/packages/flow/octobot_flow/logic/configuration/__init__.py b/packages/flow/octobot_flow/logic/configuration/__init__.py new file mode 100644 index 0000000000..3d5f5bd80f --- /dev/null +++ b/packages/flow/octobot_flow/logic/configuration/__init__.py @@ -0,0 +1,13 @@ +from octobot_flow.logic.configuration.profile_data_provider import ProfileDataProvider +from octobot_flow.logic.configuration.automation_configuration_updater import AutomationConfigurationUpdater +from octobot_flow.logic.configuration.profile_data_factory import ( + create_profile_data, + infer_reference_market, +) + +__all__ = [ + "ProfileDataProvider", + "AutomationConfigurationUpdater", + "create_profile_data", + "infer_reference_market", +] diff --git a/packages/flow/octobot_flow/logic/configuration/automation_configuration_updater.py b/packages/flow/octobot_flow/logic/configuration/automation_configuration_updater.py new file mode 100644 index 0000000000..c83443fd2a --- /dev/null +++ b/packages/flow/octobot_flow/logic/configuration/automation_configuration_updater.py @@ -0,0 +1,116 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import time +import copy +import typing + +import octobot_commons.logging as common_logging +import octobot_commons.profiles.profile_data as profiles_import +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import octobot_flow.entities +import octobot_flow.errors + + +class AutomationConfigurationUpdater: + def __init__( + self, + automation_state: octobot_flow.entities.AutomationState, + action: octobot_flow.entities.ConfiguredActionDetails, + ): + self.automation_state: octobot_flow.entities.AutomationState = automation_state + self.action: octobot_flow.entities.ConfiguredActionDetails = action + self._logger: common_logging.BotLogger = common_logging.get_logger(self.__class__.__name__) + + async def update(self): + start_time = time.time() + try: + automation_state_update = octobot_flow.entities.AutomationState.from_dict( + self.action.config + ) + except TypeError as err: + raise octobot_flow.errors.InvalidConfigurationActionError( + f"Invalid configuration update format: {err}. " + f"A octobot_flow.entities.AutomationState parsable dict is expected." + ) from err + self._apply_automation_state_configuration_update(automation_state_update) + self._register_execution_time(start_time) + self._complete_execution_and_register_next_schedule_time() + self.action.complete() + + def _apply_automation_state_configuration_update( + self, automation_state_update: octobot_flow.entities.AutomationState + ): + if automation_state_update.exchange_account_details: + updating_exchange_account_id = self._update_exchange_details( + automation_state_update.exchange_account_details + ) + if updating_exchange_account_id: + self._logger.info("Resetting exchange auth details as the exchange account id has changed") + self.automation_state.exchange_account_details.auth_details = exchange_data_import.ExchangeAuthDetails() + else: + self._update_auth_details(automation_state_update.exchange_account_details) + self._update_portfolio(automation_state_update.exchange_account_details) + self._update_automation(automation_state_update) + + def _update_exchange_details( + self, configuration_update: octobot_flow.entities.ExchangeAccountDetails + ) -> bool: + exchange_data_update = profiles_import.ExchangeData().get_update( + configuration_update.exchange_details + ) + updating_exchange_account_id = bool( + exchange_data_update.exchange_account_id + and exchange_data_update.exchange_account_id != self.automation_state.exchange_account_details.exchange_details.exchange_account_id + ) + self.automation_state.exchange_account_details.exchange_details.update(exchange_data_update) + return updating_exchange_account_id + + def _update_auth_details( + self, configuration_update: octobot_flow.entities.ExchangeAccountDetails + ): + local_auth_details = copy.deepcopy(configuration_update.auth_details) + base_auth_details = exchange_data_import.ExchangeAuthDetails() + local_auth_details.exchange_credential_id = None + auth_details_update = base_auth_details.get_update(local_auth_details) + self.automation_state.exchange_account_details.auth_details.update(auth_details_update) + + def _update_portfolio( + self, configuration_update: octobot_flow.entities.ExchangeAccountDetails + ): + if self.automation_state.exchange_account_details.is_simulated(): + portfolio_update = octobot_flow.entities.ExchangeAccountPortfolio().get_update(configuration_update.portfolio) + self.automation_state.exchange_account_details.portfolio.update(portfolio_update) + + def _update_automation( + self, automation_state_update: octobot_flow.entities.AutomationState + ): + automation_update = automation_state_update.automation + base_automation = octobot_flow.entities.AutomationDetails() + update_result = base_automation.get_update(automation_update) + self.automation_state.automation.update(update_result) + + def _register_execution_time(self, start_time: float): + automation = self.automation_state.automation + if automation.execution.previous_execution.triggered_at: + automation.execution.current_execution.triggered_at = automation.execution.previous_execution.triggered_at + else: + automation.execution.current_execution.triggered_at = start_time + + def _complete_execution_and_register_next_schedule_time(self): + self.automation_state.automation.execution.complete_execution(0) + self._logger.info(f"Next action will trigger immediately") diff --git a/packages/flow/octobot_flow/logic/configuration/profile_data_factory.py b/packages/flow/octobot_flow/logic/configuration/profile_data_factory.py new file mode 100644 index 0000000000..0856f0ae29 --- /dev/null +++ b/packages/flow/octobot_flow/logic/configuration/profile_data_factory.py @@ -0,0 +1,57 @@ +import typing + +import octobot_commons.profiles.profile_data as profile_data_import +import octobot_commons.constants +import octobot_trading.enums as trading_enums + +import octobot_flow.entities + +import tentacles.Meta.Keywords.scripting_library as scripting_library + + +def create_profile_data( + exchange_account_details: typing.Optional[octobot_flow.entities.ExchangeAccountDetails], + automation_id: str, + symbols: set[str], + as_simulator: typing.Optional[bool] = None, +) -> profile_data_import.ProfileData: + crypto_currencies = _get_crypto_currencies(symbols) + return profile_data_import.ProfileData( + profile_details=profile_data_import.ProfileDetailsData( + bot_id=automation_id + ), + crypto_currencies=crypto_currencies, + exchanges=[exchange_account_details.exchange_details] if exchange_account_details else [], + trading=profile_data_import.TradingData( + reference_market=infer_reference_market(exchange_account_details, crypto_currencies) + ), + trader_simulator=profile_data_import.TraderSimulatorData( + enabled=as_simulator if as_simulator is not None else ( + exchange_account_details.is_simulated() if exchange_account_details else True + ) + ), + tentacles=[], # no tentacles: only the generic dsl executor will be used + ) + +def infer_reference_market( + exchange_account_details: typing.Optional[octobot_flow.entities.ExchangeAccountDetails], + crypto_currencies: list[profile_data_import.CryptoCurrencyData]) -> str: + if ( + exchange_account_details + and exchange_account_details.exchange_details.exchange_type == trading_enums.ExchangeTypes.FUTURE + ): + return octobot_commons.constants.DEFAULT_REFERENCE_MARKET + if crypto_currencies: + return octobot_commons.symbols.parse_symbol(crypto_currencies[0].trading_pairs[0]).quote # type: ignore + elif exchange_account_details: + if exchange_account_details.portfolio.unit: + # portfolio unit can be used to define the reference market + return exchange_account_details.portfolio.unit + return scripting_library.get_default_exchange_reference_market(exchange_account_details.exchange_details.internal_name) + return octobot_commons.constants.DEFAULT_REFERENCE_MARKET + +def _get_crypto_currencies(symbols: set[str]) -> list[profile_data_import.CryptoCurrencyData]: + return [ + profile_data_import.CryptoCurrencyData(trading_pairs=[symbol], name=symbol) + for symbol in symbols + ] \ No newline at end of file diff --git a/packages/flow/octobot_flow/logic/configuration/profile_data_provider.py b/packages/flow/octobot_flow/logic/configuration/profile_data_provider.py new file mode 100644 index 0000000000..99d747a001 --- /dev/null +++ b/packages/flow/octobot_flow/logic/configuration/profile_data_provider.py @@ -0,0 +1,25 @@ +import contextlib +import typing +import octobot_commons.profiles as commons_profiles + +import octobot_flow.errors + + +class ProfileDataProvider: + def __init__(self): + self.profile_data: typing.Optional[commons_profiles.ProfileData] = None + + @contextlib.contextmanager + def profile_data_context(self, profile_data: commons_profiles.ProfileData): + try: + self.profile_data = profile_data + yield + finally: + self.profile_data = None + + def get_profile_data(self) -> commons_profiles.ProfileData: + if self.profile_data is None: + raise octobot_flow.errors.NoProfileDataError( + f"{self.__class__.__name__} is not in a profile data context" + ) + return self.profile_data diff --git a/packages/flow/octobot_flow/logic/dsl/__init__.py b/packages/flow/octobot_flow/logic/dsl/__init__.py new file mode 100644 index 0000000000..04a014f694 --- /dev/null +++ b/packages/flow/octobot_flow/logic/dsl/__init__.py @@ -0,0 +1,16 @@ +from octobot_flow.logic.dsl.dsl_dependencies import ( + get_actions_symbol_dependencies, + get_actions_time_frames_dependencies, + get_copy_trading_dependencies, +) +from octobot_flow.logic.dsl.dsl_executor import DSLExecutor + +from octobot_flow.logic.dsl.dsl_action_execution_context import dsl_action_execution + +__all__ = [ + "get_actions_symbol_dependencies", + "get_actions_time_frames_dependencies", + "get_copy_trading_dependencies", + "DSLExecutor", + "dsl_action_execution", +] \ No newline at end of file diff --git a/packages/flow/octobot_flow/logic/dsl/dsl_action_execution_context.py b/packages/flow/octobot_flow/logic/dsl/dsl_action_execution_context.py new file mode 100644 index 0000000000..d85abcb285 --- /dev/null +++ b/packages/flow/octobot_flow/logic/dsl/dsl_action_execution_context.py @@ -0,0 +1,53 @@ +import octobot_commons.dsl_interpreter +import octobot_commons.errors +import octobot_commons.logging +import octobot_commons.constants +import octobot_trading.errors +import octobot_trading.enums + +import octobot_flow.entities +import octobot_flow.enums + + +def dsl_action_execution(func): + async def _action_execution_error_handler_wrapper( + self, action: octobot_flow.entities.DSLScriptActionDetails + ): + """ + Handle the error of the DSL script execution. + action.result should only be a value of octobot_flow.enums.ActionErrorStatus. + """ + try: + call_result: octobot_commons.dsl_interpreter.DSLCallResult = await func(self, action) + if call_result.succeeded(): + action.complete(result=call_result.result) + else: + action.complete(error_status=call_result.error) + except octobot_trading.errors.DisabledFundsTransferError as err: + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.DISABLED_FUNDS_TRANSFER_ERROR.value) + except octobot_trading.errors.MissingMinimalExchangeTradeVolume as err: + octobot_commons.logging.get_logger("action_execution").exception(err, True, f"Missing minimal exchange trade volume error: {err}") + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.INVALID_ORDER.value) + except (octobot_trading.errors.UnsupportedHedgeContractError, octobot_trading.errors.InvalidPositionSide) as err: + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.UNSUPPORTED_HEDGE_POSITION.value) + except octobot_trading.errors.ExchangeAccountSymbolPermissionError as err: + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.SYMBOL_INCOMPATIBLE_WITH_ACCOUNT.value) + except octobot_commons.errors.InvalidParameterFormatError as err: + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.INVALID_SIGNAL_FORMAT.value) + except octobot_trading.errors.NotSupportedOrderTypeError as err: + if err.order_type == octobot_trading.enums.TraderOrderType.STOP_LOSS: + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.UNSUPPORTED_STOP_ORDER.value) + else: + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.INVALID_ORDER.value) + except octobot_trading.errors.BlockchainWalletError as err: + octobot_commons.logging.get_logger("action_execution").exception(err, True, f"Blockchain wallet error: {err}") + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.BLOCKCHAIN_WALLET_ERROR.value) + except Exception as err: + octobot_commons.logging.get_logger("action_execution").exception( + err, + True, + f"Failed to interpret DSL script '{action.get_summary(not octobot_commons.constants.ALLOW_PRIVATE_DATA_LOGS)}' " + f"for action: {action.id}: {err}" + ) + action.complete(error_status=octobot_flow.enums.ActionErrorStatus.INTERNAL_ERROR.value) + return _action_execution_error_handler_wrapper diff --git a/packages/flow/octobot_flow/logic/dsl/dsl_dependencies.py b/packages/flow/octobot_flow/logic/dsl/dsl_dependencies.py new file mode 100644 index 0000000000..9cd1a8053e --- /dev/null +++ b/packages/flow/octobot_flow/logic/dsl/dsl_dependencies.py @@ -0,0 +1,86 @@ +import octobot_trading.dsl +import octobot_commons.enums +import octobot_commons.dsl_interpreter +import octobot_flow.entities +import octobot_flow.logic.dsl.dsl_executor as dsl_executor +import octobot_commons.profiles.profile_data as profile_data_import + + +def get_actions_symbol_dependencies( + actions: list[octobot_flow.entities.AbstractActionDetails], + minimal_profile_data: profile_data_import.ProfileData +) -> list[str]: + all_symbol_dependencies = [ + _get_symbol_dependencies(minimal_profile_data, action.get_resolved_dsl_script()) + for action in actions + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails) + ] + return list(set( + symbol_dependency.symbol + for symbol_dependencies in all_symbol_dependencies + for symbol_dependency in symbol_dependencies + )) + + +def get_actions_time_frames_dependencies( + actions: list[octobot_flow.entities.AbstractActionDetails], + minimal_profile_data: profile_data_import.ProfileData +) -> list[octobot_commons.enums.TimeFrames]: + all_symbol_dependencies = [ + _get_symbol_dependencies(minimal_profile_data, action.get_resolved_dsl_script()) + for action in actions + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails) + ] + return list(set( + octobot_commons.enums.TimeFrames(symbol_dependency.time_frame) + for symbol_dependencies in all_symbol_dependencies + for symbol_dependency in symbol_dependencies + if symbol_dependency.time_frame + )) + + +def get_copy_trading_dependencies( + actions: list[octobot_flow.entities.AbstractActionDetails], + minimal_profile_data: profile_data_import.ProfileData, +) -> list[octobot_trading.dsl.CopyTradingDependency]: + all_copy_trading_dependencies = [ + _get_copy_trading_dependencies(minimal_profile_data, action.get_resolved_dsl_script()) + for action in actions + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails) + ] + return list(set( + copy_trading_dependency + for copy_trading_dependencies in all_copy_trading_dependencies + for copy_trading_dependency in copy_trading_dependencies + )) + + +def _get_symbol_dependencies( + minimal_profile_data: profile_data_import.ProfileData, + dsl_script: str +) -> list[octobot_trading.dsl.SymbolDependency]: + return [ + symbol_dependency + for symbol_dependency in get_dsl_dependencies(minimal_profile_data, dsl_script) + if isinstance(symbol_dependency, octobot_trading.dsl.SymbolDependency) + ] + + +def _get_copy_trading_dependencies( + minimal_profile_data: profile_data_import.ProfileData, + dsl_script: str +) -> list[octobot_trading.dsl.CopyTradingDependency]: + return [ + copy_trading_dependency + for copy_trading_dependency in get_dsl_dependencies(minimal_profile_data, dsl_script) + if isinstance(copy_trading_dependency, octobot_trading.dsl.CopyTradingDependency) + ] + +def get_dsl_dependencies( + minimal_profile_data: profile_data_import.ProfileData, + dsl_script: str +) -> list[octobot_commons.dsl_interpreter.InterpreterDependency]: + dependencies_only_executor = dsl_executor.DSLExecutor( + minimal_profile_data, None, dsl_script + ) + return dependencies_only_executor.get_dependencies() diff --git a/packages/flow/octobot_flow/logic/dsl/dsl_executor.py b/packages/flow/octobot_flow/logic/dsl/dsl_executor.py new file mode 100644 index 0000000000..8f2638d7ca --- /dev/null +++ b/packages/flow/octobot_flow/logic/dsl/dsl_executor.py @@ -0,0 +1,142 @@ +import typing +import contextlib + +import octobot_commons.dsl_interpreter +import octobot_commons.signals +import octobot_commons.errors +import octobot_commons.profiles +import octobot_commons.logging +import octobot_trading.exchanges +import octobot_trading.dsl +import octobot_trading.modes as trading_modes + +import tentacles.Meta.DSL_operators as dsl_operators + +import octobot_flow.entities +import octobot_flow.errors +import octobot_flow.enums + +# avoid circular import +from octobot_flow.logic.dsl.dsl_action_execution_context import dsl_action_execution +from octobot_flow.logic.actions.abstract_action_executor import AbstractActionExecutor + + + +class DSLExecutor(AbstractActionExecutor): + def __init__( + self, + profile_data: octobot_commons.profiles.ProfileData, + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], + dsl_script: typing.Optional[str], + dependencies: typing.Optional[octobot_commons.signals.SignalDependencies] = None, + ): + super().__init__() + self._exchange_manager = exchange_manager + self._dependencies = dependencies + self._dependencies_config: dict = profile_data.to_profile("").config + self._interpreter: octobot_commons.dsl_interpreter.Interpreter = self._create_interpreter(None) + if dsl_script: + self._interpreter.prepare(dsl_script) + + def _create_interpreter( + self, previous_execution_result: typing.Optional[dict] + ): + return octobot_commons.dsl_interpreter.Interpreter( + octobot_commons.dsl_interpreter.get_all_operators() + + dsl_operators.create_ohlcv_operators(self._exchange_manager, None, None) + + dsl_operators.create_portfolio_operators(self._exchange_manager) + + dsl_operators.create_create_order_operators( + self._exchange_manager, trading_mode=None, dependencies=self._dependencies + ) + + dsl_operators.create_cancel_order_operators( + self._exchange_manager, trading_mode=None, dependencies=self._dependencies + ) + + dsl_operators.create_fetch_order_operators(self._exchange_manager) + + dsl_operators.create_blockchain_wallet_operators(self._exchange_manager) + + trading_modes.create_all_trading_mode_operators( + self._exchange_manager, self._dependencies_config + ) + + dsl_operators.create_copy_exchange_account_operators( + copier_exchange_manager=self._exchange_manager, + copier_trading_mode=None, + ) + ) + + def get_dependencies(self) -> list[ + octobot_commons.dsl_interpreter.InterpreterDependency + ]: + return self._interpreter.get_dependencies() + + def get_top_operator(self) -> typing.Union[ + octobot_commons.dsl_interpreter.Operator, + octobot_commons.dsl_interpreter.ComputedOperatorParameterType, + ]: + return self._interpreter.get_top_operator() + + @dsl_action_execution + async def execute_action(self, action: octobot_flow.entities.DSLScriptActionDetails) -> typing.Any: + self._interpreter = self._create_interpreter( + action.previous_execution_result + ) + expression = action.get_resolved_dsl_script() + try: + return octobot_commons.dsl_interpreter.DSLCallResult( + statement=expression, + result=await self._interpreter.interprete(expression), + ) + except octobot_commons.errors.MaxAttemptsExceededError as err: + self._logger().error(f"Max attempts exceeded: {err}") + return octobot_commons.dsl_interpreter.DSLCallResult( + statement=expression, + error=octobot_flow.enums.ActionErrorStatus.MAX_ATTEMPTS_EXCEEDED.value + ) + except octobot_commons.errors.ErrorStatementEncountered as err: + self._logger().exception( + err, True, f"Generic DSL error statement encountered: {err}" + ) + validated_error = ( + err.args[0] if err.args and err.args[0] in octobot_flow.enums.ActionErrorStatus + else octobot_flow.enums.ActionErrorStatus.DSL_EXECUTION_ERROR.value + ) + return octobot_commons.dsl_interpreter.DSLCallResult( + statement=expression, + error=validated_error + ) + + @contextlib.asynccontextmanager + async def dependencies_context( + self, actions: list[octobot_flow.entities.AbstractActionDetails] + ) -> typing.AsyncGenerator[None, None]: + try: + all_dependencies = self._get_all_dependencies(actions) if actions else [] + # 1. validate static dependencies + self._validate_dependencies(all_dependencies) + # 2. instanciate dynamic dependencies + # todo initialize dynamic dependencies when implemented + yield + finally: + # todo clean up dynamic dependencies when required + pass + + def _validate_dependencies(self, dependencies: list[octobot_commons.dsl_interpreter.InterpreterDependency]): + if any( + isinstance(dependency, octobot_trading.dsl.SymbolDependency) for dependency in dependencies + ) and not self._exchange_manager: + raise octobot_flow.errors.MissingDSLExecutorDependencyError( + "Exchange manager is required when using symbol dependencies" + ) + + def _get_all_dependencies( + self, actions: list[octobot_flow.entities.AbstractActionDetails] + ) -> list[octobot_commons.dsl_interpreter.InterpreterDependency]: + dependencies = [] + for action in actions: + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails): + dsl_script = action.get_resolved_dsl_script() + self._interpreter.prepare(dsl_script) + dependencies.extend(self._interpreter.get_dependencies()) + return dependencies + + @classmethod + def _logger(cls) -> octobot_commons.logging.BotLogger: + return octobot_commons.logging.get_logger(cls.__name__) diff --git a/packages/flow/octobot_flow/logic/exchange/__init__.py b/packages/flow/octobot_flow/logic/exchange/__init__.py new file mode 100644 index 0000000000..8e57f535f7 --- /dev/null +++ b/packages/flow/octobot_flow/logic/exchange/__init__.py @@ -0,0 +1,11 @@ +from octobot_flow.logic.exchange.sub_portfolio import SubPortfolioResolver +from octobot_flow.logic.exchange.simulator import ( + SimulatedExchangeAccountResolver, + SimulatedPriceEventsFactory, +) + +__all__ = [ + "SubPortfolioResolver", + "SimulatedExchangeAccountResolver", + "SimulatedPriceEventsFactory", +] diff --git a/packages/flow/octobot_flow/logic/exchange/simulator/__init__.py b/packages/flow/octobot_flow/logic/exchange/simulator/__init__.py new file mode 100644 index 0000000000..70957ed759 --- /dev/null +++ b/packages/flow/octobot_flow/logic/exchange/simulator/__init__.py @@ -0,0 +1,7 @@ +from octobot_flow.logic.exchange.simulator.simulated_exchange_account_resolver import SimulatedExchangeAccountResolver +from octobot_flow.logic.exchange.simulator.simulated_price_events_factory import SimulatedPriceEventsFactory + +__all__ = [ + "SimulatedExchangeAccountResolver", + "SimulatedPriceEventsFactory", +] diff --git a/packages/flow/octobot_flow/logic/exchange/simulator/simulated_exchange_account_resolver.py b/packages/flow/octobot_flow/logic/exchange/simulator/simulated_exchange_account_resolver.py new file mode 100644 index 0000000000..475768e3d5 --- /dev/null +++ b/packages/flow/octobot_flow/logic/exchange/simulator/simulated_exchange_account_resolver.py @@ -0,0 +1,111 @@ +import octobot_commons.asyncio_tools as asyncio_tools +import octobot_commons.logging as commons_logging +import octobot_commons.profiles as commons_profiles + +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import octobot_flow.entities as entities_import +import octobot_flow.errors as flow_errors +import octobot_flow.logic.configuration as configuration_import +import octobot_flow.logic.dsl as dsl_import +import octobot_flow.logic.exchange.simulator.simulated_price_events_factory as simulated_price_events_factory_import +import octobot_flow.repositories.exchange.exchange_context_mixin as exchange_context_mixin_import + + +# leave 3 async cycles to process events and potential level 2 or 3 callbacks +_ORDERS_UPDATE_CYCLES = 3 + + +class _SimulatedResolutionExchangeContext(exchange_context_mixin_import.ExchangeContextMixin): + USE_PREDICTIVE_ORDERS_SYNC: bool = True + + def __init__( + self, + automation_state: entities_import.AutomationState, + fetched_dependencies: entities_import.FetchedDependencies, + ): + # enable order fill events to simulate fills from events + super().__init__(automation_state, fetched_dependencies, True) + + def init_predictive_orders_exchange_data(self, exchange_data: exchange_data_import.ExchangeData) -> None: + exchange_account_elements = self.automation_state.automation.exchange_account_elements + if exchange_account_elements is None: + return + fetched_exchange_data = self.fetched_dependencies.fetched_exchange_data + if fetched_exchange_data is not None: + exchange_data.markets = fetched_exchange_data.public_data.markets + exchange_data.portfolio_details.content = exchange_account_elements.portfolio.content + exchange_data.orders_details.open_orders = list(exchange_account_elements.orders.open_orders) + exchange_data.orders_details.missing_orders = list(exchange_account_elements.orders.missing_orders) + exchange_data.positions = list(exchange_account_elements.positions) + + +class SimulatedExchangeAccountResolver: + def __init__( + self, + automation_state: entities_import.AutomationState, + fetched_dependencies: entities_import.FetchedDependencies, + actions: list[entities_import.AbstractActionDetails], + ): + self._simulation_exchange_context: _SimulatedResolutionExchangeContext = _SimulatedResolutionExchangeContext( + automation_state, fetched_dependencies + ) + self._actions: list[entities_import.AbstractActionDetails] = actions + + def _get_profile_data(self) -> commons_profiles.ProfileData: + minimal_profile_data = configuration_import.create_profile_data( + self._simulation_exchange_context.automation_state.exchange_account_details, + self._simulation_exchange_context.automation_state.automation.metadata.automation_id, + set(), + ) + return configuration_import.create_profile_data( + self._simulation_exchange_context.automation_state.exchange_account_details, + self._simulation_exchange_context.automation_state.automation.metadata.automation_id, + set(dsl_import.get_actions_symbol_dependencies(self._actions, minimal_profile_data)), + as_simulator=True + ) + + async def resolve(self) -> None: + account_elements = self._simulation_exchange_context.automation_state.automation.exchange_account_elements + if account_elements is None: + self._logger().debug( + "SimulatedExchangeAccountResolver: no exchange account elements, skipping" + ) + return + + if not account_elements.orders.open_orders and not account_elements.positions: + self._logger().debug( + "SimulatedExchangeAccountResolver: no open orders and no positions on account elements, skipping" + ) + return + + with self._simulation_exchange_context.profile_data_provider.profile_data_context( + self._get_profile_data() + ): + async with self._simulation_exchange_context.exchange_manager_context() as simulated_exchange_manager: + if simulated_exchange_manager is None: + raise flow_errors.ExchangeAccountInitializationError( + "Simulated exchange manager was not initialized inside exchange_manager_context" + ) + + fetched_exchange_data = self._simulation_exchange_context.fetched_dependencies.fetched_exchange_data + if fetched_exchange_data is None: + raise flow_errors.ExchangeAccountInitializationError( + "SimulatedExchangeAccountResolver: fetched exchange data is not initialized" + ) + + simulated_price_events_factory_import.SimulatedPriceEventsFactory( + simulated_exchange_manager + ).push_mark_price_and_recent_trades_updates( + account_elements, + fetched_exchange_data, + ) + for _ in range(_ORDERS_UPDATE_CYCLES): + # let the async loop process the triggered price events if any + await asyncio_tools.wait_asyncio_next_cycle() + + # simulation is now synchronized, sync the account elements from the updated simulated exchange manager + account_elements.sync_from_exchange_manager(simulated_exchange_manager, []) + + def _logger(self) -> commons_logging.BotLogger: + return commons_logging.get_logger(self.__class__.__name__) diff --git a/packages/flow/octobot_flow/logic/exchange/simulator/simulated_price_events_factory.py b/packages/flow/octobot_flow/logic/exchange/simulator/simulated_price_events_factory.py new file mode 100644 index 0000000000..eea1f9f2fc --- /dev/null +++ b/packages/flow/octobot_flow/logic/exchange/simulator/simulated_price_events_factory.py @@ -0,0 +1,163 @@ +import decimal +import typing + +import octobot_commons.enums as commons_enums +import octobot_commons.logging as commons_logging + +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.exchange_data.recent_trades.recent_trades_manager as recent_trades_manager_import +import octobot_trading.exchanges as trading_exchanges +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import octobot_flow.entities as entities_import + + +class SimulatedPriceEventsFactory: + + def __init__( + self, + simulated_exchange_manager: trading_exchanges.ExchangeManager, + ) -> None: + self._simulated_exchange_manager = simulated_exchange_manager + + def push_mark_price_and_recent_trades_updates( + self, + account_elements: entities_import.ExchangeAccountElements, + fetched_exchange_data: entities_import.FetchedExchangeData, + ) -> None: + symbols = self._symbols_for_mark_price_updates( + account_elements.orders.open_orders + ) + markets = fetched_exchange_data.public_data.markets + for symbol in symbols: + if ticker := fetched_exchange_data.public_data.tickers.get(symbol): + if close_price := ticker.get( + trading_enums.ExchangeConstantsTickersColumns.CLOSE.value + ): + self._simulated_exchange_manager.get_symbol_data(symbol).handle_mark_price_update( + decimal.Decimal(str(close_price)), + trading_enums.MarkPriceSources.TICKER_CLOSE_PRICE.value, + reset_mark_price_from_other_sources=True, + ) + else: + self._logger().error( + "SimulatedPriceEventsResolver: ticker for %s has no close, skip mark price", + symbol, + ) + else: + self._logger().error( + "SimulatedPriceEventsResolver: no ticker for %s, skip mark price " + "(simulated fills may miss)", + symbol, + ) + + if chosen_market := self._pick_shortest_timeframe_market(markets, symbol): + if trades := self._synthetic_recent_trades_from_ohlcv_market( + chosen_market, symbol, self._simulated_exchange_manager.exchange + ): + self._simulated_exchange_manager.get_symbol_data(symbol).handle_recent_trade_update( + trades, replace_all=False + ) + else: + self._logger().info( + "No eligible OHLCV market for %s, skip synthetic recent trades", + symbol, + ) + + @staticmethod + def _pick_shortest_timeframe_market( + markets: list[exchange_data_import.MarketDetails], + symbol: str, + ) -> typing.Optional[exchange_data_import.MarketDetails]: + eligible: list[tuple[int, str, exchange_data_import.MarketDetails]] = [] + for market in markets: + if market.symbol != symbol: + continue + if not market.time_frame or not market.time or not market.close: + continue + if len(market.time) != len(market.close): + continue + try: + time_frame_enum = commons_enums.TimeFrames(market.time_frame) + timeframe_minutes = commons_enums.TimeFramesMinutes[time_frame_enum] + except (ValueError, KeyError): + continue + eligible.append((timeframe_minutes, market.time_frame, market)) + if not eligible: + return None + eligible.sort(key=lambda eligible_entry: (eligible_entry[0], eligible_entry[1])) + return eligible[0][2] + + @staticmethod + def _synthetic_recent_trades_from_ohlcv_market( + market: exchange_data_import.MarketDetails, + symbol: str, + exchange: trading_exchanges.AbstractExchange, + ) -> list[dict]: + recent_candles_window = min( + len(market.time), + recent_trades_manager_import.RecentTradesManager.MAX_RECENT_TRADES_COUNT, + ) + recent_times = market.time[-recent_candles_window:] + recent_closes = market.close[-recent_candles_window:] + high_low_lists_aligned = ( + len(market.high) == len(market.close) + and len(market.low) == len(market.close) + ) + recent_highs = ( + market.high[-recent_candles_window:] if high_low_lists_aligned else () + ) + recent_lows = ( + market.low[-recent_candles_window:] if high_low_lists_aligned else () + ) + time_frame_enum = commons_enums.TimeFrames(market.time_frame) + timeframe_seconds = commons_enums.TimeFramesMinutes[time_frame_enum] * 60 + order_columns = trading_enums.ExchangeConstantsOrderColumns + trades: list[dict] = [] + for candle_index, candle_time in enumerate(recent_times): + uniformized_open_timestamp = exchange.get_uniformized_timestamp(candle_time) + trade_timestamp = int(uniformized_open_timestamp) + timeframe_seconds + close_price = recent_closes[candle_index] + if high_low_lists_aligned: + high_price = recent_highs[candle_index] + low_price = recent_lows[candle_index] + if high_price != close_price or low_price != close_price: + for trade_role_suffix, price in ( + ("low", low_price), + ("high", high_price), + ): + trades.append({ + order_columns.PRICE.value: price, + order_columns.TIMESTAMP.value: trade_timestamp, + order_columns.SYMBOL.value: symbol, + order_columns.EXCHANGE_TRADE_ID.value: ( + f"sim_ohlcv:{symbol}:{market.time_frame}:" + f"{candle_time}:{candle_index}:{trade_role_suffix}" + ), + }) + continue + trades.append({ + order_columns.PRICE.value: close_price, + order_columns.TIMESTAMP.value: trade_timestamp, + order_columns.SYMBOL.value: symbol, + order_columns.EXCHANGE_TRADE_ID.value: ( + f"sim_ohlcv:{symbol}:{market.time_frame}:{candle_time}:{candle_index}" + ), + }) + return trades + + @staticmethod + def _symbols_for_mark_price_updates(open_orders: list[dict]) -> list[str]: + symbols: set[str] = set() + for order in open_orders: + storage = order.get(trading_constants.STORAGE_ORIGIN_VALUE, order) + order_symbol = storage.get( + trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value + ) + if order_symbol: + symbols.add(order_symbol) + return list(symbols) + + def _logger(self) -> commons_logging.BotLogger: + return commons_logging.get_logger(self.__class__.__name__) diff --git a/packages/flow/octobot_flow/logic/exchange/sub_portfolio/__init__.py b/packages/flow/octobot_flow/logic/exchange/sub_portfolio/__init__.py new file mode 100644 index 0000000000..c9f949dfcb --- /dev/null +++ b/packages/flow/octobot_flow/logic/exchange/sub_portfolio/__init__.py @@ -0,0 +1,5 @@ +from octobot_flow.logic.exchange.sub_portfolio.sub_portfolio_resolver import SubPortfolioResolver + +__all__ = [ + "SubPortfolioResolver", +] diff --git a/packages/flow/octobot_flow/logic/exchange/sub_portfolio/sub_portfolio_resolver.py b/packages/flow/octobot_flow/logic/exchange/sub_portfolio/sub_portfolio_resolver.py new file mode 100644 index 0000000000..e8f30a73b0 --- /dev/null +++ b/packages/flow/octobot_flow/logic/exchange/sub_portfolio/sub_portfolio_resolver.py @@ -0,0 +1,36 @@ +import octobot_commons.constants as common_constants + +import octobot_flow.entities +import octobot_flow.errors + + +class SubPortfolioResolver: + def __init__(self, automation_state: octobot_flow.entities.AutomationState): + self._automation_state = automation_state + + async def resolve(self): + # equivalent to serverless global view update + # 1. identify missing orders #TODO + # 2. resolve missing orders #TODO + # 3. resolve (sub)portfolios + await self._resolve_full_portfolio(self._automation_state.automation) + # await self._resolve_sub_portfolio(bot_details) + + async def _resolve_sub_portfolio(self, automation: octobot_flow.entities.AutomationDetails): + # TODO: implement to support sub portfolios + # for now only uses the global portfolio content + raise NotImplementedError("SubPortfolioResolver._resolve_sub_portfolio is not implemented") + + async def _resolve_full_portfolio(self, automation: octobot_flow.entities.AutomationDetails): + if automation.exchange_account_elements is None: + raise octobot_flow.errors.ExchangeAccountInitializationError( + "Exchange account elements are required to resolve the portfolio" + ) + automation.exchange_account_elements.portfolio.content = { + asset.asset: { + common_constants.PORTFOLIO_AVAILABLE: asset.available, + common_constants.PORTFOLIO_TOTAL: asset.total, + } + for asset in self._automation_state.exchange_account_details.portfolio.content + if asset.total > 0 + } diff --git a/packages/flow/octobot_flow/parsers/__init__.py b/packages/flow/octobot_flow/parsers/__init__.py new file mode 100644 index 0000000000..38a6ace79f --- /dev/null +++ b/packages/flow/octobot_flow/parsers/__init__.py @@ -0,0 +1,11 @@ +from octobot_flow.parsers.actions_dag_parser import ( + ActionsDAGParser, + key_val_to_dict, +) +from octobot_flow.parsers.automation_state_reader import AutomationStateReader + +__all__ = [ + "ActionsDAGParser", + "AutomationStateReader", + "key_val_to_dict", +] \ No newline at end of file diff --git a/packages/flow/octobot_flow/parsers/actions_dag_parser.py b/packages/flow/octobot_flow/parsers/actions_dag_parser.py new file mode 100644 index 0000000000..8952a2c95c --- /dev/null +++ b/packages/flow/octobot_flow/parsers/actions_dag_parser.py @@ -0,0 +1,795 @@ +import typing +import dataclasses +import enum +import uuid +import json + +import octobot_commons.constants as commons_constants +import octobot_commons.symbols +import octobot_commons.profiles.profile_data as profiles_import +import octobot_commons.dataclasses +import octobot_commons.configuration +import octobot_trading.blockchain_wallets as blockchain_wallets +import octobot_trading.blockchain_wallets.simulator.blockchain_wallet_simulator as blockchain_wallets_simulator +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.enums as trading_enums +import octobot_flow.errors +import octobot_flow.entities +import octobot_flow.enums + +import tentacles.Trading.Mode.trading_view_signals_trading_mode.actions_params as actions_params +import tentacles.Trading.Mode.trading_view_signals_trading_mode.trading_view_signals_trading as trading_view_signals_trading +import tentacles.Trading.Mode.trading_view_signals_trading_mode.tradingview_signal_to_dsl_translator as tradingview_signal_to_dsl_translator + +def key_val_to_dict(key_val: str) -> dict: + return trading_view_signals_trading.TradingViewSignalsTradingMode.parse_signal_data(key_val, None, None, None, []) + + +class ActionType(enum.Enum): + WAIT = "wait" + TRADE = "trade" + CANCEL = "cancel" + WITHDRAW = "withdraw" + DEPOSIT = "deposit" + TRANSFER = "transfer" + LOOP_UNTIL_BLOCKCHAIN_BALANCE = "loop_until_blockchain_balance" + LOOP_UNTIL_ORDER_CLOSED = "loop_until_order_closed" + + +CONTENT_KEY = "CONTENT" + +# dependency::::::::... — path into that action's result (dict keys and list indices as digit strings) +DEPENDENCY_SEPARATOR = "::" +DEPENDENCY_IDENTIFIER = "dependency" +PARAM_DEPENDENCY_IDENTIFIER = "param_dependency" +DEPENDENCY_PARAM_PREFIX = f"{DEPENDENCY_IDENTIFIER}{DEPENDENCY_SEPARATOR}" +PARAM_DEPENDENCY_PREFIX = f"{PARAM_DEPENDENCY_IDENTIFIER}{DEPENDENCY_SEPARATOR}" + + +# Returned by _resolve_param_dependency_string_value when the target field is still a param_dependency string. +_PARAM_DEPENDENCY_RESOLUTION_DEFERRED = object() + + +@dataclasses.dataclass +class ActionsDAGParserParams(octobot_commons.dataclasses.FlexibleDataclass): + ACTIONS: list[str] = dataclasses.field(default_factory=list) + AUTOMATION_ID: str = dataclasses.field(default_factory=lambda: str(uuid.uuid4())) + EXCHANGE_TO: typing.Optional[str] = None + API_KEY: typing.Optional[str] = None + API_SECRET: typing.Optional[str] = None + SIMULATED_PORTFOLIO: typing.Optional[dict[str, float]] = dataclasses.field(default_factory=dict) + ORDER_SIDE: typing.Optional[str] = None + ORDER_SYMBOL: typing.Optional[str] = None + ORDER_AMOUNT: typing.Optional[float] = None + ORDER_PRICE: typing.Optional[float] = None + ORDER_STOP_PRICE: typing.Optional[float] = None + ORDER_TAG: typing.Optional[str] = None + ORDER_REDUCE_ONLY: typing.Optional[bool] = None + ORDER_TYPE: typing.Optional[str] = None + ORDER_EXTRA_PARAMS: typing.Optional[dict] = None + ORDER_EXCHANGE_ID: typing.Optional[str] = None + EXCHANGE_FROM: typing.Optional[str] = None + MIN_DELAY: typing.Optional[float] = None + MAX_DELAY: typing.Optional[float] = None + BLOCKCHAIN_FROM: typing.Optional[str] = None + BLOCKCHAIN_FROM_AMOUNT: typing.Optional[float] = None + BLOCKCHAIN_FROM_ASSET: typing.Optional[str] = None + BLOCKCHAIN_FROM_ADDRESS: typing.Optional[str] = None + BLOCKCHAIN_FROM_MNEMONIC_SEED: typing.Optional[str] = None + BLOCKCHAIN_FROM_BLOCK_HEIGHT: typing.Union[int, str, None] = None + BLOCKCHAIN_FROM_SECRET_VIEW_KEY: typing.Optional[str] = None + BLOCKCHAIN_FROM_SECRET_SPEND_KEY: typing.Optional[str] = None + BLOCKCHAIN_FROM_PRIVATE_KEY: typing.Optional[str] = None + BLOCKCHAIN_TO: typing.Optional[str] = None + BLOCKCHAIN_TO_ASSET: typing.Optional[str] = None + BLOCKCHAIN_TO_AMOUNT: typing.Optional[float] = None + BLOCKCHAIN_TO_ADDRESS: typing.Optional[str] = None + BLOCKCHAIN_TO_MNEMONIC_SEED: typing.Optional[str] = None + BLOCKCHAIN_TO_BLOCK_HEIGHT: typing.Union[int, str, None] = None + BLOCKCHAIN_TO_SECRET_VIEW_KEY: typing.Optional[str] = None + BLOCKCHAIN_TO_SECRET_SPEND_KEY: typing.Optional[str] = None + BLOCKCHAIN_TO_PRIVATE_KEY: typing.Optional[str] = None + BLOCKCHAIN_BALANCE_ADDRESS: typing.Optional[str] = None + BLOCKCHAIN_BALANCE_AMOUNT: typing.Optional[str] = None + BLOCKCHAIN_BALANCE_ASSET: typing.Optional[str] = None + BLOCKCHAIN_BALANCE: typing.Optional[str] = None + LOOP_INTERVAL: typing.Optional[float] = None + LOOP_TIMEOUT: typing.Optional[float] = None + LOOP_MAX_ATTEMPTS: typing.Optional[int] = None + CONTENT: typing.Optional[dict] = None + + def __post_init__(self): + if self.ACTIONS and isinstance(self.ACTIONS, str): + # action is a string, convert it to a list + self.ACTIONS = self.ACTIONS.split(",") # pylint: disable=no-member + if isinstance(self.ORDER_EXTRA_PARAMS, str): + self.ORDER_EXTRA_PARAMS = json.loads(self.ORDER_EXTRA_PARAMS) + if isinstance(self.SIMULATED_PORTFOLIO, str): + self.SIMULATED_PORTFOLIO = json.loads(self.SIMULATED_PORTFOLIO) + self._resolve_param_dependencies() + self.validate() + + def validate(self): + if self.EXCHANGE_TO and self.EXCHANGE_FROM: + if self.EXCHANGE_TO != self.EXCHANGE_FROM: + raise octobot_flow.errors.InvalidAutomationActionError("EXCHANGE_TO and EXCHANGE_FROM must be the same") + + def get_reference_market(self) -> typing.Optional[str]: + if self.ORDER_SYMBOL: + parsed_symbol = octobot_commons.symbols.parse_symbol(self.ORDER_SYMBOL) + return parsed_symbol.quote + return None + + def has_next_schedule(self) -> bool: + return self.MIN_DELAY is not None or self.MAX_DELAY is not None + + def _get_next_schedule_delay(self) -> tuple[float, float]: + if self.MIN_DELAY is None and self.MAX_DELAY is None: + return 0, 0 + if self.MIN_DELAY is not None and self.MAX_DELAY is None: + return self.MIN_DELAY, self.MIN_DELAY # type: ignore + if self.MIN_DELAY is None and self.MAX_DELAY is not None: + return self.MAX_DELAY, self.MAX_DELAY # type: ignore + return self.MIN_DELAY, self.MAX_DELAY # type: ignore + + def get_exchange_internal_name(self) -> typing.Optional[str]: + if self.EXCHANGE_TO or self.EXCHANGE_FROM: + return (self.EXCHANGE_TO or self.EXCHANGE_FROM).lower() # type: ignore + return None + + def get_blockchain_and_wallet_descriptors_from_wallet_details( + self + ) -> dict[str, typing.Any]: + if ( + not self.BLOCKCHAIN_FROM or + not self.BLOCKCHAIN_FROM_ASSET or + not self.BLOCKCHAIN_FROM_AMOUNT + ): + raise octobot_flow.errors.InvalidAutomationActionError( + f"BLOCKCHAIN_FROM, BLOCKCHAIN_FROM_ASSET, BLOCKCHAIN_FROM_ADDRESS and BLOCKCHAIN_FROM_AMOUNT " + f"must be provided for a blockchain from wallet" + ) + if not ( + # sending details + not self.BLOCKCHAIN_FROM_PRIVATE_KEY + or not self.BLOCKCHAIN_FROM_MNEMONIC_SEED + or not ( + self.BLOCKCHAIN_FROM_SECRET_VIEW_KEY + and self.BLOCKCHAIN_FROM_SECRET_SPEND_KEY + ) + ): + raise octobot_flow.errors.InvalidAutomationActionError( + f"BLOCKCHAIN_FROM_PRIVATE_KEY, BLOCKCHAIN_FROM_MNEMONIC_SEED, BLOCKCHAIN_FROM_SECRET_VIEW_KEY " + f"or BLOCKCHAIN_FROM_SECRET_SPEND_KEY must be provided for a blockchain from wallet" + ) + blockchain, blockchain_descriptor_specific_config, wallet_descriptor_specific_config = self.get_blockchain_and_specific_configs(self.BLOCKCHAIN_FROM) + return { + "blockchain_descriptor": blockchain_wallets.BlockchainDescriptor( + blockchain=blockchain, + network=self.BLOCKCHAIN_FROM, + native_coin_symbol=self.BLOCKCHAIN_FROM_ASSET, + specific_config=blockchain_descriptor_specific_config, + ), + "wallet_descriptor": blockchain_wallets.WalletDescriptor( + address=self.BLOCKCHAIN_FROM_ADDRESS, + private_key=self.BLOCKCHAIN_FROM_PRIVATE_KEY, + mnemonic_seed=self.BLOCKCHAIN_FROM_MNEMONIC_SEED, + specific_config=wallet_descriptor_specific_config, + ) + } + + def get_blockchain_and_wallet_descriptors_to_wallet_details( + self + ) -> blockchain_wallets.BlockchainWalletParameters: + if ( + not self.BLOCKCHAIN_TO or + not self.BLOCKCHAIN_TO_ADDRESS + ): + raise octobot_flow.errors.InvalidAutomationActionError( + f"BLOCKCHAIN_TO, BLOCKCHAIN_TO_ADDRESS and BLOCKCHAIN_TO_ASSET must be provided for a blockchain to wallet" + ) + if not ( + self.BLOCKCHAIN_TO_ADDRESS + and not self.BLOCKCHAIN_TO_PRIVATE_KEY + and not self.BLOCKCHAIN_TO_MNEMONIC_SEED + and not self.BLOCKCHAIN_TO_SECRET_VIEW_KEY + ): + raise octobot_flow.errors.InvalidAutomationActionError( + f"BLOCKCHAIN_TO_ADDRESS, BLOCKCHAIN_TO_PRIVATE_KEY, BLOCKCHAIN_TO_MNEMONIC_SEED " + f"or BLOCKCHAIN_TO_SECRET_VIEW_KEY must be provided for a blockchain to wallet" + ) + blockchain, blockchain_descriptor_specific_config, wallet_descriptor_specific_config = self.get_blockchain_and_specific_configs(self.BLOCKCHAIN_TO) + return blockchain_wallets.BlockchainWalletParameters( + blockchain_descriptor=blockchain_wallets.BlockchainDescriptor( + blockchain=blockchain, + network=self.BLOCKCHAIN_TO, + native_coin_symbol=self.BLOCKCHAIN_TO_ASSET, + specific_config=blockchain_descriptor_specific_config, + ), + wallet_descriptor=blockchain_wallets.WalletDescriptor( + address=self.BLOCKCHAIN_TO_ADDRESS, + specific_config=wallet_descriptor_specific_config, + ) + ) + + def get_blockchain_and_wallet_descriptors_for_balance_check( + self + ) -> blockchain_wallets.BlockchainWalletParameters: + if ( + not self.BLOCKCHAIN_BALANCE or + not self.BLOCKCHAIN_BALANCE_ADDRESS or + not self.BLOCKCHAIN_BALANCE_ASSET + ): + raise octobot_flow.errors.InvalidAutomationActionError( + f"BLOCKCHAIN_BALANCE, BLOCKCHAIN_BALANCE_ADDRESS and BLOCKCHAIN_BALANCE_ASSET must be provided for a blockchain to wallet" + ) + blockchain, blockchain_descriptor_specific_config, wallet_descriptor_specific_config = self.get_blockchain_and_specific_configs(self.BLOCKCHAIN_BALANCE) + return blockchain_wallets.BlockchainWalletParameters( + blockchain_descriptor=blockchain_wallets.BlockchainDescriptor( + blockchain=blockchain, + network=self.BLOCKCHAIN_BALANCE, + native_coin_symbol=self.BLOCKCHAIN_BALANCE_ASSET, + specific_config=blockchain_descriptor_specific_config, + ), + wallet_descriptor=blockchain_wallets.WalletDescriptor( + address=self.BLOCKCHAIN_BALANCE_ADDRESS, + specific_config=wallet_descriptor_specific_config, + ) + ) + + def get_blockchain_and_specific_configs( + self, blockchain: str + ) -> tuple[str, dict, dict]: + blockchain_wallet_class = blockchain_wallets.get_blockchain_wallet_class_by_blockchain()[blockchain.lower()] + simulator_config = { + blockchain_wallets_simulator.BlockchainWalletSimulatorConfigurationKeys.ASSETS.value: { + self.BLOCKCHAIN_FROM_ASSET: self.BLOCKCHAIN_FROM_AMOUNT, + } + } + specific_config = self._create_generic_blockchain_wallet_specific_config(blockchain) + all_config = {**simulator_config, **specific_config} + return ( + blockchain_wallet_class.BLOCKCHAIN, + blockchain_wallet_class.create_blockchain_descriptor_specific_config(**all_config), + blockchain_wallet_class.create_wallet_descriptor_specific_config(**all_config), + ) + + def _create_generic_blockchain_wallet_specific_config(self, blockchain: str) -> dict: + is_blockchain_from = blockchain == self.BLOCKCHAIN_FROM + prefix = "BLOCKCHAIN_FROM_" if is_blockchain_from else "BLOCKCHAIN_TO_" + return { + key.replace(prefix, "").lower(): value + for key, value in dataclasses.asdict(self).items() + if key.startswith(prefix) + } + + def _resolve_param_dependencies(self) -> None: + valid_field_names = frozenset(self.get_field_names()) + field_list = dataclasses.fields(self) + max_rounds = len(field_list) + 1 + for _ in range(max_rounds): + progressed = False + for field in field_list: + value = getattr(self, field.name) + if isinstance(value, dict): + if self._resolve_param_dependencies_in_mapping( + value, valid_field_names, field.name + ): + progressed = True + continue + if not isinstance(value, str) or not value.startswith(PARAM_DEPENDENCY_PREFIX): + continue + resolved_value = _resolve_param_dependency_string_value( + self, value, valid_field_names, f"field {field.name!r}" + ) + if resolved_value is _PARAM_DEPENDENCY_RESOLUTION_DEFERRED: + continue + object.__setattr__(self, field.name, resolved_value) + progressed = True + if not progressed: + break + for field in field_list: + value = getattr(self, field.name) + if isinstance(value, str) and value.startswith(PARAM_DEPENDENCY_PREFIX): + raise octobot_flow.errors.InvalidAutomationActionError( + f"Unresolved param_dependency cycle or chain for field {field.name!r}: {value!r}" + ) + if isinstance(value, dict) and self._mapping_contains_unresolved_param_dependency(value): + raise octobot_flow.errors.InvalidAutomationActionError( + f"Unresolved param_dependency cycle or chain inside field {field.name!r}" + ) + + def _resolve_param_dependencies_in_mapping( + self, + mapping: dict, + valid_field_names: frozenset[str], + context_path: str, + ) -> bool: + """ + Replace param_dependency::... string values inside a dict (recursively). + Returns True if at least one value was resolved this pass. + """ + progressed = False + for key, entry_value in mapping.items(): + entry_path = f"{context_path}[{key!r}]" + if isinstance(entry_value, dict): + if self._resolve_param_dependencies_in_mapping( + entry_value, valid_field_names, entry_path + ): + progressed = True + elif isinstance(entry_value, str) and entry_value.startswith(PARAM_DEPENDENCY_PREFIX): + resolved_value = _resolve_param_dependency_string_value( + self, entry_value, valid_field_names, entry_path + ) + if resolved_value is _PARAM_DEPENDENCY_RESOLUTION_DEFERRED: + continue + mapping[key] = resolved_value + progressed = True + return progressed + + @staticmethod + def _mapping_contains_unresolved_param_dependency(mapping: dict) -> bool: + for entry_value in mapping.values(): + if isinstance(entry_value, dict): + if ActionsDAGParserParams._mapping_contains_unresolved_param_dependency(entry_value): + return True + elif isinstance(entry_value, str) and entry_value.startswith(PARAM_DEPENDENCY_PREFIX): + return True + return False + +class ActionsDAGParser: + def __init__(self, params: dict): + if content := params.get(CONTENT_KEY): + if isinstance(content, str): + try: + content = json.loads(content) + except json.JSONDecodeError: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Invalid json value in {CONTENT_KEY} column: {content}" + ) + params = {**params, **content} + self.params: ActionsDAGParserParams = ActionsDAGParserParams.from_dict(params) + self.blockchain_param_index = 0 + + def parse(self) -> octobot_flow.entities.ActionsDAG: + init_action = self._create_init_action( + self.params.AUTOMATION_ID, + self.params.get_exchange_internal_name(), + self.params.API_KEY, + self.params.API_SECRET, + self.params.SIMULATED_PORTFOLIO, + ) + actions_dag = octobot_flow.entities.ActionsDAG([init_action]) + self._parse_generic_actions(actions_dag) + return actions_dag + + def _parse_generic_actions(self, actions_dag: octobot_flow.entities.ActionsDAG) -> None: + latest_action = actions_dag.get_executable_actions()[0] + for index, action in enumerate(self.params.ACTIONS): + new_action = self._create_generic_action(action, index + 1) + new_action.add_dependency(latest_action.id) + actions_dag.add_action(new_action) + latest_action = new_action + + def _create_generic_action( + self, action: str, index: int + ) -> octobot_flow.entities.AbstractActionDetails: + match action: + case ActionType.TRADE.value: + return self._create_order_action(index) + case ActionType.CANCEL.value: + return self._create_cancel_action(index) + case ActionType.WITHDRAW.value: + return self._create_withdraw_action(index) + case ActionType.DEPOSIT.value: + return self._create_deposit_action(index) + case ActionType.TRANSFER.value: + return self._create_transfer_action(index) + case ActionType.LOOP_UNTIL_BLOCKCHAIN_BALANCE.value: + return self._create_loop_until_blockchain_balance_action(index) + case ActionType.LOOP_UNTIL_ORDER_CLOSED.value: + return self._create_loop_until_order_closed_action(index) + case ActionType.WAIT.value: + return self._create_wait_action(index) + case _: + raise ValueError( + f"Unknown action: {action}" + ) + + def _create_order_action(self, index: int) -> octobot_flow.entities.AbstractActionDetails: + self._ensure_params( + ["ORDER_SYMBOL", "ORDER_AMOUNT", "ORDER_TYPE"], + "trade", + ) + parsed_symbol = octobot_commons.symbols.parse_symbol(self.params.ORDER_SYMBOL) + if self.params.ORDER_SIDE: + signal = self.params.ORDER_SIDE.lower() + elif parsed_symbol.base == self.params.BLOCKCHAIN_FROM_ASSET and parsed_symbol.quote == self.params.BLOCKCHAIN_TO_ASSET: # type: ignore + # sell the first blockchain asset to get the second one + signal = trading_view_signals_trading.TradingViewSignalsTradingMode.SELL_SIGNAL + elif parsed_symbol.base == self.params.BLOCKCHAIN_TO_ASSET and parsed_symbol.quote == self.params.BLOCKCHAIN_FROM_ASSET: # type: ignore + # buy the second blockchain asset to get the first one + signal = trading_view_signals_trading.TradingViewSignalsTradingMode.BUY_SIGNAL + else: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Invalid order symbol: {self.params.ORDER_SYMBOL}: symbol must contain the 2 " + f"blockchain assets to determine the side of the order" + ) + order_details = { + trading_view_signals_trading.TradingViewSignalsTradingMode.EXCHANGE_KEY: self.params.get_exchange_internal_name(), + trading_view_signals_trading.TradingViewSignalsTradingMode.SYMBOL_KEY: self.params.ORDER_SYMBOL, + trading_view_signals_trading.TradingViewSignalsTradingMode.VOLUME_KEY: self.params.ORDER_AMOUNT, + trading_view_signals_trading.TradingViewSignalsTradingMode.ORDER_TYPE_SIGNAL: self.params.ORDER_TYPE, + } + if self.params.ORDER_PRICE: + order_details[trading_view_signals_trading.TradingViewSignalsTradingMode.PRICE_KEY] = self.params.ORDER_PRICE + if self.params.ORDER_STOP_PRICE: + order_details[trading_view_signals_trading.TradingViewSignalsTradingMode.STOP_PRICE_KEY] = self.params.ORDER_STOP_PRICE + if self.params.ORDER_TAG: + order_details[trading_view_signals_trading.TradingViewSignalsTradingMode.TAG_KEY] = self.params.ORDER_TAG + if self.params.ORDER_REDUCE_ONLY: + order_details[trading_view_signals_trading.TradingViewSignalsTradingMode.REDUCE_ONLY_KEY] = self.params.ORDER_REDUCE_ONLY + if self.params.ORDER_EXTRA_PARAMS: + for extra_param, value in self.params.ORDER_EXTRA_PARAMS.items(): + order_details[f"{trading_view_signals_trading.TradingViewSignalsTradingMode.PARAM_PREFIX_KEY}{extra_param}"] = value + return self.create_dsl_script_from_tv_format_action_details( + f"action_trade_{index}", signal, order_details, + ) + + def _create_cancel_action(self, index: int) -> octobot_flow.entities.AbstractActionDetails: + self._ensure_params( + ["ORDER_SYMBOL"], + "cancel", + ) + cancel_details = { + trading_view_signals_trading.TradingViewSignalsTradingMode.SYMBOL_KEY: self.params.ORDER_SYMBOL, + } + if self.params.ORDER_SIDE: + cancel_details[trading_view_signals_trading.TradingViewSignalsTradingMode.SIDE_PARAM_KEY] = self.params.ORDER_SIDE.lower() + if self.params.ORDER_TAG: + cancel_details[trading_view_signals_trading.TradingViewSignalsTradingMode.TAG_KEY] = self.params.ORDER_TAG + return self.create_dsl_script_from_tv_format_action_details( + f"action_cancel_{index}", + trading_view_signals_trading.TradingViewSignalsTradingMode.CANCEL_SIGNAL, + cancel_details, + ) + + def _create_withdraw_action( + self, index: int + ) -> octobot_flow.entities.AbstractActionDetails: + self._ensure_params( + ["BLOCKCHAIN_TO_ASSET", "BLOCKCHAIN_TO", "BLOCKCHAIN_TO_ADDRESS"], + "withdraw", + ) + withdraw_details = actions_params.WithdrawFundsParams( + asset=self.params.BLOCKCHAIN_TO_ASSET, + network=self.params.BLOCKCHAIN_TO, + address=self.params.BLOCKCHAIN_TO_ADDRESS, + ) + if self.params.BLOCKCHAIN_TO_AMOUNT: + withdraw_details.amount = self.params.BLOCKCHAIN_TO_AMOUNT + return self.create_dsl_script_from_tv_format_action_details( + f"action_withdraw_{index}", + trading_view_signals_trading.TradingViewSignalsTradingMode.WITHDRAW_FUNDS_SIGNAL, + dataclasses.asdict(withdraw_details), + ) + + def _create_deposit_action( + self, index: int + ) -> octobot_flow.entities.AbstractActionDetails: + self._ensure_params( + ["BLOCKCHAIN_FROM_ASSET", "BLOCKCHAIN_FROM_AMOUNT", "BLOCKCHAIN_FROM", "EXCHANGE_TO"], + "deposit", + ) + deposit_details = actions_params.TransferFundsParams( + asset=self.params.BLOCKCHAIN_FROM_ASSET, + amount=self.params.BLOCKCHAIN_FROM_AMOUNT, + address=None, + destination_exchange=self.params.EXCHANGE_TO, + **self.params.get_blockchain_and_wallet_descriptors_from_wallet_details(), + ) + return self.create_dsl_script_from_tv_format_action_details( + f"action_deposit_{index}", + trading_view_signals_trading.TradingViewSignalsTradingMode.TRANSFER_FUNDS_SIGNAL, + dataclasses.asdict(deposit_details), + ) + + def _create_transfer_action( + self, index: int + ) -> octobot_flow.entities.AbstractActionDetails: + self._ensure_params( + ["BLOCKCHAIN_FROM_ASSET", "BLOCKCHAIN_FROM_AMOUNT", "BLOCKCHAIN_FROM", "BLOCKCHAIN_TO_ADDRESS"], + "transfer", + ) + transfer_details = actions_params.TransferFundsParams( + asset=self.params.BLOCKCHAIN_FROM_ASSET, + amount=self.params.BLOCKCHAIN_FROM_AMOUNT, + address=self.params.BLOCKCHAIN_TO_ADDRESS, + **self.params.get_blockchain_and_wallet_descriptors_from_wallet_details(), + ) + return self.create_dsl_script_from_tv_format_action_details( + f"action_transfer_{index}", + trading_view_signals_trading.TradingViewSignalsTradingMode.TRANSFER_FUNDS_SIGNAL, + dataclasses.asdict(transfer_details), + ) + + def _get_loop_params(self) -> tuple[typing.Optional[float], typing.Optional[float], int]: + loop_interval, loop_timeout, loop_max_attempts = ( + self.params.LOOP_INTERVAL, self.params.LOOP_TIMEOUT, self.params.LOOP_MAX_ATTEMPTS + ) + if not loop_interval: + raise octobot_flow.errors.InvalidAutomationActionError( + "LOOP_INTERVAL must be provided for the loop_until action" + ) + return loop_interval, loop_timeout, loop_max_attempts # type: ignore + + def _create_loop_until_order_closed_action(self, index: int) -> octobot_flow.entities.AbstractActionDetails: + loop_interval, loop_timeout, loop_max_attempts = self._get_loop_params() + self._ensure_params( + ["ORDER_EXCHANGE_ID", "ORDER_SYMBOL"], + "loop_until_order_closed", + ) + if not self.params.get_exchange_internal_name(): + raise octobot_flow.errors.InvalidAutomationActionError( + "EXCHANGE_TO or EXCHANGE_FROM must be provided for the loop_until_order_closed action" + ) + # force the use of keyword form for the exchange_order_id parameter to resolve the dependency + fetch_order = f"fetch_order('{self.params.ORDER_SYMBOL}', exchange_order_id='{self.params.ORDER_EXCHANGE_ID}')" + selector = ( + f"value_if({fetch_order}, " + f"\"get({commons_constants.LOCAL_VALUE_PLACEHOLDER}, 'status', '{trading_enums.OrderStatus.OPEN.value}') " + f"!= '{trading_enums.OrderStatus.OPEN.value}'\")" + ) + dsl_script = ( + f"loop_until({selector}, " + f"{loop_interval}, timeout={loop_timeout}, max_attempts={loop_max_attempts}, " + f"return_remaining_time=True)" + ) + action_id = f"action_loop_until_order_closed_{index}" + params = {"exchange_order_id": self.params.ORDER_EXCHANGE_ID, "symbol": self.params.ORDER_SYMBOL} + return self._create_dsl_action_with_dependencies_if_any(action_id, dsl_script, params) + + def _create_loop_until_blockchain_balance_action(self, index: int) -> octobot_flow.entities.AbstractActionDetails: + loop_interval, loop_timeout, loop_max_attempts = self._get_loop_params() + amount, asset = self.params.BLOCKCHAIN_BALANCE_AMOUNT, self.params.BLOCKCHAIN_BALANCE_ASSET + if not amount or not asset: + raise octobot_flow.errors.InvalidAutomationActionError( + "BLOCKCHAIN_BALANCE_AMOUNT and BLOCKCHAIN_BALANCE_ASSET must be provided for the wait_for_blockchain_balance action" + ) + blockchain_params = self.params.get_blockchain_and_wallet_descriptors_for_balance_check() + wallet_params = dataclasses.asdict(blockchain_params) + wallet_check = tradingview_signal_to_dsl_translator.TradingViewSignalToDSLTranslator.translate_keyword_and_params( + "blockchain_wallet_balance", + wallet_params, + {"asset": asset}, + ) + dsl_script = ( + f"loop_until(value_if({wallet_check}, ' >= {float(amount)}'), " + f"{loop_interval}, timeout={loop_timeout}, max_attempts={loop_max_attempts}, " + f"return_remaining_time=True)" + ) + action_id = f"action_loop_until_blockchain_balance_{index}" + return self._create_dsl_action_with_dependencies_if_any(action_id, dsl_script, wallet_params) + + def _create_wait_action(self, index: int) -> octobot_flow.entities.AbstractActionDetails: + if not self.params.has_next_schedule(): + raise octobot_flow.errors.InvalidAutomationActionError( + f"{ActionType.WAIT.value} action requires at least a MIN_DELAY" + ) + min_delay, max_delay = self.params._get_next_schedule_delay() + max_delay_str = f", {max_delay}" if max_delay and max_delay != min_delay else "" + dsl_script = f"wait({min_delay}{max_delay_str}, return_remaining_time=True)" + return octobot_flow.entities.DSLScriptActionDetails( + id=f"action_wait_{index}", + dsl_script=dsl_script, + ) + + def _ensure_params(self, keys: list[str], action: str) -> None: + missing_keys = [] + for key in keys: + if not getattr(self.params, key): + missing_keys.append(key) + if missing_keys: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Missing keys: {', '.join(missing_keys)} (required: {', '.join(keys)}) " + f"for a {action} action" + ) + + def _get_empty_exchange_api_key(self) -> str: + return octobot_commons.configuration.encrypt("").decode() + + def _create_init_action( + self, + automation_id: str, + exchange_internal_name: typing.Optional[str], + api_key: typing.Optional[str], + api_secret: typing.Optional[str], + simulated_portfolio: typing.Optional[dict[str, float]], + ) -> octobot_flow.entities.AbstractActionDetails: + formatted_simulated_portfolio = { + asset: { + commons_constants.PORTFOLIO_TOTAL: value, + commons_constants.PORTFOLIO_AVAILABLE: value, + } + for asset, value in simulated_portfolio.items() + } + automation_details = octobot_flow.entities.AutomationDetails( + metadata=octobot_flow.entities.AutomationMetadata( + automation_id=automation_id, + ), + exchange_account_elements=octobot_flow.entities.ExchangeAccountElements( + portfolio=exchange_data_import.PortfolioDetails( + content=formatted_simulated_portfolio, + ) + ), + ) + exchange_account_details = octobot_flow.entities.ExchangeAccountDetails( + exchange_details=profiles_import.ExchangeData( + internal_name=exchange_internal_name, + ), + auth_details=exchange_data_import.ExchangeAuthDetails( + # use empty key to simulate the exchange without an account + api_key=api_key or ("" if simulated_portfolio else self._get_empty_exchange_api_key()), + api_secret=api_secret or "", + ), + ) if exchange_internal_name else None + automation_state = octobot_flow.entities.AutomationState( + automation=automation_details, + exchange_account_details=exchange_account_details, + ) + return self.create_configured_action_details( + "action_init", + octobot_flow.enums.ActionType.APPLY_CONFIGURATION, + automation_state.to_dict(include_default_values=False), + ) + + def _collect_dependency_refs_from_details( + self, details: dict + ) -> list[tuple[str, str, tuple[str, ...], str]]: + """ + Find dependency::... references in string param values. + Returns (dsl_parameter_name, dependency_action_id, result_path_keys, source_literal). + """ + refs: list[tuple[str, str, tuple[str, ...], str]] = [] + for key, value in details.items(): + if isinstance(value, dict): + refs.extend(self._collect_dependency_refs_from_details(value)) + if not isinstance(value, str): + continue + parsed = _parse_dependency_param_value(value) + if not parsed: + continue + dep_action_id, result_path = parsed + dsl_key = ( + trading_view_signals_trading.TradingViewSignalsTradingMode.TRADINGVIEW_TO_DSL_PARAM.get( + key, key.lower() if isinstance(key, str) else str(key).lower() + ) + ) + refs.append((dsl_key, dep_action_id, result_path, value)) + return refs + + def _inject_dependency_placeholders_in_dsl_script( + self, dsl_script: str, refs: list[tuple[str, str, tuple[str, ...], str]] + ) -> str: + """ + Turn dependency:: references in the translated DSL into UNRESOLVED_PARAMETER placeholders + and use keyword form when the value was emitted as a positional argument. + """ + result = dsl_script + placeholder = commons_constants.UNRESOLVED_PARAMETER_PLACEHOLDER + for dsl_key, _, __, source_literal in refs: + literal_repr = repr(source_literal) + kw_form = f"{dsl_key}={literal_repr}" + kw_placeholder = f"{dsl_key}={placeholder}" + if kw_form in result: + result = result.replace(kw_form, kw_placeholder) + elif literal_repr in result: + count = result.count(literal_repr) + if count != 1: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Ambiguous dependency literal {literal_repr} ({count} occurrences) in DSL: {dsl_script}" + ) + result = result.replace(literal_repr, placeholder, 1) + else: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Dependency value for DSL parameter {dsl_key!r} ({source_literal!r}) not found in script: {dsl_script}" + ) + return result + + def create_dsl_script_from_tv_format_action_details( + self, action_id: str, signal: str, details: dict + ) -> octobot_flow.entities.DSLScriptActionDetails: + dsl_script = tradingview_signal_to_dsl_translator.TradingViewSignalToDSLTranslator.translate_signal( + {**{trading_view_signals_trading.TradingViewSignalsTradingMode.SIGNAL_KEY: signal}, **details} + ) + if dsl_script == tradingview_signal_to_dsl_translator.UNKNOWN_SIGNAL_RESULT: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Invalid signal: {signal}({details}) (action {action_id})" + ) + return self._create_dsl_action_with_dependencies_if_any(action_id, dsl_script, details) + + def _create_dsl_action_with_dependencies_if_any( + self, action_id:str, dsl_script: str, details: dict + ) -> octobot_flow.entities.DSLScriptActionDetails: + dependency_refs = self._collect_dependency_refs_from_details(details) + if dependency_refs: + dsl_script = self._inject_dependency_placeholders_in_dsl_script(dsl_script, dependency_refs) + action = octobot_flow.entities.DSLScriptActionDetails( + id=action_id, + dsl_script=dsl_script, + ) + for dsl_key, dep_action_id, result_path, _ in dependency_refs: + action.add_dependency(dep_action_id, dsl_key, list(result_path)) + return action + + def create_configured_action_details( + self, action_id: str, action: octobot_flow.enums.ActionType, config: dict + ) -> octobot_flow.entities.ConfiguredActionDetails: + return octobot_flow.entities.ConfiguredActionDetails( + id=action_id, + action=action.value, + config=config, + ) + + +def _parse_dependency_param_value( + value: str, +) -> typing.Optional[tuple[str, tuple[str, ...]]]: + if not isinstance(value, str) or not value.startswith(DEPENDENCY_PARAM_PREFIX): + return None + parts = value.split(DEPENDENCY_SEPARATOR) + if len(parts) < 3 or parts[0] != DEPENDENCY_IDENTIFIER or not parts[1]: + return None + path_keys = tuple(parts[2:]) + if not path_keys or any(not segment for segment in path_keys): + return None + return parts[1], path_keys + + +def _canonical_param_dependency_field_name( + target_name: str, + valid_field_names: frozenset[str], +) -> typing.Optional[str]: + """ + Map a param_dependency target token to the canonical ActionsDAGParserParams field name. + Matching is case-insensitive; returns the dataclass field name (typically UPPER_CASE). + """ + if target_name in valid_field_names: + return target_name + target_lower = target_name.lower() + for field_name in valid_field_names: + if field_name.lower() == target_lower: + return field_name + return None + + +def _resolve_param_dependency_string_value( + params: typing.Any, + raw_value: str, + valid_field_names: frozenset[str], + context_for_errors: str, +) -> typing.Any: + """ + Resolve a param_dependency::... string against params fields. + Returns _PARAM_DEPENDENCY_RESOLUTION_DEFERRED if the target is not ready yet (still a dependency string). + Otherwise returns the resolved value (may be None). + """ + if not isinstance(raw_value, str) or not raw_value.startswith(PARAM_DEPENDENCY_PREFIX): + return None + target_name = raw_value[len(PARAM_DEPENDENCY_PREFIX):] + if not target_name or DEPENDENCY_SEPARATOR in target_name: + target_name = None + if target_name is None: + raise octobot_flow.errors.InvalidAutomationActionError( + f"Invalid param_dependency value ({context_for_errors}): {raw_value!r}" + ) + canonical_name = _canonical_param_dependency_field_name(target_name, valid_field_names) + if canonical_name is None: + raise octobot_flow.errors.InvalidAutomationActionError( + f"param_dependency target {target_name!r} is not a valid " + f"ActionsDAGParserParams field (referenced from {context_for_errors})" + ) + resolved_value = getattr(params, canonical_name) + if isinstance(resolved_value, str) and resolved_value.startswith(PARAM_DEPENDENCY_PREFIX): + return _PARAM_DEPENDENCY_RESOLUTION_DEFERRED + return resolved_value diff --git a/packages/flow/octobot_flow/parsers/automation_state_reader.py b/packages/flow/octobot_flow/parsers/automation_state_reader.py new file mode 100644 index 0000000000..b0760f7abb --- /dev/null +++ b/packages/flow/octobot_flow/parsers/automation_state_reader.py @@ -0,0 +1,33 @@ +import octobot_flow.entities +import octobot_flow.logic.configuration +import octobot_flow.logic.dsl + + +class AutomationStateReader: + def __init__(self, state: octobot_flow.entities.AutomationState): + self.state: octobot_flow.entities.AutomationState = state + + def get_automation_copied_strategy_ids(self) -> list[str]: + to_execute_actions = self.state.automation.actions_dag.get_executable_actions() + self._resolve_dsl_scripts_for_actions(to_execute_actions) + minimal_profile_data = octobot_flow.logic.configuration.create_profile_data( + self.state.exchange_account_details, + self.state.automation.metadata.automation_id, + set(), + ) + copy_trading_dependencies = octobot_flow.logic.dsl.get_copy_trading_dependencies( + to_execute_actions, minimal_profile_data + ) + return list(set( + copy_trading_dependency.strategy_id + for copy_trading_dependency in copy_trading_dependencies + )) + + def _resolve_dsl_scripts_for_actions( + self, actions: list[octobot_flow.entities.AbstractActionDetails] + ) -> None: + # Align with AutomationJob._resolve_dsl_scripts(..., from_actions_dag=True) + self.state.automation.actions_dag.resolve_dsl_scripts(actions) + + def get_executable_actions(self) -> list[octobot_flow.entities.AbstractActionDetails]: + return self.state.automation.actions_dag.get_executable_actions() diff --git a/packages/flow/octobot_flow/repositories/__init__.py b/packages/flow/octobot_flow/repositories/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/flow/octobot_flow/repositories/community/__init__.py b/packages/flow/octobot_flow/repositories/community/__init__.py new file mode 100644 index 0000000000..8a35e9f846 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/__init__.py @@ -0,0 +1,19 @@ +from octobot_flow.repositories.community.community_repository import CommunityRepository +from octobot_flow.repositories.community.initializer import initialize_community_authentication +from octobot_flow.repositories.community.authenticator_factory import CommunityAuthenticatorFactory +from octobot_flow.repositories.community.custom_actions_repository import CustomActionsRepository +from octobot_flow.repositories.community.trading_signals_repository import TradingSignalsRepository +from octobot_flow.repositories.community.trading_signals_channel import get_or_create_internal_trading_signal_channel, send_internal_trading_signal +from octobot_flow.repositories.community.community_lib import ensure_is_authenticated, ensure_authenticated_community_repository + +__all__ = [ + "CommunityRepository", + "CustomActionsRepository", + "initialize_community_authentication", + "CommunityAuthenticatorFactory", + "ensure_is_authenticated", + "ensure_authenticated_community_repository", + "TradingSignalsRepository", + "get_or_create_internal_trading_signal_channel", + "send_internal_trading_signal", +] diff --git a/packages/flow/octobot_flow/repositories/community/authenticator_factory.py b/packages/flow/octobot_flow/repositories/community/authenticator_factory.py new file mode 100644 index 0000000000..73273eb209 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/authenticator_factory.py @@ -0,0 +1,44 @@ +import contextlib +import typing + +import octobot_flow.entities +import octobot.community as community +import octobot.community.local_authenticator as local_community_auth + + +class CommunityAuthenticatorFactory: + def __init__( + self, + auth_details: octobot_flow.entities.UserAuthentication, + backend_url: typing.Optional[str] = None, + anon_key: typing.Optional[str] = None + ): + self.auth_details: octobot_flow.entities.UserAuthentication = auth_details + self.backend_url: typing.Optional[str] = backend_url + self.anon_key: typing.Optional[str] = anon_key + + def enable_community_authentication(self) -> bool: + return bool(self.auth_details.has_auth_details() or self.anon_key) + + @contextlib.asynccontextmanager + async def local_authenticator(self) -> typing.AsyncGenerator[community.CommunityAuthentication, None]: + if not self.auth_details.email: + raise ValueError("auth_details.email is required") + async with local_community_auth.local_user_authenticator( + email=self.auth_details.email, + hidden=self.auth_details.hidden, + backend_url=self.backend_url, + password=self.auth_details.password if not self.auth_details.auth_key else None, + auth_key=self.auth_details.auth_key if self.auth_details.auth_key else None, + ) as local_instance: + yield local_instance + + @contextlib.asynccontextmanager + async def local_anon_authenticator(self) -> typing.AsyncGenerator[community.CommunityAuthentication, None]: + if not self.anon_key: + raise ValueError("Anon key is required") + async with local_community_auth.local_anon_user_authenticator( + backend_url=self.backend_url, + anon_key=self.anon_key, + ) as local_instance: + yield local_instance diff --git a/packages/flow/octobot_flow/repositories/community/community_lib.py b/packages/flow/octobot_flow/repositories/community/community_lib.py new file mode 100644 index 0000000000..7ef2589b57 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/community_lib.py @@ -0,0 +1,25 @@ +import typing + +import octobot.community +import octobot_flow.errors +import octobot_flow.repositories.community.community_repository as community_repository_import + + +def ensure_is_authenticated( + maybe_authenticator: typing.Optional[octobot.community.CommunityAuthentication] +) -> octobot.community.CommunityAuthentication: + if maybe_authenticator and maybe_authenticator.is_logged_in(): + return maybe_authenticator + raise octobot_flow.errors.CommunityAuthenticationRequiredError( + "Community authentication is required to fetch custom actions" + ) + + +def ensure_authenticated_community_repository( + maybe_community_repository: typing.Optional[community_repository_import.CommunityRepository] +) -> community_repository_import.CommunityRepository: + if maybe_community_repository is not None and ensure_is_authenticated(maybe_community_repository.authenticator): + return maybe_community_repository + raise octobot_flow.errors.CommunityAuthenticationRequiredError( + "Community authentication is required to use the community repository" + ) diff --git a/packages/flow/octobot_flow/repositories/community/community_repository.py b/packages/flow/octobot_flow/repositories/community/community_repository.py new file mode 100644 index 0000000000..91d282ade5 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/community_repository.py @@ -0,0 +1,32 @@ +import contextlib +import asyncio + +import octobot.community + +import octobot_flow.entities + + +class CommunityRepository: + def __init__(self, authenticator: octobot.community.CommunityAuthentication): + self.authenticator: octobot.community.CommunityAuthentication = authenticator + + async def insert_bot_logs(self, log_data: list[octobot.community.BotLogData]): + await asyncio.gather( + *[ + self.authenticator.supabase_client.insert_bot_log( + self.authenticator.user_account.bot_id, + log_data.log_type, + log_data.content + ) + for log_data in log_data + ] + ) + + @contextlib.contextmanager + def automation_context(self, automation: octobot_flow.entities.AutomationDetails): + previous_bot_id = self.authenticator.user_account.bot_id + try: + self.authenticator.user_account.bot_id = automation.metadata.automation_id # type: ignore + yield + finally: + self.authenticator.user_account.bot_id = previous_bot_id # type: ignore diff --git a/packages/flow/octobot_flow/repositories/community/custom_actions_repository.py b/packages/flow/octobot_flow/repositories/community/custom_actions_repository.py new file mode 100644 index 0000000000..16fc212c81 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/custom_actions_repository.py @@ -0,0 +1,23 @@ +import octobot.community + +import octobot_flow.entities + + +class CustomActionsRepository: + def __init__(self, authenticator: octobot.community.CommunityAuthentication): + self.authenticator: octobot.community.CommunityAuthentication = authenticator + + async def fetch_custom_actions( + self, + user_action_history_ids: list[str], + select_pending_user_actions_only: bool + ) -> list[octobot_flow.entities.AbstractActionDetails]: + raise NotImplementedError("TODO: fetch_custom_actions") + + async def fetch_signals( + self, signal_history_ids: list[str], select_pending_signals_only: bool + ) -> list[octobot_flow.entities.AbstractActionDetails]: + raise NotImplementedError("TODO: fetch_signals") + + async def update_custom_actions_history(self, actions: list[octobot_flow.entities.AbstractActionDetails]): + raise NotImplementedError("TODO: update_custom_actions_history") diff --git a/packages/flow/octobot_flow/repositories/community/initializer.py b/packages/flow/octobot_flow/repositories/community/initializer.py new file mode 100644 index 0000000000..dc9d5efe2e --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/initializer.py @@ -0,0 +1,8 @@ +import octobot.community + + +def initialize_community_authentication(): + octobot.community.IdentifiersProvider.use_production() + configuration = octobot.community.get_stateless_configuration() + # create CommunityAuthentication singleton + octobot.community.CommunityAuthentication.create(configuration) diff --git a/packages/flow/octobot_flow/repositories/community/trading_signals_channel.py b/packages/flow/octobot_flow/repositories/community/trading_signals_channel.py new file mode 100644 index 0000000000..dfd9f69abd --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/trading_signals_channel.py @@ -0,0 +1,57 @@ +import typing + +import async_channel.channels as async_channel_channels +import async_channel.consumer as async_channel_consumer +import async_channel.enums as async_channel_enums +import async_channel.producer as async_channel_producer +import async_channel.util.channel_creator as async_channel_channel_creator + +import octobot_flow.entities + +INTERNAL_TRADING_SIGNAL_KEY = "trading_signal" + + +class InternalTradingSignalChannelConsumer(async_channel_consumer.Consumer): + pass + + +class InternalTradingSignalChannelProducer(async_channel_producer.Producer): + pass + + +class InternalTradingSignalChannel(async_channel_channels.Channel): + PRODUCER_CLASS = InternalTradingSignalChannelProducer + CONSUMER_CLASS = InternalTradingSignalChannelConsumer + DEFAULT_PRIORITY_LEVEL = async_channel_enums.ChannelConsumerPriorityLevels.MEDIUM.value + + +async def get_or_create_internal_trading_signal_channel() -> InternalTradingSignalChannel: + channel_name = InternalTradingSignalChannel.get_name() + try: + return typing.cast( + InternalTradingSignalChannel, + async_channel_channels.get_chan(channel_name), + ) + except KeyError: + created = await async_channel_channel_creator.create_channel_instance( + InternalTradingSignalChannel, + async_channel_channels.set_chan, + ) + return typing.cast(InternalTradingSignalChannel, created) + + +async def send_internal_trading_signal(trading_signal: octobot_flow.entities.TradingSignal) -> None: + channel = await get_or_create_internal_trading_signal_channel() + internal_producer = channel.get_internal_producer() + await internal_producer.send({INTERNAL_TRADING_SIGNAL_KEY: trading_signal}) + + +async def shutdown_internal_trading_signal_channel() -> None: + channel_name = InternalTradingSignalChannel.get_name() + try: + channel = async_channel_channels.get_chan(channel_name) + except KeyError: + return + await channel.stop() + channel.flush() + async_channel_channels.del_chan(channel_name) diff --git a/packages/flow/octobot_flow/repositories/community/trading_signals_repository.py b/packages/flow/octobot_flow/repositories/community/trading_signals_repository.py new file mode 100644 index 0000000000..21311d64ef --- /dev/null +++ b/packages/flow/octobot_flow/repositories/community/trading_signals_repository.py @@ -0,0 +1,15 @@ +import octobot.community + +import octobot_flow.entities +import octobot_flow.repositories.community.trading_signals_channel as trading_signals_channel + + +class TradingSignalsRepository: + def __init__(self, authenticator: octobot.community.CommunityAuthentication): + self.authenticator: octobot.community.CommunityAuthentication = authenticator + + async def insert_trading_signal(self, trading_signal: octobot_flow.entities.TradingSignal): + await trading_signals_channel.send_internal_trading_signal(trading_signal) + + async def fetch_trading_signals(self, strategy_ids: list[str], history_size: int) -> list[octobot_flow.entities.TradingSignal]: + raise NotImplementedError("TODO: fetch_trading_signals") diff --git a/packages/flow/octobot_flow/repositories/exchange/__init__.py b/packages/flow/octobot_flow/repositories/exchange/__init__.py new file mode 100644 index 0000000000..1b6ed3dc3f --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/__init__.py @@ -0,0 +1,21 @@ +from octobot_flow.repositories.exchange.base_exchange_repository import BaseExchangeRepository +from octobot_flow.repositories.exchange.ohlcv_repository import OhlcvRepository +from octobot_flow.repositories.exchange.orders_repository import OrdersRepository +from octobot_flow.repositories.exchange.portfolio_repository import PortfolioRepository +from octobot_flow.repositories.exchange.positions_repository import PositionsRepository +from octobot_flow.repositories.exchange.trades_repository import TradesRepository +from octobot_flow.repositories.exchange.tickers_repository import TickersRepository +from octobot_flow.repositories.exchange.exchange_repository_factory import ExchangeRepositoryFactory +from octobot_flow.repositories.exchange.exchange_context_mixin import ExchangeContextMixin + +__all__ = [ + "BaseExchangeRepository", + "OhlcvRepository", + "OrdersRepository", + "PortfolioRepository", + "PositionsRepository", + "TradesRepository", + "TickersRepository", + "ExchangeRepositoryFactory", + "ExchangeContextMixin", +] \ No newline at end of file diff --git a/packages/flow/octobot_flow/repositories/exchange/base_exchange_repository.py b/packages/flow/octobot_flow/repositories/exchange/base_exchange_repository.py new file mode 100644 index 0000000000..c62ecf3b7c --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/base_exchange_repository.py @@ -0,0 +1,18 @@ +import octobot_trading.exchanges +import octobot_flow.entities +import octobot_trading.exchange_channel as exchange_channel +import octobot_trading.api + +class BaseExchangeRepository: + def __init__( + self, + exchange_manager: octobot_trading.exchanges.ExchangeManager, + known_automations: list[octobot_flow.entities.AutomationDetails], + fetched_exchange_data: octobot_flow.entities.FetchedExchangeData, + ): + self.exchange_manager: octobot_trading.exchanges.ExchangeManager = exchange_manager + self.known_automations: list[octobot_flow.entities.AutomationDetails] = known_automations + self.fetched_exchange_data: octobot_flow.entities.FetchedExchangeData = fetched_exchange_data + + def get_channel_updater(self, channel_name: str) -> exchange_channel.ExchangeChannelProducer: + return octobot_trading.api.get_channel_updater(self.exchange_manager, channel_name) diff --git a/packages/flow/octobot_flow/repositories/exchange/exchange_context_mixin.py b/packages/flow/octobot_flow/repositories/exchange/exchange_context_mixin.py new file mode 100644 index 0000000000..65280d42a7 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/exchange_context_mixin.py @@ -0,0 +1,161 @@ +import contextlib +import typing + +import octobot_commons.constants as common_constants +import octobot_commons.profiles as commons_profiles +import octobot_commons.logging as commons_logging +import octobot_trading.api +import octobot_trading.exchanges +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_tentacles_manager.api +import tentacles.Meta.Keywords.scripting_library as scripting_library +import octobot_flow.errors +import octobot_flow.entities +import octobot_flow.repositories.exchange.exchange_repository_factory as exchange_repository_factory +import octobot_flow.repositories.exchange.tickers_repository as tickers_repository +import octobot_flow.logic.configuration + +class ExchangeContextMixin: + USE_PREDICTIVE_ORDERS_SYNC: bool = False + + def __init__( + self, + automation_state: octobot_flow.entities.AutomationState, + fetched_dependencies: octobot_flow.entities.FetchedDependencies, + enable_order_fill_events: bool = False, + ): + self.automation_state: octobot_flow.entities.AutomationState = automation_state + self.fetched_dependencies: octobot_flow.entities.FetchedDependencies = fetched_dependencies + self.profile_data_provider: octobot_flow.logic.configuration.ProfileDataProvider = octobot_flow.logic.configuration.ProfileDataProvider() + self.enable_order_fill_events: bool = enable_order_fill_events + + # context dependant attributes + self._exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager] = None + + def get_exchange_repository_factory(self) -> exchange_repository_factory.ExchangeRepositoryFactory: + self.ensure_context() + return exchange_repository_factory.ExchangeRepositoryFactory( + self._exchange_manager, + [self.automation_state.automation], + self.fetched_dependencies.fetched_exchange_data, + self.profile_data_provider.get_profile_data().trader_simulator.enabled, + ) + + def set_fetched_dependencies(self, fetched_dependencies: octobot_flow.entities.FetchedDependencies): + self.fetched_dependencies = fetched_dependencies + + def init_predictive_orders_exchange_data(self, exchange_data: exchange_data_import.ExchangeData): + """ + should be implemented when self.USE_PREDICTIVE_ORDERS_SYNC is True + """ + raise NotImplementedError("init_predictive_orders_exchange_data should be implemented in subclass") + + def ensure_context(self): + if self._exchange_manager is None: + raise octobot_flow.errors.ExchangeAccountInitializationError("Not in exchange context") + + @contextlib.asynccontextmanager + async def exchange_manager_context( + self, + ) -> typing.AsyncGenerator[typing.Optional[octobot_trading.exchanges.ExchangeManager], None]: + profile_data = self.profile_data_provider.get_profile_data() + if not self.automation_state.has_exchange(): + # no need to initialize an exchange manager + yield None + return + automation_elements = self.automation_state.automation.exchange_account_elements + portfolio_content = ( + automation_elements.portfolio.content + if automation_elements is not None + else {} + ) + exchange_data = self.automation_state.exchange_account_details.to_minimal_exchange_data( + portfolio_content + ) + try: + if self.USE_PREDICTIVE_ORDERS_SYNC: + # make all markets available to the strategy, it will use the required ones + self.init_predictive_orders_exchange_data(exchange_data) + tentacles_setup_config = octobot_tentacles_manager.api.get_full_tentacles_setup_config() + octobot_tentacles_manager.api.set_tentacle_config_proxy(scripting_library.empty_config_proxy) + async with octobot_trading.exchanges.exchange_manager_from_exchange_data( + exchange_data, + profile_data, + tentacles_setup_config, + price_fallback=self._get_price_from_cached_tickers, + ) as exchange_manager: + portfolio_config = { + asset: portfolio_element[common_constants.PORTFOLIO_TOTAL] + for asset, portfolio_element in exchange_data.portfolio_details.content.items() + } + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + portfolio_manager.apply_forced_portfolio( + portfolio_config, + # lock open orders funds in portfolio for simulated trading + update_available_funds_from_open_orders=profile_data.trader_simulator.enabled, + ) + self._exchange_manager = exchange_manager + if self.USE_PREDICTIVE_ORDERS_SYNC: + async with self._predictive_order_sync_context( + exchange_manager, profile_data + ): + yield exchange_manager + else: + yield exchange_manager + finally: + self._exchange_manager = None + + def get_exchange_config(self) -> dict: + raise NotImplementedError("get_exchange_config not implemented") + + def _get_price_from_cached_tickers( + self, exchange_data: exchange_data_import.ExchangeData, symbol: str + ) -> typing.Optional[float]: + try: + price = tickers_repository.TickersRepository.get_cached_market_price_from_exchange_data( + exchange_data, symbol + ) + commons_logging.get_logger(self.__class__.__name__).warning( + f"Using {symbol} [{exchange_data.exchange_details.name}] " + f"ticker price for mark price: candles are missing" + ) + return price + except KeyError: + commons_logging.get_logger(self.__class__.__name__).error( + f"Impossible to initialize {symbol} price on {exchange_data.exchange_details.name}: no " + f"candle or cached ticker price" + ) + return None + + @contextlib.asynccontextmanager + async def _predictive_order_sync_context( + self, + exchange_manager, + profile_data: commons_profiles.ProfileData, + ): + # disable portfolio fetch and available value updates as portfolio is already up-to-date + with ( + # don't fetch portfolio update when creating/filling order + exchange_manager.exchange_personal_data.orders_manager.disabled_order_auto_synchronization( + enable_order_fill_events=self.enable_order_fill_events + ), + # dont fetch positions update when creating/filling order + exchange_manager.exchange_personal_data.positions_manager.disabled_positions_update_from_order(), + ): + if profile_data.trader_simulator.enabled: + if self.enable_order_fill_events: + # initialize order fill events + for order in octobot_trading.api.get_open_orders(exchange_manager): + await order.update_order_status() + # in simulated context, temporarily enable trader simulator automations + # to update portfolio and handle orders as simulated + previous_simulated_state = exchange_manager.trader.simulate + exchange_manager.trader.simulate = True + exchange_manager.exchange_personal_data.error_on_channel_notification_push_error = not self.enable_order_fill_events + try: + yield + finally: + exchange_manager.trader.simulate = previous_simulated_state + exchange_manager.exchange_personal_data.error_on_channel_notification_push_error = False + else: + yield diff --git a/packages/flow/octobot_flow/repositories/exchange/exchange_repository_factory.py b/packages/flow/octobot_flow/repositories/exchange/exchange_repository_factory.py new file mode 100644 index 0000000000..4c71422e60 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/exchange_repository_factory.py @@ -0,0 +1,85 @@ +import octobot_trading.exchanges + +import octobot_flow.entities +import octobot_flow.repositories.exchange.simulated_trading as simulated_trading_repositories +import octobot_flow.repositories.exchange.ohlcv_repository as ohlcv_repository_import +import octobot_flow.repositories.exchange.orders_repository as orders_repository_import +import octobot_flow.repositories.exchange.portfolio_repository as portfolio_repository_import +import octobot_flow.repositories.exchange.positions_repository as positions_repository_import +import octobot_flow.repositories.exchange.trades_repository as trades_repository_import +import octobot_flow.repositories.exchange.tickers_repository as tickers_repository_import + + + +class ExchangeRepositoryFactory: + def __init__( + self, + exchange_manager: octobot_trading.exchanges.ExchangeManager, + known_automations: list[octobot_flow.entities.AutomationDetails], + fetched_exchange_data: octobot_flow.entities.FetchedExchangeData, + is_simulated: bool, + ): + self.exchange_manager: octobot_trading.exchanges.ExchangeManager = exchange_manager + self.known_automations: list[octobot_flow.entities.AutomationDetails] = known_automations + self.fetched_exchange_data: octobot_flow.entities.FetchedExchangeData = fetched_exchange_data + self.is_simulated: bool = is_simulated + + def get_ohlcv_repository(self) -> ohlcv_repository_import.OhlcvRepository: + if self.is_simulated: + return simulated_trading_repositories.SimulatedOhlcvRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + else: + return ohlcv_repository_import.OhlcvRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + + def get_orders_repository(self) -> orders_repository_import.OrdersRepository: + if self.is_simulated: + return simulated_trading_repositories.SimulatedOrdersRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + else: + return orders_repository_import.OrdersRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + + def get_portfolio_repository(self) -> portfolio_repository_import.PortfolioRepository: + if self.is_simulated: + return simulated_trading_repositories.SimulatedPortfolioRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + else: + return portfolio_repository_import.PortfolioRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + + def get_positions_repository(self) -> positions_repository_import.PositionsRepository: + if self.is_simulated: + return simulated_trading_repositories.SimulatedPositionsRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + else: + return positions_repository_import.PositionsRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + + def get_trades_repository(self) -> trades_repository_import.TradesRepository: + if self.is_simulated: + return simulated_trading_repositories.SimulatedTradesRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + else: + return trades_repository_import.TradesRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + + def get_tickers_repository(self) -> tickers_repository_import.TickersRepository: + if self.is_simulated: + return simulated_trading_repositories.SimulatedTickersRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) + else: + return tickers_repository_import.TickersRepository( + self.exchange_manager, self.known_automations, self.fetched_exchange_data + ) diff --git a/packages/flow/octobot_flow/repositories/exchange/ohlcv_repository.py b/packages/flow/octobot_flow/repositories/exchange/ohlcv_repository.py new file mode 100644 index 0000000000..6e25269eca --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/ohlcv_repository.py @@ -0,0 +1,23 @@ +import typing + +import octobot_commons.enums as common_enums +import octobot_flow.repositories.exchange.base_exchange_repository as base_exchange_repository_import +import octobot_trading.exchange_data +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.constants + + +class OhlcvRepository(base_exchange_repository_import.BaseExchangeRepository): + + async def fetch_ohlcv( + self, symbol: str, time_frame: str, limit: int, tickers: dict[str, dict[str, typing.Any]] + ) -> exchange_data_import.MarketDetails: + updater = typing.cast( + octobot_trading.exchange_data.OHLCVUpdater, + self.get_channel_updater(octobot_trading.constants.OHLCV_CHANNEL) + ) + ohlcvs = await updater.fetch_ohlcv( + symbol, common_enums.TimeFrames(time_frame), limit, allow_cache=True, tickers_backup=tickers + ) + return exchange_data_import.MarketDetails.from_ohlcvs(symbol, time_frame, ohlcvs) + diff --git a/packages/flow/octobot_flow/repositories/exchange/orders_repository.py b/packages/flow/octobot_flow/repositories/exchange/orders_repository.py new file mode 100644 index 0000000000..ba1b034ceb --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/orders_repository.py @@ -0,0 +1,48 @@ +import typing + +import octobot_flow.repositories.exchange.base_exchange_repository as base_exchange_repository_import +import octobot_trading.util.test_tools.exchanges_test_tools as exchanges_test_tools_import +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.storage as orders_storage +import octobot_trading.constants as trading_constants +import octobot_trading.personal_data as trading_personal_data + + +class OrdersRepository(base_exchange_repository_import.BaseExchangeRepository): + + async def fetch_open_orders( + self, symbols: list[str], ignore_unsupported_orders: bool = True + ) -> list[dict]: + if not symbols: + return [] + updater = typing.cast( + trading_personal_data.OrdersUpdater, + self.get_channel_updater(trading_constants.ORDERS_CHANNEL) + ) + open_orders = await updater.fetch_open_orders(symbols) + return [ + exchanges_test_tools_import.parse_order_into_dict( + self.exchange_manager, order, True, ignore_unsupported_orders + ) + for order in open_orders + if order + ] # type: ignore + + def update_enriched_orders( + self, + updated_orders: list[dict[str, typing.Any]], + existing_orders: dict[str, dict[str, dict[str, typing.Any]]] + ) -> list[dict[str, typing.Any]]: + account_orders_by_exchange_id = { + order[trading_constants.STORAGE_ORIGIN_VALUE][trading_enums.ExchangeConstantsOrderColumns.EXCHANGE_ID.value]: order + for order in existing_orders + } + return [ + orders_storage.update_enriched_order( + order, + account_orders_by_exchange_id, + self.exchange_manager + ) + for order in updated_orders + ] \ No newline at end of file diff --git a/packages/flow/octobot_flow/repositories/exchange/portfolio_repository.py b/packages/flow/octobot_flow/repositories/exchange/portfolio_repository.py new file mode 100644 index 0000000000..f5aaff31b3 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/portfolio_repository.py @@ -0,0 +1,20 @@ +import decimal +import typing + +import octobot_trading.personal_data as personal_data +import octobot_flow.repositories.exchange.base_exchange_repository as base_exchange_repository_import +import octobot_trading.constants as trading_constants +import octobot_trading.personal_data as trading_personal_data + +class PortfolioRepository(base_exchange_repository_import.BaseExchangeRepository): + + async def fetch_portfolio(self) -> dict[str, dict[str, decimal.Decimal]]: + + updater = typing.cast( + trading_personal_data.BalanceUpdater, + self.get_channel_updater(trading_constants.BALANCE_CHANNEL) + ) + portfolio = await updater.fetch_portfolio() + return personal_data.from_raw_to_formatted_portfolio( + personal_data.filter_empty_values(portfolio), as_float=False + ) # type: ignore diff --git a/packages/flow/octobot_flow/repositories/exchange/positions_repository.py b/packages/flow/octobot_flow/repositories/exchange/positions_repository.py new file mode 100644 index 0000000000..b854e4350e --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/positions_repository.py @@ -0,0 +1,20 @@ +import octobot_flow.repositories.exchange.base_exchange_repository as base_exchange_repository_import +import octobot_trading.util.test_tools.exchanges_test_tools as exchanges_test_tools_import +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +class PositionsRepository(base_exchange_repository_import.BaseExchangeRepository): + + async def fetch_positions(self, symbols: list[str]) -> list[exchange_data_import.PositionDetails]: + raw_positions = await exchanges_test_tools_import.get_positions( + self.exchange_manager, None, symbols=symbols + ) + return [self._parse_position(position) for position in raw_positions] + + + def _parse_position(self, raw_position: dict) -> exchange_data_import.PositionDetails: + return exchange_data_import.PositionDetails( + position=raw_position, contract=self._parse_contract(raw_position) + ) + + def _parse_contract(self, raw_position: dict) -> dict: + raise NotImplementedError("Not _parse_contract not implemented") diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/__init__.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/__init__.py new file mode 100644 index 0000000000..ce3deb3f61 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/__init__.py @@ -0,0 +1,15 @@ +from octobot_flow.repositories.exchange.simulated_trading.simulated_ohlcv_repository import SimulatedOhlcvRepository +from octobot_flow.repositories.exchange.simulated_trading.simulated_orders_repository import SimulatedOrdersRepository +from octobot_flow.repositories.exchange.simulated_trading.simulated_portfolio_repository import SimulatedPortfolioRepository +from octobot_flow.repositories.exchange.simulated_trading.simulated_tickers_repository import SimulatedTickersRepository +from octobot_flow.repositories.exchange.simulated_trading.simulated_trades_repository import SimulatedTradesRepository +from octobot_flow.repositories.exchange.simulated_trading.simulated_positions_repository import SimulatedPositionsRepository + +__all__ = [ + "SimulatedOhlcvRepository", + "SimulatedOrdersRepository", + "SimulatedPortfolioRepository", + "SimulatedTickersRepository", + "SimulatedTradesRepository", + "SimulatedPositionsRepository", +] \ No newline at end of file diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_ohlcv_repository.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_ohlcv_repository.py new file mode 100644 index 0000000000..c969f6239e --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_ohlcv_repository.py @@ -0,0 +1,6 @@ +import octobot_flow.repositories.exchange.ohlcv_repository as ohlcv_repository_import + + +class SimulatedOhlcvRepository(ohlcv_repository_import.OhlcvRepository): + # nothing simulator specific to do + pass diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_orders_repository.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_orders_repository.py new file mode 100644 index 0000000000..471d8ddc03 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_orders_repository.py @@ -0,0 +1,9 @@ +import octobot_flow.repositories.exchange.orders_repository as orders_repository_import + + +class SimulatedOrdersRepository(orders_repository_import.OrdersRepository): + + async def fetch_open_orders( + self, symbols: list[str], ignore_unsupported_orders: bool = True + ) -> list[dict]: + return [] diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_portfolio_repository.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_portfolio_repository.py new file mode 100644 index 0000000000..6b56918303 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_portfolio_repository.py @@ -0,0 +1,10 @@ +import decimal + +import octobot_flow.repositories.exchange.portfolio_repository as portfolio_repository_import +import octobot_trading.personal_data as trading_personal_data + + +class SimulatedPortfolioRepository(portfolio_repository_import.PortfolioRepository): + + async def fetch_portfolio(self) -> dict[str, dict[str, decimal.Decimal]]: + return {} diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_positions_repository.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_positions_repository.py new file mode 100644 index 0000000000..cb3c74e5e2 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_positions_repository.py @@ -0,0 +1,8 @@ +import octobot_flow.repositories.exchange.positions_repository as positions_repository_import +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + + +class SimulatedPositionsRepository(positions_repository_import.PositionsRepository): + + async def fetch_positions(self, symbols: list[str]) -> list[exchange_data_import.PositionDetails]: + return [] diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_tickers_repository.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_tickers_repository.py new file mode 100644 index 0000000000..f858005307 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_tickers_repository.py @@ -0,0 +1,6 @@ +import octobot_flow.repositories.exchange.tickers_repository as tickers_repository_import + + +class SimulatedTickersRepository(tickers_repository_import.TickersRepository): + # nothing simulator specific to do + pass diff --git a/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_trades_repository.py b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_trades_repository.py new file mode 100644 index 0000000000..eaf596992a --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/simulated_trading/simulated_trades_repository.py @@ -0,0 +1,8 @@ +import octobot_flow.repositories.exchange.trades_repository as trades_repository_import + + +class SimulatedTradesRepository(trades_repository_import.TradesRepository): + + async def fetch_trades(self, symbols: list[str]) -> list[dict]: + return [] + diff --git a/packages/flow/octobot_flow/repositories/exchange/tickers_repository.py b/packages/flow/octobot_flow/repositories/exchange/tickers_repository.py new file mode 100644 index 0000000000..f8f73e90d1 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/tickers_repository.py @@ -0,0 +1,38 @@ +import typing + +import octobot_trading.exchange_data +import octobot_trading.enums as trading_enums +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import octobot_flow.repositories.exchange.base_exchange_repository as base_exchange_repository_import +import octobot_trading.constants as trading_constants + + +class TickersRepository(base_exchange_repository_import.BaseExchangeRepository): + + async def fetch_tickers(self, symbols: typing.Optional[list[str]]) -> dict[str, dict]: + updater = typing.cast( + octobot_trading.exchange_data.TickerUpdater, + self.get_channel_updater(trading_constants.TICKER_CHANNEL) + ) + return await updater.fetch_all_tickers(symbols) + + @staticmethod + def get_cached_market_price(exchange_internal_name, exchange_type, sandboxed: bool, symbol: str) -> float: + try: + cache = octobot_trading.exchange_data.TickerUpdater.get_ticker_cache() + return cache.get_all_tickers(exchange_internal_name, exchange_type, sandboxed)[symbol][ # type: ignore + trading_enums.ExchangeConstantsTickersColumns.CLOSE.value + ] + except TypeError as err: + # symbol not found in cache + raise KeyError(err) from err + + @staticmethod + def get_cached_market_price_from_exchange_data( + exchange_data: exchange_data_import.ExchangeData, symbol: str + ) -> float: + return TickersRepository.get_cached_market_price( + exchange_data.exchange_details.name, exchange_data.auth_details.exchange_type, + exchange_data.auth_details.sandboxed, symbol, + ) diff --git a/packages/flow/octobot_flow/repositories/exchange/trades_repository.py b/packages/flow/octobot_flow/repositories/exchange/trades_repository.py new file mode 100644 index 0000000000..f53ecee5a9 --- /dev/null +++ b/packages/flow/octobot_flow/repositories/exchange/trades_repository.py @@ -0,0 +1,11 @@ +import octobot_flow.repositories.exchange.base_exchange_repository as base_exchange_repository_import +import octobot_trading.util.test_tools.exchanges_test_tools as exchanges_test_tools_import + +class TradesRepository(base_exchange_repository_import.BaseExchangeRepository): + + async def fetch_trades(self, symbols: list[str]) -> list[dict]: + if not symbols: + return [] + return await exchanges_test_tools_import.get_trades( + self.exchange_manager, None, symbols=symbols + ) diff --git a/packages/flow/tests/.env.template b/packages/flow/tests/.env.template new file mode 100644 index 0000000000..92fb0df196 --- /dev/null +++ b/packages/flow/tests/.env.template @@ -0,0 +1,5 @@ +BINANCE_KEY= +BINANCE_SECRET= + +EXCHANGE_HTTP_PROXY_AUTHENTICATED_URL=http://localhost:8081 +USE_AUTHENTICATED_EXCHANGE_REQUESTS_ONLY_PROXY=true diff --git a/packages/flow/tests/__init__.py b/packages/flow/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/flow/tests/conftest.py b/packages/flow/tests/conftest.py new file mode 100644 index 0000000000..59b5518d31 --- /dev/null +++ b/packages/flow/tests/conftest.py @@ -0,0 +1,5 @@ +# in conftest.py to load the .env file before any test is run or any import is done + +import dotenv +import os +dotenv.load_dotenv(dotenv_path=os.path.join(os.path.dirname(__file__), ".env")) diff --git a/packages/flow/tests/functionnal_tests/__init__.py b/packages/flow/tests/functionnal_tests/__init__.py new file mode 100644 index 0000000000..254f902983 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/__init__.py @@ -0,0 +1,386 @@ +import contextlib +import decimal +import mock +import pytest +import time +import os +import typing +import json + +# force env var +os.environ["USE_MINIMAL_LIBS"] = "true" +os.environ["ALLOW_FUNDS_TRANSFER"] = "True" + +import ccxt.async_support as ccxt_async +import octobot_trading.exchanges.connectors.ccxt.ccxt_clients_cache as ccxt_clients_cache +import octobot.community as community + +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities + +import octobot_flow +import octobot_flow.entities + +import octobot_flow.environment +import octobot_flow.repositories.community +import octobot_flow.logic.actions.actions_factory as actions_factory + +AUTHENTICATED_TEST_GROUP = "authenticated_xdist_group" + +# Passed as copy_exchange_account(strategy_id=...) in functional DSL so copy-trading dependencies resolve. +FUNCTIONAL_TEST_COPY_STRATEGY_ID = "functional_test_copy_strategy" + + +def d_order_price(value: typing.Union[int, float, decimal.Decimal]) -> decimal.Decimal: + """Exact decimal view of a stored order price (avoids float + int mix in assertions).""" + if isinstance(value, decimal.Decimal): + return value + return decimal.Decimal(str(value)) + + +def set_emit_signals_metadata(automation_state: dict, emit_signals: bool) -> None: + automation_state["automation"]["metadata"]["emit_signals"] = emit_signals + + +@contextlib.contextmanager +def trading_signal_emission_patches(emit_signals: bool): + with contextlib.ExitStack() as stack: + insert_mock = stack.enter_context( + mock.patch.object( + octobot_flow.repositories.community.TradingSignalsRepository, + "insert_trading_signal", + mock.AsyncMock(), + ) + ) + if emit_signals: + + @contextlib.asynccontextmanager + async def _fake_maybe_authenticator(self): + yield mock.MagicMock() + + stack.enter_context( + mock.patch.object( + octobot_flow.AutomationJob, + "_maybe_authenticator", + _fake_maybe_authenticator, + ) + ) + yield insert_mock + + +def assert_emitted_signal_account_allocation_ratios( + content: dict, + *, + allow_zero_ratio_assets: frozenset[str] = frozenset(), + allow_negligible_ratio_assets: frozenset[str] = frozenset(), +) -> None: + """Allocation checks for `TradingSignal.account.content` from `insert_trading_signal` only.""" + ratio_key = copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO + total_value = decimal.Decimal(0) + for asset, balances in content.items(): + assert ratio_key in balances, f"missing {ratio_key} for {asset}" + ratio = balances[ratio_key] + assert isinstance(ratio, decimal.Decimal) + total_value += ratio + ratio_float = float(ratio) + if asset in allow_zero_ratio_assets: + assert ratio_float == pytest.approx(0.0, abs=1e-18) + elif asset in allow_negligible_ratio_assets: + assert ratio_float < 0.05, f"{asset} expected negligible allocation_ratio, got {ratio_float}" + else: + assert ratio_float > 0, f"{asset} allocation_ratio should be > 0, got {ratio_float}" + assert float(total_value) == pytest.approx(1.0, abs=1e-3) + + +def is_on_github_ci(): + # Always set to true when GitHub Actions is running the workflow. + # You can use this variable to differentiate when tests are being run locally or by GitHub Actions. + # from https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/store-information-in-variables + return bool(os.getenv("GITHUB_ACTIONS")) + +current_time = time.time() +EXCHANGE_INTERNAL_NAME = "binanceus" if is_on_github_ci() else "binance" # binanceus works on github CI + + +async def fetch_last_price(symbol: str) -> float: + exchange_class = getattr(ccxt_async, EXCHANGE_INTERNAL_NAME) + exchange = exchange_class({}) + try: + ticker = await exchange.fetch_ticker(symbol) + finally: + await exchange.close() + last = ticker.get("last") or ticker.get("close") + if last is None: + raise AssertionError(f"{symbol} ticker has no last or close price") + return float(last) + + +@contextlib.contextmanager +def mocked_community_authentication(): + with mock.patch.object( + community.CommunityAuthentication, "login", mock.AsyncMock(), + ) as login_mock, mock.patch.object( + community.CommunityAuthentication, "is_logged_in", mock.AsyncMock(return_value=True) + ): + yield login_mock + + +@contextlib.contextmanager +def mocked_community_repository(): + with mock.patch.object( + octobot_flow.repositories.community.CommunityRepository, "insert_bot_logs", mock.AsyncMock() + ) as insert_bot_logs_mock: + yield insert_bot_logs_mock + +# ensure environment is initialized +octobot_flow.environment.initialize_environment() + + +@pytest.fixture +def global_state(): + return { + "exchange_account_details": { + "exchange_details": { + "internal_name": EXCHANGE_INTERNAL_NAME, + }, + # "auth_details": {}, # not needed for simulator + # "portfolio": {}, # irrelevant for simulator + }, + "automation": { + # "profile_data": { + # "profile_details": { + # "id": "bot_1", + # "bot_id": "id:bot_1", + # }, + # "crypto_currencies": [ + # {"trading_pairs": ["BTC/USDT"], "name": "BTC"}, + # {"trading_pairs": ["ETH/USDT"], "name": "ETH"}, + # ], + # "trading": { + # "reference_market": "USDT", + # }, + # "exchanges": [ + # { + # "internal_name": EXCHANGE_INTERNAL_NAME, + # "exchange_type": "spot", + # } + # ], + # "trader": { + # "enabled": False, + # }, + # "trader_simulator": { + # "enabled": True, + # }, + # "tentacles": [ + # { + # "name": "IndexTradingMode", + # "config": { + # "required_strategies": [], + # "refresh_interval": 1, + # "rebalance_trigger_min_percent": 5, + # "sell_unindexed_traded_coins": True, + # "quote_asset_rebalance_trigger_min_percent": 20, + # "index_content": [ + # {"name": "BTC", "value": 1}, + # {"name": "ETH", "value": 1}, + # ] + # } + # }, + # ] + # }, + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDT": { + "available": 1000.0, + "total": 1000.0, + }, + "ETH": { + "available": 0.1, + "total": 0.1, + }, + }, + }, + }, + "execution": { + "previous_execution": { + "trigger_time": current_time - 600, + "trigger_reason": "scheduled", + # "additional_actions": {}, # no additional actions + "strategy_execution_time": current_time - 590, + }, + "current_execution": { + "trigger_reason": "scheduled", + # "additional_actions": {}, # no additional actions + }, + # "degraded_state": {} # no degraded state + "execution_error": None # no execution error + }, + # "exchange_account_elements": { + # "portfolio": { + # "initial_value": 3000, + # "content": { + # # should trigger a rebalance: this does not follow the index config + # "USDT": { + # "available": 1000.0, + # "total": 1000.0, + # }, + # "ETH": { + # "available": 0.1, + # "total": 0.1, + # }, + # } + # # "full_content": {} # irrelevant for simulator + # # "asset_values": {} # cleared after iteration + # }, + # "orders": {}, # no open orders + # "positions": {}, # no positions + # "trades": [], # no trades + # } + # "post_actions": {}, # no post actions + }, + } + + +@pytest.fixture +def btc_usdc_global_state(): + return { + "exchange_account_details": { + "exchange_details": { + "internal_name": EXCHANGE_INTERNAL_NAME, + }, + }, + "automation": { + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDC": { + "available": 1000.0, + "total": 1000.0, + }, + "BTC": { + "available": 0.1, + "total": 0.1, + }, + }, + }, + }, + "execution": { + "previous_execution": { + "trigger_time": current_time - 600, + "trigger_reason": "scheduled", + "strategy_execution_time": current_time - 590, + }, + "current_execution": { + "trigger_reason": "scheduled", + }, + }, + }, + } + + +@pytest.fixture +def auth_details(): + return octobot_flow.entities.UserAuthentication( + email="test@test.com", + password="test_password", + hidden=True, + ) + + +@pytest.fixture +def actions_with_market_orders(): + return [ + { + "id": "action_1", + "dsl_script": "market('buy', 'BTC/USDT', '20q')", + }, + { + "id": "action_2", + "dsl_script": "market('buy', 'BTC/USDT', '10q')", + }, + ] + + +@pytest.fixture +def actions_with_create_limit_orders(): + return [ + { + "id": "action_1", + "dsl_script": "limit('buy', 'BTC/USDC', '10q', '-20%')", + } + ] + + +@pytest.fixture +def actions_with_cancel_limit_orders(): + return [ + { + "id": "action_1", + "dsl_script": "cancel_order('BTC/USDC')", + } + ] + + +def copy_exchange_account_action( + reference_market: str, + reference_account: copy_entities.Account, + account_copy_settings: typing.Optional[copy_entities.AccountCopySettings] = None, + strategy_id: str = FUNCTIONAL_TEST_COPY_STRATEGY_ID, +) -> dict: + return { + "id": "action_copy_exchange_account", + "dsl_script": actions_factory.create_copy_exchange_account_action( + strategy_id, reference_market, reference_account, account_copy_settings + ).dsl_script, + } + + +def empty_copy_exchange_account_action( + strategy_id: str = FUNCTIONAL_TEST_COPY_STRATEGY_ID, +) -> dict: + """Copy action with empty reference fields until a trading signal fills the DSL (refresh_required).""" + return { + "id": "action_copy_exchange_account", + "dsl_script": ( + f"copy_exchange_account(strategy_id={json.dumps(strategy_id)}, reference_market='', reference_account='')" + ), + } + + +@pytest.fixture +def isolated_exchange_cache(): + with ccxt_clients_cache.isolated_empty_cache(): + yield + + +def automation_state_dict( + resolved_actions: list[octobot_flow.entities.AbstractActionDetails], +) -> dict[str, typing.Any]: + return { + "automation": { + "metadata": {"automation_id": "automation_1"}, + "actions_dag": {"actions": resolved_actions} + } + } + + +def resolved_actions(actions: list[dict[str, typing.Any]]) -> list[octobot_flow.entities.AbstractActionDetails]: + dag = octobot_flow.entities.ActionsDAG( + actions=[octobot_flow.entities.parse_action_details(action) for action in actions], + ) + return dag.actions + + +def create_wait_action(min_delay: float, max_delay: float, id: str = "action_wait", dependencies: list[dict[str, typing.Any]] = []) -> dict[str, typing.Any]: + return { + "id": id, + "dsl_script": f"wait({min_delay}, {max_delay}, return_remaining_time=True)", + "dependencies": dependencies, + } diff --git a/packages/flow/tests/functionnal_tests/actions_reset/test_exchange_actions_reset.py b/packages/flow/tests/functionnal_tests/actions_reset/test_exchange_actions_reset.py new file mode 100644 index 0000000000..d048fd6c0f --- /dev/null +++ b/packages/flow/tests/functionnal_tests/actions_reset/test_exchange_actions_reset.py @@ -0,0 +1,241 @@ +import pytest + +import octobot_commons.constants as common_constants +import octobot_trading.enums as trading_enums + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + global_state, + btc_usdc_global_state, + auth_details, + actions_with_market_orders, + resolved_actions, + actions_with_create_limit_orders, + actions_with_cancel_limit_orders, +) + + +ADDED_COIN_SYMBOL = "BTC" + + +@pytest.mark.asyncio +async def test_exchange_actions_reset_executing_market_order_twice( + global_state: dict, auth_details: octobot_flow.entities.UserAuthentication, actions_with_market_orders: list[dict] +): + assert len(actions_with_market_orders) == 2 + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + ): + # 1. execute market order actions + automation_state = octobot_flow.entities.AutomationState.from_dict(global_state) + automation_state.upsert_automation_actions(resolved_actions(actions_with_market_orders)) + async with octobot_flow.AutomationJob(automation_state, [], [], auth_details) as automations_job: + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_with_market_orders) + for action in actions: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + if isinstance(action, octobot_flow.entities.DSLScriptActionDetails): + assert action.resolved_dsl_script is None + assert isinstance(action.result, dict) + assert "created_orders" in action.result + created_order = action.result["created_orders"][0] + assert created_order["symbol"] == "BTC/USDT" + assert created_order["side"] == "buy" + assert created_order["type"] == "market" + + after_execution_dump = automations_job.dump() + # reported next execution time to the current execution triggered_at + assert after_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert after_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] == 0 + # check portfolio content + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_execution_dump, dict) + assert list(sorted(after_execution_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 950 < after_execution_portfolio_content["USDT"][asset_type] < 1000 # spent some USDT to buy BTC + assert after_execution_portfolio_content["ETH"][asset_type] == 0.1 # did not touch ETH + assert 0.0001 < after_execution_portfolio_content["BTC"][asset_type] < 0.001 # bought BTC + + # 2. reset the first market order action + post_first_buy_state = automations_job.automation_state + post_first_buy_state.automation.actions_dag.reset_to(post_first_buy_state.automation.actions_dag.actions[0].id) + # action 1 has been reset + assert post_first_buy_state.automation.actions_dag.actions[0].executed_at is None + # action 2 has NOT been reset (it's not dependent on the first action) + assert post_first_buy_state.automation.actions_dag.actions[1].executed_at is not None + + # 3. execute market order actions again + async with octobot_flow.AutomationJob(post_first_buy_state, [], [], auth_details) as automations_job_2: + await automations_job_2.run() + + # check bot actions execution + actions = automations_job_2.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_with_market_orders) + for action in actions: + # action has been executed again + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert isinstance(action.result, dict) + assert "created_orders" in action.result + created_order = action.result["created_orders"][0] + assert created_order["symbol"] == "BTC/USDT" + assert created_order["side"] == "buy" + assert created_order["type"] == "market" + + after_execution_dump_2 = automations_job_2.dump() + # reported next execution time to the current execution triggered_at + assert after_execution_dump_2["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert after_execution_dump_2["automation"]["execution"]["current_execution"]["scheduled_to"] == 0 + # check portfolio content + after_execution_portfolio_content_2 = after_execution_dump_2["automation"]["exchange_account_elements"]["portfolio"]["content"] + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + # spent some more USDT to buy BTC + assert after_execution_portfolio_content_2["USDT"][asset_type] < after_execution_portfolio_content["USDT"][asset_type] + # bought BTC + assert after_execution_portfolio_content_2["BTC"][asset_type] > after_execution_portfolio_content["BTC"][asset_type] + assert after_execution_portfolio_content_2["ETH"][asset_type] == 0.1 # did not touch ETH + + +@pytest.mark.asyncio +async def test_exchange_actions_reset_creating_and_cancelling_limit_order_twice( + btc_usdc_global_state: dict, auth_details: octobot_flow.entities.UserAuthentication, + actions_with_create_limit_orders: list[dict], actions_with_cancel_limit_orders: list[dict] +): + actions_with_cancel_limit_orders[0]["id"] = "action_cancel" + actions_with_cancel_limit_orders[0]["dependencies"] = [{"action_id": actions_with_create_limit_orders[0]["id"]}] + actions_to_execute = actions_with_create_limit_orders + actions_with_cancel_limit_orders + assert len(actions_to_execute) == 2 + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + ): + # 1. execute create limit order action + automation_state = octobot_flow.entities.AutomationState.from_dict(btc_usdc_global_state) + automation_state.upsert_automation_actions( +resolved_actions(actions_to_execute), + ) + async with octobot_flow.AutomationJob(automation_state, [], [], auth_details) as automations_job: + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + cancel_action = actions[1] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 # create order action has been executed + assert isinstance(create_limit_action, octobot_flow.entities.AbstractActionDetails) + assert create_limit_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(create_limit_action.result, dict) + assert "created_orders" in create_limit_action.result + order = create_limit_action.result["created_orders"][0] + assert order[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == "BTC/USDC" + assert 0 < order[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] < 0.001 + assert order[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == "limit" + assert order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == "buy" + assert 5_000 < order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] < 10_000_000 + + # cancel action has not been executed yet (it depends on the create action) + assert cancel_action.executed_at is None + assert isinstance(cancel_action, octobot_flow.entities.AbstractActionDetails) + + # 2. execute cancel limit order action + automation_state_2 = automations_job.automation_state + async with octobot_flow.AutomationJob(automation_state_2, [], [], auth_details) as automations_job_2: + await automations_job_2.run() + + # check bot actions execution + actions = automations_job_2.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + cancel_action = actions[1] + assert cancel_action.executed_at is not None and cancel_action.executed_at > 0 + assert cancel_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(cancel_action.result, dict) + assert "cancelled_orders" in cancel_action.result + cancelled = cancel_action.result["cancelled_orders"] + assert len(cancelled) == 1 + assert len(cancelled[0]) > 2 # id of the cancelled order + + after_execution_dump = automations_job_2.dump() + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert "USDC" in after_execution_portfolio_content + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 5 <= after_execution_portfolio_content["USDC"][asset_type] < 10_000_000 + + # reported next execution time to the current execution scheduled to + automation_execution = after_execution_dump["automation"]["execution"] + assert automation_execution["previous_execution"]["triggered_at"] >= current_time + + # 3. reset the create limit order action + limit_order_state_3 = automations_job_2.automation_state + limit_order_state_3.automation.actions_dag.reset_to( + limit_order_state_3.automation.actions_dag.actions[0].id + ) + for action in limit_order_state_3.automation.actions_dag.actions: + assert action.executed_at is None + assert action.result is None + + # 4. execute create limit order action again + async with octobot_flow.AutomationJob(limit_order_state_3, [], [], auth_details) as automations_job_3: + await automations_job_3.run() + + # check bot actions execution + actions = automations_job_3.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + cancel_action = actions[1] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 # create order action has been executed + assert isinstance(create_limit_action, octobot_flow.entities.AbstractActionDetails) + assert create_limit_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(create_limit_action.result, dict) + assert "created_orders" in create_limit_action.result + order = create_limit_action.result["created_orders"][0] + assert order[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == "BTC/USDC" + assert 0 < order[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] < 0.001 + assert order[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == "limit" + assert order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == "buy" + assert 5_000 < order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] < 10_000_000 + + # cancel action has not been executed yet (it depends on the create action) + assert cancel_action.executed_at is None + assert isinstance(cancel_action, octobot_flow.entities.AbstractActionDetails) + + # 5. execute cancel limit order action + automation_state_4 = automations_job_3.automation_state + async with octobot_flow.AutomationJob(automation_state_4, [], [], auth_details) as automations_job_4: + await automations_job_4.run() + + # check bot actions execution + actions = automations_job_4.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + cancel_action = actions[1] + assert cancel_action.executed_at is not None and cancel_action.executed_at > 0 + assert cancel_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(cancel_action.result, dict) + assert "cancelled_orders" in cancel_action.result + cancelled = cancel_action.result["cancelled_orders"] + assert len(cancelled) == 1 + assert len(cancelled[0]) > 2 # id of the cancelled order + + after_execution_dump = automations_job_4.dump() + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert "USDC" in after_execution_portfolio_content + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 5 <= after_execution_portfolio_content["USDC"][asset_type] < 10_000_000 \ No newline at end of file diff --git a/packages/flow/tests/functionnal_tests/actions_reset/test_exchange_actions_split_by_wait.py b/packages/flow/tests/functionnal_tests/actions_reset/test_exchange_actions_split_by_wait.py new file mode 100644 index 0000000000..2edc800d08 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/actions_reset/test_exchange_actions_split_by_wait.py @@ -0,0 +1,338 @@ +import pytest +import time +import asyncio +import mock + +import octobot_commons.constants as common_constants +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.blockchain_wallets as blockchain_wallets +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums + +import tentacles.Meta.DSL_operators.python_std_operators.base_resetting_operators as resetting_operators +import tentacles.Meta.DSL_operators.python_std_operators.base_time_operators as base_time_operators + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + global_state, + btc_usdc_global_state, + auth_details, + actions_with_market_orders, + resolved_actions, + actions_with_create_limit_orders, + actions_with_cancel_limit_orders, + create_wait_action, +) + + +ADDED_COIN_SYMBOL = "BTC" + +@pytest.mark.asyncio +async def test_exchange_actions_creating_and_waiting_and_cancelling_limit( + btc_usdc_global_state: dict, auth_details: octobot_flow.entities.UserAuthentication, + actions_with_create_limit_orders: list[dict], actions_with_cancel_limit_orders: list[dict] +): + wait_action = create_wait_action(50, 100, dependencies=[{"action_id": actions_with_create_limit_orders[0]["id"]}]) + actions_with_cancel_limit_orders[0]["id"] = "action_cancel" + actions_with_cancel_limit_orders[0]["dependencies"] = [{"action_id": wait_action["id"]}] + actions_to_execute = actions_with_create_limit_orders + [wait_action] + actions_with_cancel_limit_orders + + assert len(actions_to_execute) == 3 + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + ): + t0 = time.time() + # 1. execute create limit order action + automation_state = octobot_flow.entities.AutomationState.from_dict(btc_usdc_global_state) + automation_state.upsert_automation_actions( +resolved_actions(actions_to_execute), + ) + async with octobot_flow.AutomationJob(automation_state, [], [], auth_details) as automations_job: + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + wait_action = actions[1] + cancel_action = actions[2] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 # create order action has been executed + assert isinstance(create_limit_action, octobot_flow.entities.AbstractActionDetails) + assert create_limit_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(create_limit_action.result, dict) + assert "created_orders" in create_limit_action.result + order = create_limit_action.result["created_orders"][0] + assert order[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == "BTC/USDC" + assert 0 < order[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] < 0.001 + assert order[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == "limit" + assert order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == "buy" + assert 5_000 < order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] < 10_000_000 + + for action in [wait_action, cancel_action]: + assert action.executed_at is None + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + + # immediately execute wait action + assert automations_job.automation_state.automation.execution.current_execution.scheduled_to == 0 + + # 2.A execute wait action 1/3 + automation_state_2 = automations_job.automation_state + with mock.patch.object(asyncio, "sleep", mock.AsyncMock(return_value=None)) as sleep_mock: + async with octobot_flow.AutomationJob(automation_state_2, [], [], auth_details) as automations_job_2: + await automations_job_2.run() + for call in sleep_mock.mock_calls: + # there was no call for the wait action + assert call.args[0] < 1 + + # check bot actions execution + actions = automations_job_2.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + # special case: wait action is executed and automatically reset since less than 50 seconds have passed + wait_action = actions[1] + assert wait_action.executed_at is None + assert wait_action.result is None + assert wait_action.error_status is None + assert isinstance(wait_action.previous_execution_result, dict) + rescheduled_parameters = wait_action.get_rescheduled_parameters() + assert dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY in rescheduled_parameters + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + rescheduled_parameters[dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY][ + dsl_interpreter.ReCallingOperatorResult.__name__ + ] + ) + assert isinstance(last_execution_result.last_execution_result, dict) + waiting_time_1 = last_execution_result.last_execution_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] + assert 0 < waiting_time_1 <= 100 + cancel_action = actions[2] + assert cancel_action.executed_at is None + + + # 2.B execute wait action 2/3 + automation_state_3 = automations_job.automation_state + with mock.patch.object(asyncio, "sleep", mock.AsyncMock(return_value=None)) as sleep_mock: + async with octobot_flow.AutomationJob(automation_state_3, [], [], auth_details) as automations_job_3: + await automations_job_3.run() + for call in sleep_mock.mock_calls: + # there was no call for the wait action + assert call.args[0] < 1 + + # check bot actions execution + actions = automations_job_3.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + # special case: wait action is executed and automatically reset since less than 50 seconds have passed + wait_action = actions[1] + assert wait_action.executed_at is None + assert wait_action.result is None + assert wait_action.error_status is None + assert isinstance(wait_action.previous_execution_result, dict) + rescheduled_parameters = wait_action.get_rescheduled_parameters() + assert dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY in rescheduled_parameters + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + rescheduled_parameters[dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY][ + dsl_interpreter.ReCallingOperatorResult.__name__ + ] + ) + assert isinstance(last_execution_result.last_execution_result, dict) + waiting_time_2 = last_execution_result.last_execution_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] + assert waiting_time_2 < waiting_time_1 # there is now less time to wait than during the first time + assert 0 < waiting_time_2 <= 100 + cancel_action = actions[2] + assert cancel_action.executed_at is None + + # 2.C execute wait action 3/3 + automation_state_4 = automations_job.automation_state + with ( + mock.patch.object(asyncio, "sleep", mock.AsyncMock(return_value=None)) as sleep_mock, + mock.patch.object(time, "time", mock.Mock(return_value=t0 + waiting_time_1 + 50)), + ): + async with octobot_flow.AutomationJob(automation_state_4, [], [], auth_details) as automations_job_4: + await automations_job_4.run() + for call in sleep_mock.mock_calls: + # there was no call for the wait action + assert call.args[0] < 1 + + # wait bot actions has now been executed + actions = automations_job_4.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + # special case: wait action is executed and automatically reset since less than 50 seconds have passed + wait_action = actions[1] + assert wait_action.executed_at is not None and wait_action.executed_at > 0 + assert wait_action.result is None + assert wait_action.error_status is None + assert isinstance(wait_action.previous_execution_result, dict) + cancel_action = actions[2] + assert cancel_action.executed_at is None + + # 3. execute cancel limit order action + automation_state_4 = automations_job_4.automation_state + async with octobot_flow.AutomationJob(automation_state_4, [], [], auth_details) as automations_job_4: + await automations_job_4.run() + + # check bot actions execution + actions = automations_job_4.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + create_limit_action = actions[0] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + wait_action = actions[1] + assert wait_action.executed_at is not None and wait_action.executed_at > 0 + cancel_action = actions[2] + assert cancel_action.executed_at is not None and cancel_action.executed_at > 0 + assert cancel_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(cancel_action.result, dict) + assert "cancelled_orders" in cancel_action.result + cancelled = cancel_action.result["cancelled_orders"] + assert len(cancelled) == 1 + assert len(cancelled[0]) > 2 # id of the cancelled order + + after_execution_dump = automations_job_4.dump() + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert "USDC" in after_execution_portfolio_content + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 5 <= after_execution_portfolio_content["USDC"][asset_type] < 10_000_000 + + +@pytest.mark.asyncio +async def test_exchange_actions_creating_and_waiting_with_condition_that_is_not_reevaluated( + btc_usdc_global_state: dict, auth_details: octobot_flow.entities.UserAuthentication, + actions_with_create_limit_orders: list[dict], actions_with_cancel_limit_orders: list[dict] +): + """ + DSL like ``wait(..., return_remaining_time=True) if now_ms() < N else None`` must resume as a plain + ``wait(...)`` with ``last_execution_result`` so the ``if`` is not re-evaluated. Otherwise a second run + could see ``now_ms() >= N`` and take the else branch instead of continuing the wait. + """ + wait_threshold_ms = 2_000_000 + wait_action = { + "id": "action_wait", + "dsl_script": ( + f"wait(50, 100, return_remaining_time=True) if now_ms() < {wait_threshold_ms} else None" + ), + "dependencies": [{"action_id": actions_with_create_limit_orders[0]["id"]}], + } + actions_with_cancel_limit_orders[0]["id"] = "action_cancel" + actions_with_cancel_limit_orders[0]["dependencies"] = [{"action_id": wait_action["id"]}] + actions_to_execute = actions_with_create_limit_orders + [wait_action] + actions_with_cancel_limit_orders + + now_ms_call_counter = {"count": 0} + + def tracked_now_ms_compute(operator_self): + now_ms_call_counter["count"] += 1 + if now_ms_call_counter["count"] == 1: + return wait_threshold_ms - 1 + return wait_threshold_ms + 1 + + assert len(actions_to_execute) == 3 + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + mock.patch.object(base_time_operators.NowMsOperator, "compute", tracked_now_ms_compute), + ): + t0 = time.time() + automation_state = octobot_flow.entities.AutomationState.from_dict(btc_usdc_global_state) + automation_state.upsert_automation_actions( + resolved_actions(actions_to_execute), + ) + async with octobot_flow.AutomationJob(automation_state, [], [], auth_details) as automations_job: + await automations_job.run() + + actions = automations_job.automation_state.automation.actions_dag.actions + create_limit_action = actions[0] + wait_action_state = actions[1] + cancel_action = actions[2] + assert create_limit_action.executed_at is not None and create_limit_action.executed_at > 0 + assert create_limit_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert wait_action_state.executed_at is None + assert cancel_action.executed_at is None + assert now_ms_call_counter["count"] == 0 + + automation_state_2 = automations_job.automation_state + with mock.patch.object(asyncio, "sleep", mock.AsyncMock(return_value=None)) as sleep_mock: + async with octobot_flow.AutomationJob(automation_state_2, [], [], auth_details) as automations_job_2: + await automations_job_2.run() + for call in sleep_mock.mock_calls: + assert call.args[0] < 1 + + assert now_ms_call_counter["count"] == 1 + + actions = automations_job_2.automation_state.automation.actions_dag.actions + wait_action_state = actions[1] + assert wait_action_state.executed_at is None + script_override = dsl_interpreter.ReCallingOperatorResult.get_script_override( + wait_action_state.previous_execution_result + ) + assert script_override is not None + assert script_override.startswith("wait(") + assert " if " not in script_override + rescheduled_parameters = wait_action_state.get_rescheduled_parameters() + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + rescheduled_parameters[dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY][ + dsl_interpreter.ReCallingOperatorResult.__name__ + ] + ) + assert last_execution_result.last_execution_result is not None + waiting_time_1 = last_execution_result.last_execution_result[ + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value + ] + assert 0 < waiting_time_1 <= 100 + + automation_state_3 = automations_job_2.automation_state + with mock.patch.object(asyncio, "sleep", mock.AsyncMock(return_value=None)) as sleep_mock: + async with octobot_flow.AutomationJob(automation_state_3, [], [], auth_details) as automations_job_3: + await automations_job_3.run() + for call in sleep_mock.mock_calls: + assert call.args[0] < 1 + + assert now_ms_call_counter["count"] == 1 + + actions = automations_job_3.automation_state.automation.actions_dag.actions + wait_action_state = actions[1] + assert wait_action_state.executed_at is None + rescheduled_parameters = wait_action_state.get_rescheduled_parameters() + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + rescheduled_parameters[dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY][ + dsl_interpreter.ReCallingOperatorResult.__name__ + ] + ) + assert last_execution_result.last_execution_result is not None + waiting_time_2 = last_execution_result.last_execution_result[ + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value + ] + assert waiting_time_2 < waiting_time_1 + assert 0 < waiting_time_2 <= 100 + + automation_state_4 = automations_job_3.automation_state + with ( + mock.patch.object(asyncio, "sleep", mock.AsyncMock(return_value=None)) as sleep_mock, + mock.patch.object(time, "time", mock.Mock(return_value=t0 + waiting_time_1 + 50)), + ): + async with octobot_flow.AutomationJob(automation_state_4, [], [], auth_details) as automations_job_4: + await automations_job_4.run() + for call in sleep_mock.mock_calls: + assert call.args[0] < 1 + + actions = automations_job_4.automation_state.automation.actions_dag.actions + wait_action_state = actions[1] + assert wait_action_state.executed_at is not None and wait_action_state.executed_at > 0 + cancel_action = actions[2] + assert cancel_action.executed_at is None + + automation_state_5 = automations_job_4.automation_state + async with octobot_flow.AutomationJob(automation_state_5, [], [], auth_details) as automations_job_5: + await automations_job_5.run() + + actions = automations_job_5.automation_state.automation.actions_dag.actions + cancel_action = actions[2] + assert cancel_action.executed_at is not None and cancel_action.executed_at > 0 + assert cancel_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value \ No newline at end of file diff --git a/packages/flow/tests/functionnal_tests/automation_management/test_stop_automation.py b/packages/flow/tests/functionnal_tests/automation_management/test_stop_automation.py new file mode 100644 index 0000000000..05642ada7f --- /dev/null +++ b/packages/flow/tests/functionnal_tests/automation_management/test_stop_automation.py @@ -0,0 +1,121 @@ +import pytest +import mock +import time + +import octobot_flow +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + resolved_actions, + automation_state_dict, +) + + +@pytest.fixture +def init_action(): + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": {"automation_id": "automation_1"}, + }, + }, + } + + +@pytest.fixture +def stop_automation_action(): + return { + "id": "action_stop", + "dsl_script": "stop_automation()", + "dependencies": [ + {"action_id": "action_init"}, + ], + } + + +@pytest.fixture +def random_action(): + return { + "id": "action_random", + "dsl_script": "'yes' if 1 == 2 else 'no'", + "dependencies": [ + {"action_id": "action_init"}, + ], + } + +@pytest.mark.asyncio +async def test_stop_automation_action_sets_post_actions_stop_flag( + init_action: dict, + stop_automation_action: dict, +): + all_actions = [init_action, stop_automation_action] + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + mock.patch.object(time, "time", return_value=current_time), + ): + # 1. Initialize with configuration (only init action is executed) + automation_state = automation_state_dict(resolved_actions(all_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as init_automation_job: + await init_automation_job.run() + assert init_automation_job.automation_state.automation.post_actions.stop_automation is False + + # 2. Run again to execute the stop_automation action + after_config_execution_dump = init_automation_job.dump() + state = after_config_execution_dump + async with octobot_flow.AutomationJob(state, [], [], {}) as automation_job: + await automation_job.run() + + # 3. Verify stop_automation action was executed and post_actions.stop_automation is set + actions = automation_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(all_actions) + for action in actions: + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + + assert automation_job.automation_state.automation.post_actions.stop_automation is True + assert automation_job.automation_state.priority_actions == [] + + +@pytest.mark.asyncio +async def test_stop_automation_action_via_priority_actions_sets_post_actions_stop_flag( + init_action: dict, + stop_automation_action: dict, + random_action: dict, +): + all_actions = [init_action, random_action] + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + mock.patch.object(time, "time", return_value=current_time), + ): + # 1. Initialize with configuration (only init action is executed) + automation_state = automation_state_dict(resolved_actions(all_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as init_automation_job: + await init_automation_job.run() + assert init_automation_job.automation_state.automation.post_actions.stop_automation is False + # check random action is not executed + assert init_automation_job.automation_state.automation.actions_dag.actions[1].result is None + assert init_automation_job.automation_state.automation.actions_dag.actions[1].executed_at is None + + # 2. Run again with stop_automation_action as priority_actions + after_config_execution_dump = init_automation_job.dump() + state = after_config_execution_dump + priority_actions = resolved_actions([stop_automation_action]) + async with octobot_flow.AutomationJob(state, priority_actions, [], {}) as automation_job: + await automation_job.run() + # check random action is not executed + assert init_automation_job.automation_state.automation.actions_dag.actions[1].result is None + assert init_automation_job.automation_state.automation.actions_dag.actions[1].executed_at is None + + # check stop_automation action is executed + assert priority_actions[0].executed_at is not None and priority_actions[0].executed_at >= current_time + + # 3. Verify stop_automation action was executed and post_actions.stop_automation is set + assert automation_job.automation_state.automation.post_actions.stop_automation is True + assert automation_job.automation_state.priority_actions == priority_actions + # ensure priority_actions is added to history diff --git a/packages/flow/tests/functionnal_tests/blockchains_actions/test_no_exchange_action.py b/packages/flow/tests/functionnal_tests/blockchains_actions/test_no_exchange_action.py new file mode 100644 index 0000000000..80bef38415 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/blockchains_actions/test_no_exchange_action.py @@ -0,0 +1,237 @@ +import pytest +import mock +import time +import decimal + +import octobot_trading.constants as trading_constants +import octobot_trading.blockchain_wallets as blockchain_wallets + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + resolved_actions, + automation_state_dict, +) + + +ADDED_COIN_SYMBOL = "BTC" +DESTINATION_ADDRESS = "0xDESTINATION_ADDRESS1234567890abcdef1234567890abcdef12345678" +WALLET_ADDRESS = "0x1234567890abcdef1234567890abcdef12345678" + + +@pytest.fixture +def init_action(): + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": {"automation_id": "automation_1"}, + }, + # "exchange_account_details": {}, # no exchange account details + }, + } + + +@pytest.fixture +def actions_with_blockchain_deposit_and_withdrawal_with_holding_checks(): + blockchain_descriptor = { + "blockchain": blockchain_wallets.BlockchainWalletSimulator.BLOCKCHAIN, + "network": trading_constants.SIMULATED_BLOCKCHAIN_NETWORK, + "native_coin_symbol": ADDED_COIN_SYMBOL, + "tokens": [ + { + "symbol": "ETH", + "decimals": 18, + "contract_address": "0x1234567890abcdef1234567890abcdef12345678", + }, + ] + } + wallet_descriptor = { + "address": WALLET_ADDRESS, + "private_key": f"{WALLET_ADDRESS}_private_key", + "specific_config": { + "assets": [ + { + "asset": ADDED_COIN_SYMBOL, + "amount": 1, + }, + { + "asset": "ETH", + "amount": 42, + }, + ] + } + } + return [ + { + "id": "action_1", + "dsl_script": f"error('{octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value}') if blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}') < 1 else 'ok'", + "dependencies": [ + { + "action_id": "action_init", + }, + ], + }, + { + "id": "action_2", + "dsl_script": f"blockchain_wallet_transfer({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}', 0.1, '{DESTINATION_ADDRESS}')", + "dependencies": [ + { + "action_id": "action_init", + }, + ], + }, + ] + + +@pytest.mark.asyncio +async def test_start_with_empty_state_and_execute_simple_condition_action( + init_action: dict, +): + all_actions = [init_action] + [{ + "id": "action_1", + "dsl_script": "'yes' if 1 == 2 else 'no'", + "dependencies": [ + { + "action_id": "action_init", + }, + ], + }] + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + mock.patch.object(time, 'time', return_value=current_time), + ): + # 1. initialize with configuration (other actions wont be executed as their dependencies are not met) + automation_state = automation_state_dict(resolved_actions(all_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as init_automation_job: + await init_automation_job.run() + # check actions execution + assert len(init_automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(init_automation_job.automation_state.automation.actions_dag.actions): + if index == 0: + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert action.result is None + else: + # not yet executed + assert action.executed_at is None + assert action.error_status is None + assert action.result is None + # check no exchange account details + after_config_execution_dump = init_automation_job.dump() + assert after_config_execution_dump["exchange_account_details"]["portfolio"]["content"] == [] + assert "automation" in after_config_execution_dump + assert "reference_exchange_account_elements" not in after_config_execution_dump["automation"] + assert "exchange_account_elements" not in after_config_execution_dump["automation"] + + # 2. execute simple condition action + state = after_config_execution_dump + async with octobot_flow.AutomationJob(state, [], [], {}) as automation_job: + await automation_job.run() + + # check bot actions execution + actions = automation_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(all_actions) + for index, action in enumerate(actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + if index == 0: + assert action.result is None + elif index == 1: + assert action.result == "no" + assert action.error_status is None + assert action.executed_at and action.executed_at >= current_time + + after_execution_dump = automation_job.dump() + # still no portfolio + assert after_execution_dump["exchange_account_details"]["portfolio"]["content"] == [] + assert "reference_exchange_account_elements" not in after_execution_dump["automation"] + assert "exchange_account_elements" not in after_execution_dump["automation"] + + +@pytest.mark.asyncio +async def test_start_with_empty_state_and_execute_blockchain_transfer_without_exchange( + init_action: dict, actions_with_blockchain_deposit_and_withdrawal_with_holding_checks: list[dict] +): + all_actions = [init_action] + actions_with_blockchain_deposit_and_withdrawal_with_holding_checks + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + mock.patch.object(trading_constants, 'ALLOW_FUNDS_TRANSFER', True), + mock.patch.object(time, 'time', return_value=current_time), + ): + # 1. initialize with configuration (other actions wont be executed as their dependencies are not met) + automation_state = automation_state_dict(resolved_actions(all_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as init_automation_job: + await init_automation_job.run() + # check actions execution + assert len(init_automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(init_automation_job.automation_state.automation.actions_dag.actions): + if index == 0: + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert action.result is None + else: + # not yet executed + assert action.executed_at is None + assert action.error_status is None + assert action.result is None + # check no exchange account details + after_config_execution_dump = init_automation_job.dump() + assert after_config_execution_dump["exchange_account_details"]["portfolio"]["content"] == [] + assert "automation" in after_config_execution_dump + assert "reference_exchange_account_elements" not in after_config_execution_dump["automation"] + assert "exchange_account_elements" not in after_config_execution_dump["automation"] + # communit auth is not used in this test + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() + + # 2. execute blockchain transfer actions + state = after_config_execution_dump + async with octobot_flow.AutomationJob(state, [], [], {}) as automation_job: + await automation_job.run() + + # check bot actions execution + actions = automation_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(all_actions) + for index, action in enumerate(actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + if index == 0: + assert action.result is None + elif index == 1: + assert action.result == "ok" + elif index == 2: + checked = { + "timestamp": int(current_time), + "address_from": WALLET_ADDRESS, + "address_to": DESTINATION_ADDRESS, + "network": trading_constants.SIMULATED_BLOCKCHAIN_NETWORK, + "currency": ADDED_COIN_SYMBOL, + "amount": decimal.Decimal("0.1"), + "fee": None, + "comment": "", + "internal": False, + } + assert len(action.result["created_transactions"]) == 1 + for key, value in checked.items(): + assert action.result["created_transactions"][0][key] == value + assert action.executed_at and action.executed_at >= current_time + + after_execution_dump = automation_job.dump() + # still no portfolio + assert after_execution_dump["exchange_account_details"]["portfolio"]["content"] == [] + assert "reference_exchange_account_elements" not in after_execution_dump["automation"] + assert "exchange_account_elements" not in after_execution_dump["automation"] + + # communit auth is not used in this test + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() diff --git a/packages/flow/tests/functionnal_tests/blockchains_actions/test_simulator_blockchain_actions.py b/packages/flow/tests/functionnal_tests/blockchains_actions/test_simulator_blockchain_actions.py new file mode 100644 index 0000000000..4b0735ae55 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/blockchains_actions/test_simulator_blockchain_actions.py @@ -0,0 +1,215 @@ +import pytest +import mock + +import octobot_trading.blockchain_wallets as blockchain_wallets +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + global_state, + auth_details, + resolved_actions, +) + + +ADDED_COIN_SYMBOL = "BTC" + +@pytest.fixture +def actions_with_blockchain_deposit_and_withdrawal_with_holding_checks(): + wallet_address = "0x1234567890abcdef1234567890abcdef12345678" + blockchain_descriptor = { + "blockchain": blockchain_wallets.BlockchainWalletSimulator.BLOCKCHAIN, + "network": trading_constants.SIMULATED_BLOCKCHAIN_NETWORK, + "native_coin_symbol": ADDED_COIN_SYMBOL, + "tokens": [ + { + "symbol": "ETH", + "decimals": 18, + "contract_address": "0x1234567890abcdef1234567890abcdef12345678", + }, + ] + } + wallet_descriptor = { + "address": wallet_address, + "private_key": f"{wallet_address}_private_key", + "specific_config": { + "assets": [ + { + "asset": ADDED_COIN_SYMBOL, + "amount": 1, + }, + { + "asset": "ETH", + "amount": 42, + }, + ] + } + } + return [ + { + "id": "action_1", + "dsl_script": f"error('{octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value}') if (blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}') < 1) else 'ok'", # will pass + }, + { + "id": "action_2", + "dsl_script": f"error('{octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value}') if blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}') < 2500 else 'ok'", # will fail + }, + { + "id": "action_3", + "dsl_script": f"error('{octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value}') if blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}') < 1 else 'ok'", # will pass + }, + { + "id": "action_4", + "dsl_script": f"blockchain_wallet_transfer({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}', 0.1, '{trading_constants.SIMULATED_DEPOSIT_ADDRESS}_{ADDED_COIN_SYMBOL}')", + }, + { + "id": "action_5", + "dsl_script": f"error('{octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value}') if available('{ADDED_COIN_SYMBOL}') < 0.1 else 'ok'", + }, + { + "id": "action_6", + "dsl_script": f"market('sell', 'BTC/USDT', '0.04')", + }, + { + "id": "action_7", + "dsl_script": f"withdraw('{ADDED_COIN_SYMBOL}', '{trading_constants.SIMULATED_BLOCKCHAIN_NETWORK}', '{wallet_address}', 0.05)", + }, + { + "id": "action_8", + "dsl_script": f"error('{octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value}') if blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, '{ADDED_COIN_SYMBOL}') < 0.95 else 'ok'", + }, + ] + +@pytest.mark.asyncio +async def test_execute_actions_with_blockchain_deposit_and_withdrawal( + global_state: dict, + auth_details: octobot_flow.entities.UserAuthentication, + actions_with_blockchain_deposit_and_withdrawal_with_holding_checks: list[dict] +): + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + mock.patch.object(trading_constants, 'ALLOW_FUNDS_TRANSFER', True), + ): + async with octobot_flow.AutomationJob(global_state, [], [], auth_details) as automations_job: + automations_job.automation_state.upsert_automation_actions( + resolved_actions(actions_with_blockchain_deposit_and_withdrawal_with_holding_checks), + ) + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_with_blockchain_deposit_and_withdrawal_with_holding_checks) + for index, action in enumerate(actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + if index == 1: + # only the second action will fail because of not enough funds + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value + assert action.result is None + else: + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(action.result, dict) or action.result == "ok" + assert action.result + assert action.executed_at and action.executed_at >= current_time + + after_execution_dump = automations_job.dump() + # reported next execution time to the current execution triggered_at + assert after_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert after_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] == 0 + # check portfolio content + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_execution_dump, dict) + assert list(sorted(after_execution_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] # BTC is now added to the portfolio + assert after_execution_portfolio_content["USDT"]["available"] > 2000 # sold BTC, therefore added some USDT to the portfolio (initially 1000 USDT) + assert after_execution_portfolio_content["ETH"]["available"] == 0.1 # did not touch ETH + assert 0.009 < after_execution_portfolio_content["BTC"]["total"] <= 0.01 # deposited 0.1 BTC, sold 0.04 BTC and withdrew 0.05 BTC + assert 0.009 < after_execution_portfolio_content["BTC"]["available"] <= 0.01 # deposited 0.1 BTC, sold 0.04 BTC and withdrew 0.05 BTC + + # check transactions + after_execution_transactions = after_execution_dump["automation"]["exchange_account_elements"]["transactions"] + assert isinstance(after_execution_transactions, list) + assert len(after_execution_transactions) == 2 + # first transaction is the deposit + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == ADDED_COIN_SYMBOL + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == 0.1 + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x123_simulated_deposit_address_BTC" + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == trading_constants.SIMULATED_BLOCKCHAIN_NETWORK + # second transaction is the withdrawal + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == ADDED_COIN_SYMBOL + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == 0.05 + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x1234567890abcdef1234567890abcdef12345678" + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == trading_constants.SIMULATED_BLOCKCHAIN_NETWORK + + login_mock.assert_called_once() + insert_bot_logs_mock.assert_called_once() + + +@pytest.mark.asyncio +async def test_execute_actions_with_blockchain_deposit_and_withdrawal_with_holding_checks( + global_state: dict, + auth_details: octobot_flow.entities.UserAuthentication, + actions_with_blockchain_deposit_and_withdrawal_with_holding_checks: list[dict] +): + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + mock.patch.object(trading_constants, 'ALLOW_FUNDS_TRANSFER', True), + ): + async with octobot_flow.AutomationJob(global_state, [], [], auth_details) as automations_job: + automations_job.automation_state.upsert_automation_actions( + resolved_actions(actions_with_blockchain_deposit_and_withdrawal_with_holding_checks), + ) + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_with_blockchain_deposit_and_withdrawal_with_holding_checks) + for index, action in enumerate(actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + if index == 1: + # only the second action will fail because of not enough funds + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NOT_ENOUGH_FUNDS.value + assert action.result is None + else: + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(action.result, dict) or action.result == "ok" + assert action.result + assert action.executed_at and action.executed_at >= current_time + + after_execution_dump = automations_job.dump() + # reported next execution time to the current execution triggered_at + assert after_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert after_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] == 0 + # check portfolio content + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_execution_dump, dict) + assert list(sorted(after_execution_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] # BTC is now added to the portfolio + assert after_execution_portfolio_content["USDT"]["available"] > 2000 # sold BTC, therefore added some USDT to the portfolio (initially 1000 USDT) + assert after_execution_portfolio_content["ETH"]["available"] == 0.1 # did not touch ETH + assert 0.009 < after_execution_portfolio_content["BTC"]["total"] <= 0.01 # deposited 0.1 BTC, sold 0.04 BTC and withdrew 0.05 BTC + assert 0.009 < after_execution_portfolio_content["BTC"]["available"] <= 0.01 # deposited 0.1 BTC, sold 0.04 BTC and withdrew 0.05 BTC + # check transactions + after_execution_transactions = after_execution_dump["automation"]["exchange_account_elements"]["transactions"] + assert isinstance(after_execution_transactions, list) + assert len(after_execution_transactions) == 2 + # first transaction is the deposit + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == ADDED_COIN_SYMBOL + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == 0.1 + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x123_simulated_deposit_address_BTC" + assert after_execution_transactions[0][trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == trading_constants.SIMULATED_BLOCKCHAIN_NETWORK + # second transaction is the withdrawal + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == ADDED_COIN_SYMBOL + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == 0.05 + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x1234567890abcdef1234567890abcdef12345678" + assert after_execution_transactions[1][trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == trading_constants.SIMULATED_BLOCKCHAIN_NETWORK + + login_mock.assert_called_once() + insert_bot_logs_mock.assert_called_once() diff --git a/packages/flow/tests/functionnal_tests/exchanges_actions/test_authenticated_exchange_actions.py b/packages/flow/tests/functionnal_tests/exchanges_actions/test_authenticated_exchange_actions.py new file mode 100644 index 0000000000..380cb9ef3d --- /dev/null +++ b/packages/flow/tests/functionnal_tests/exchanges_actions/test_authenticated_exchange_actions.py @@ -0,0 +1,145 @@ +import pytest +import os + + +import octobot_commons.constants as common_constants +import octobot_trading.enums as trading_enums + + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + EXCHANGE_INTERNAL_NAME, + actions_with_create_limit_orders, + actions_with_cancel_limit_orders, + resolved_actions, + automation_state_dict, + AUTHENTICATED_TEST_GROUP, +) + + + +@pytest.fixture +def init_action(): + if not os.environ.get("BINANCE_KEY") or not os.environ.get("BINANCE_SECRET"): + pytest.skip("BINANCE_KEY and BINANCE_SECRET must be set in the .env file to run this test, skipping...") + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": {"automation_id": "automation_1"}, + "exchange_account_elements": { + "portfolio": {"content": {}}, + }, + }, + "exchange_account_details": { + "exchange_details": { + "internal_name": EXCHANGE_INTERNAL_NAME, + }, + "auth_details": { + "api_key": os.environ["BINANCE_KEY"], + "api_secret": os.environ["BINANCE_SECRET"], + }, + "portfolio": {}, + }, + }, + } + + +@pytest.mark.asyncio +@pytest.mark.xdist_group(name=AUTHENTICATED_TEST_GROUP) +async def test_execute_actions_with_limit_orders_and_empty_state( + init_action: dict, actions_with_create_limit_orders: list[dict], actions_with_cancel_limit_orders: list[dict] +): + all_actions = [init_action] + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + ): + automation_state = automation_state_dict(resolved_actions(all_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automations_job: + await automations_job.run() + + # check bot actions execution + assert len(automations_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for action in automations_job.automation_state.automation.actions_dag.actions: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert action.result is None + + after_execution_dump = automations_job.dump() + exchange_account_details = after_execution_dump["exchange_account_details"] + exchange_details = exchange_account_details["exchange_details"] + dump_auth_details = exchange_account_details["auth_details"] + portfolio = exchange_account_details["portfolio"] + assert "automation" in after_execution_dump + automation_execution = after_execution_dump["automation"]["execution"] + # assert exchange account details init + assert exchange_details["internal_name"] == EXCHANGE_INTERNAL_NAME + assert dump_auth_details["api_key"] == os.environ["BINANCE_KEY"] + assert dump_auth_details["api_secret"] == os.environ["BINANCE_SECRET"] + assert portfolio["content"] == [] + assert portfolio["unit"] == "" + # assert automation portfolio (not fetched yet) + portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert portfolio_content == {} + # reported next execution time to the current execution triggered_at + assert automation_execution["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert automation_execution["current_execution"]["scheduled_to"] == 0 + # communit auth is not used in this context + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() + + # 2. second call: execute received limit/cancel orders actions + actions_to_execute = actions_with_create_limit_orders + actions_with_cancel_limit_orders + state = after_execution_dump + other_actions = resolved_actions(actions_to_execute) + automation_id = after_execution_dump["automation"]["metadata"]["automation_id"] + async with octobot_flow.AutomationJob(state, [], [], {}) as automations_job: + automations_job.automation_state.upsert_automation_actions(other_actions) + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_to_execute) + len(all_actions) + # Skip init action at index 0, check limit/cancel actions + create_limit_action = actions[1] + cancel_action = actions[2] + assert isinstance(create_limit_action, octobot_flow.entities.AbstractActionDetails) + assert create_limit_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(create_limit_action.result, dict) + assert "created_orders" in create_limit_action.result + order = create_limit_action.result["created_orders"][0] + assert order[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == "BTC/USDC" + assert 0 < order[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] < 0.001 + assert order[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == "limit" + assert order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == "buy" + assert 5_000 < order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] < 10_000_000 + + assert isinstance(cancel_action, octobot_flow.entities.AbstractActionDetails) + assert cancel_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(cancel_action.result, dict) + assert "cancelled_orders" in cancel_action.result + cancelled = cancel_action.result["cancelled_orders"] + assert len(cancelled) == 1 + assert len(cancelled[0]) > 2 # id of the cancelled order + + after_execution_dump = automations_job.dump() + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert "USDC" in after_execution_portfolio_content + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 5 <= after_execution_portfolio_content["USDC"][asset_type] < 10_000_000 + + # reported next execution time to the current execution scheduled to + automation_execution = after_execution_dump["automation"]["execution"] + assert automation_execution["previous_execution"]["triggered_at"] >= current_time + # communit auth is not used in this test + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() \ No newline at end of file diff --git a/packages/flow/tests/functionnal_tests/exchanges_actions/test_simulated_exchange_actions.py b/packages/flow/tests/functionnal_tests/exchanges_actions/test_simulated_exchange_actions.py new file mode 100644 index 0000000000..33213c26c3 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/exchanges_actions/test_simulated_exchange_actions.py @@ -0,0 +1,72 @@ +import pytest +import logging +import mock + +import octobot_commons.constants as common_constants +import octobot_trading.blockchain_wallets as blockchain_wallets +import octobot_trading.constants as trading_constants + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + global_state, + auth_details, + actions_with_market_orders, + resolved_actions, +) + + +ADDED_COIN_SYMBOL = "BTC" + + +@pytest.mark.asyncio +async def test_execute_actions_with_market_orders_and_existing_state( + global_state: dict, auth_details: octobot_flow.entities.UserAuthentication, actions_with_market_orders: list[dict] +): + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + ): + # test with parsed global state + automation_state = octobot_flow.entities.AutomationState.from_dict(global_state) + automation_state.upsert_automation_actions(resolved_actions(actions_with_market_orders)) + async with octobot_flow.AutomationJob(automation_state, [], [], auth_details) as automations_job: + await automations_job.run() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_with_market_orders) + for action in actions: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert isinstance(action.result, dict) + assert "created_orders" in action.result + created_order = action.result["created_orders"][0] + assert created_order["symbol"] == "BTC/USDT" + assert created_order["side"] == "buy" + assert created_order["type"] == "market" + + after_execution_dump = automations_job.dump() + # reported next execution time to the current execution triggered_at + assert after_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert after_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] == 0 + # check portfolio content + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_execution_dump, dict) + assert list(sorted(after_execution_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 950 < after_execution_portfolio_content["USDT"][asset_type] < 1000 # spent some USDT to buy BTC + assert after_execution_portfolio_content["ETH"][asset_type] == 0.1 # did not touch ETH + assert 0.0001 < after_execution_portfolio_content["BTC"][asset_type] < 0.001 # bought BTC + logging.getLogger("test_execute_actions_with_market_orders").info( + f"after_execution_portfolio_content: {after_execution_portfolio_content}" + ) + # check bot actions + login_mock.assert_called_once() + insert_bot_logs_mock.assert_called_once() diff --git a/packages/flow/tests/functionnal_tests/failed_actions/test_raising_automation_execution.py b/packages/flow/tests/functionnal_tests/failed_actions/test_raising_automation_execution.py new file mode 100644 index 0000000000..84ab472456 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/failed_actions/test_raising_automation_execution.py @@ -0,0 +1,62 @@ +import pytest +import logging +import mock + +import octobot_commons.constants as common_constants +import octobot_trading.blockchain_wallets as blockchain_wallets +import octobot_trading.constants as trading_constants + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums +import octobot_flow.jobs.automation_runner_job +import octobot_flow.errors + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + current_time, + global_state, + auth_details, + actions_with_market_orders, + resolved_actions, +) + + +ADDED_COIN_SYMBOL = "BTC" + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "side_effect", + [ + Exception("test"), + octobot_flow.errors.AutomationValidationError("test") + ] +) +async def test_raising_automation_runner_job_execution( + global_state: dict, auth_details: octobot_flow.entities.UserAuthentication, actions_with_market_orders: list[dict], side_effect: Exception +): + with ( + functionnal_tests.mocked_community_authentication(), + functionnal_tests.mocked_community_repository(), + ): + # test with parsed global state + automation_state = octobot_flow.entities.AutomationState.from_dict(global_state) + automation_state.upsert_automation_actions(resolved_actions(actions_with_market_orders)) + with pytest.raises(type(side_effect)): + with mock.patch.object( + octobot_flow.jobs.automation_runner_job.AutomationRunnerJob, "run", mock.AsyncMock(side_effect=side_effect) + ) as run_mock: + async with octobot_flow.AutomationJob(automation_state, [], [], auth_details) as automations_job: + await automations_job.run() + run_mock.assert_awaited_once() + + # check bot actions execution + actions = automations_job.automation_state.automation.actions_dag.actions + assert len(actions) == len(actions_with_market_orders) + for action in actions: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at is None + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None diff --git a/packages/flow/tests/functionnal_tests/state_initialization/test_start_with_empty_state.py b/packages/flow/tests/functionnal_tests/state_initialization/test_start_with_empty_state.py new file mode 100644 index 0000000000..2f10d8ef31 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/state_initialization/test_start_with_empty_state.py @@ -0,0 +1,179 @@ +import pytest + +import octobot_commons.constants as common_constants + +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import current_time, EXCHANGE_INTERNAL_NAME, actions_with_market_orders, auth_details, resolved_actions, automation_state_dict + + +@pytest.fixture +def init_action(): + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDT": { + "available": 1000.0, + "total": 1000.0, + }, + "ETH": { + "available": 0.1, + "total": 0.1, + }, + }, + }, + }, + }, + "exchange_account_details": { + "exchange_details": { + "internal_name": EXCHANGE_INTERNAL_NAME, + }, + "auth_details": {}, + "portfolio": {}, + }, + }, + } + + + +@pytest.mark.asyncio +async def test_start_with_empty_state_and_reschedule_no_community_auth(init_action: dict): + all_actions = [init_action] + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + ): + automation_state = automation_state_dict(resolved_actions(all_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for action in automation_job.automation_state.automation.actions_dag.actions: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert action.result is None + + after_execution_dump = automation_job.dump() + exchange_account_details = after_execution_dump["exchange_account_details"] + exchange_details = exchange_account_details["exchange_details"] + dump_auth_details = exchange_account_details["auth_details"] + portfolio = exchange_account_details["portfolio"] + assert "automation" in after_execution_dump + automation_execution = after_execution_dump["automation"]["execution"] + # assert exchange account details init + assert exchange_details["internal_name"] == EXCHANGE_INTERNAL_NAME + assert dump_auth_details["api_key"] == "" + assert dump_auth_details["api_secret"] == "" + assert portfolio["content"] == [] + assert portfolio["unit"] == "" + # assert automation portfolio + portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert portfolio_content == { + "USDT": { + "available": 1000.0, + "total": 1000.0, + }, + "ETH": { + "available": 0.1, + "total": 0.1, + }, + } + # reported next execution time to the current execution triggered_at + assert automation_execution["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert automation_execution["current_execution"]["scheduled_to"] == 0 + # communit auth is not used in this context + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() + + + +@pytest.mark.asyncio +async def test_start_with_empty_state_action_followed_by_market_orders_no_community_auth( + init_action: dict, actions_with_market_orders: list[dict] +): + init_actions = [init_action] + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + ): + # 1. initialize bot with configuration + automation_state = automation_state_dict(resolved_actions(init_actions)) + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as init_automation_job: + await init_automation_job.run() + # check actions execution + assert len(init_automation_job.automation_state.automation.actions_dag.actions) == len(init_actions) + for action in init_automation_job.automation_state.automation.actions_dag.actions: + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert action.result is None + # check portfolio content + after_config_execution_dump = init_automation_job.dump() + assert after_config_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] == { + "USDT": { + "available": 1000.0, + "total": 1000.0, + }, + "ETH": { + "available": 0.1, + "total": 0.1, + }, + } + # communit auth is not used in this test + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() + + # 2. second call: execute received market orders bot actions + state = after_config_execution_dump + other_actions = resolved_actions(actions_with_market_orders) + async with octobot_flow.AutomationJob(state, [], [], {}) as automation_job: + automation_job.automation_state.upsert_automation_actions( + other_actions + ) + await automation_job.run() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(actions_with_market_orders) + len(init_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + if index == 0: + assert action.id == init_actions[0]["id"] + else: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.executed_at and action.executed_at >= current_time + assert isinstance(action.result, dict) + assert "created_orders" in action.result + created_order = action.result["created_orders"][0] + assert created_order["symbol"] == "BTC/USDT" + assert created_order["side"] == "buy" + assert created_order["type"] == "market" + + after_execution_dump = automation_job.dump() + after_execution_portfolio_content = after_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert list(sorted(after_execution_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + for asset_type in [common_constants.PORTFOLIO_AVAILABLE, common_constants.PORTFOLIO_TOTAL]: + assert 950 < after_execution_portfolio_content["USDT"][asset_type] < 1000 # spent some USDT to buy BTC + assert after_execution_portfolio_content["ETH"][asset_type] == 0.1 # did not touch ETH + assert 0.0001 < after_execution_portfolio_content["BTC"][asset_type] < 0.001 # bought BTC + + # reported next execution time to the current execution triggered_at + automation_execution = after_execution_dump["automation"]["execution"] + assert automation_execution["previous_execution"]["triggered_at"] >= current_time + # no next execution time scheduled: trigger immediately + assert automation_execution["current_execution"]["scheduled_to"] == 0 + # communit auth is not used in this test + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() diff --git a/packages/flow/tests/functionnal_tests/state_initialization/test_start_with_invalid_state.py b/packages/flow/tests/functionnal_tests/state_initialization/test_start_with_invalid_state.py new file mode 100644 index 0000000000..4a390a42f7 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/state_initialization/test_start_with_invalid_state.py @@ -0,0 +1,30 @@ +import pytest + +import octobot_flow +import octobot_flow.entities +import octobot_flow.errors + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import auth_details + + + +@pytest.mark.asyncio +async def test_multi_bots_job_start_with_invalid_empty_state(auth_details: octobot_flow.entities.UserAuthentication): + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + ): + # AutomationJob requires at least 1 automation + automation_state_empty = {} + with pytest.raises(octobot_flow.errors.NoAutomationError): + async with octobot_flow.AutomationJob(automation_state_empty, [], [], {}) as automation_job: + await automation_job.run() + + with pytest.raises(octobot_flow.errors.NoAutomationError): + async with octobot_flow.AutomationJob(automation_state_empty, [], [], auth_details) as automation_job: + await automation_job.run() + + # communit auth is not used in (raising before) + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() diff --git a/packages/flow/tests/functionnal_tests/trading_modes_actions/authenticated/test_authenticated_grid_trading_mode_action.py b/packages/flow/tests/functionnal_tests/trading_modes_actions/authenticated/test_authenticated_grid_trading_mode_action.py new file mode 100644 index 0000000000..78b8cb0907 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/trading_modes_actions/authenticated/test_authenticated_grid_trading_mode_action.py @@ -0,0 +1,671 @@ +import decimal +import os +import time +import typing + +import pytest + +import octobot_commons.enums as common_enums +import octobot_commons.constants as common_constants +import octobot_commons.logging as common_logging +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums +import octobot_trading.modes.mode_dsl_factory as mode_dsl_factory + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + assert_emitted_signal_account_allocation_ratios, + automation_state_dict, + AUTHENTICATED_TEST_GROUP, + current_time, + d_order_price, + fetch_last_price, + resolved_actions, + set_emit_signals_metadata, + trading_signal_emission_patches, +) + +import tentacles.Trading.Mode.grid_trading_mode.grid_trading as grid_trading + +increment = 5000 +spread = 10000 +D_INCREMENT = decimal.Decimal(str(increment)) +D_SPREAD = decimal.Decimal(str(spread)) +# Exchange price rounding (e.g. Binance tick) — ladder spacing is still flat increment/spread. +_GRID_PRICE_TOLERANCE = decimal.Decimal("0.5") + + +grid_pair_settings = [ + grid_trading.GridTradingMode.get_default_pair_config( + "BTC/USDC", + spread, + increment, + 2, + 2, + False, + False, + False, + ) +] + + +def grid_trading_mode_action(dependency_action: dict): + return { + "id": "action_1", + "dsl_script": ( + f"grid_trading_mode(pair_settings={dsl_interpreter.format_parameter_value(grid_pair_settings)}, {mode_dsl_factory.ENABLE_INITIAL_PORTFOLIO_OPTIMIZATION}=True)" + ), + "dependencies": [{"action_id": dependency_action["id"]}], + } + + +def _btc_usdc_limit_open_order_values(open_orders_origin_values: list[dict]) -> list[dict]: + """ + Keep only limit orders on BTC/USDC. Rebalancing can leave market orders in open_orders + briefly (or as filled-but-still-open rows), which would break the 2×2 grid ladder count. + """ + sym_col = trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value + type_col = trading_enums.ExchangeConstantsOrderColumns.TYPE.value + limit_type = trading_enums.TradeOrderType.LIMIT.value + return [ + o + for o in open_orders_origin_values + if o.get(sym_col) == "BTC/USDC" and o.get(type_col) == limit_type + ] + + +def _assert_grid_ladder_prices( + buy_orders: list[dict], + sell_orders: list[dict], + price_col: str, +) -> None: + lowest_buy_price = d_order_price(buy_orders[0][price_col]) + assert len(buy_orders) == len(sell_orders) == 2 + assert abs(d_order_price(buy_orders[1][price_col]) - (lowest_buy_price + D_INCREMENT)) <= _GRID_PRICE_TOLERANCE + assert ( + abs(d_order_price(sell_orders[0][price_col]) - (lowest_buy_price + D_INCREMENT + D_SPREAD)) + <= _GRID_PRICE_TOLERANCE + ) + assert ( + abs(d_order_price(sell_orders[1][price_col]) - (lowest_buy_price + D_INCREMENT + D_SPREAD + D_INCREMENT)) + <= _GRID_PRICE_TOLERANCE + ) + + +def _assert_nonempty_btc_usdc_portfolio(portfolio_content: dict) -> None: + assert "BTC" in portfolio_content + assert "USDC" in portfolio_content + assert portfolio_content["USDC"]["total"] > 0 + assert portfolio_content["BTC"]["total"] > 0 + + +def _assert_btc_usdc_balances_unchanged(before: dict, after: dict) -> None: + """A no-op second run must not move BTC/USDC; other assets are ignored (exchange free/total can flap between fetches).""" + for asset in ("BTC", "USDC"): + assert before[asset] == after[asset] + + +def _assert_trading_signal_authenticated_grid_account_metadata( + trading_signal: octobot_flow.entities.TradingSignal, +) -> None: + account = trading_signal.account + assert isinstance(account.updated_at, float) + assert current_time <= account.updated_at <= time.time() + assert account.positions == [] + assert account.historical_snapshots == [] + + +def _assert_trading_signal_authenticated_grid_initial_placement( + trading_signal: octobot_flow.entities.TradingSignal, is_sub_portfolio: bool +) -> None: + """ + Same structure as simulator grid signal checks: BTC/USDC content, allocation ratios, + four BTC/USDC limit orders in a 2×2 ladder (live price — use tolerance on prices). + """ + content = trading_signal.account.content + expected_assets = ["BTC", "USDC"] + if is_sub_portfolio: + assert list(sorted(content.keys())) == expected_assets + else: + assert all(asset in content for asset in expected_assets) + assert float(content["USDC"][common_constants.PORTFOLIO_TOTAL]) > 0 + assert float(content["BTC"][common_constants.PORTFOLIO_TOTAL]) > 0 + if is_sub_portfolio: + assert_emitted_signal_account_allocation_ratios(content) + else: + for asset in expected_assets: + assert asset in content + assert float(content[asset][copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO]) > 0 + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] for order in trading_signal.account.orders + ] + ladder_orders = _btc_usdc_limit_open_order_values(open_orders_origin_values) + assert len(ladder_orders) == 4 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + buy_orders = sorted( + [ + order + for order in ladder_orders + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + order + for order in ladder_orders + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + _assert_grid_ladder_prices(buy_orders, sell_orders, price_col) + _assert_trading_signal_authenticated_grid_account_metadata(trading_signal) + + +def _btc_usdc_open_order_count(automation_dump: dict, portfolio_type: str) -> int: + sym_col = trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value + return sum( + 1 + for order in automation_dump["automation"][portfolio_type]["orders"]["open_orders"] + if order[trading_constants.STORAGE_ORIGIN_VALUE].get(sym_col) == "BTC/USDC" + ) + + +async def _cancel_all_btc_usdc_orders_for_test(automation_dump: dict) -> None: + common_logging.get_logger("Tests").info("*** Cancelling all BTC/USDC orders ***") + cancel_grid_orders_actions = resolved_actions( + [ + { + "id": "action_cancel_grid", + "dsl_script": "cancel_order('BTC/USDC')", + } + ] + ) + async with octobot_flow.AutomationJob(automation_dump, [], [], {}) as automations_job: + automations_job.automation_state.upsert_automation_actions(cancel_grid_orders_actions) + await automations_job.run() + cancel_action = automations_job.automation_state.automation.actions_dag.actions[-1] + assert isinstance(cancel_action, octobot_flow.entities.AbstractActionDetails) + assert cancel_action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert isinstance(cancel_action.result, dict) + assert "cancelled_orders" in cancel_action.result + assert len(cancel_action.result["cancelled_orders"]) >= 4 + + after_cancel_dump = automations_job.dump() + assert _btc_usdc_open_order_count(after_cancel_dump, "exchange_account_elements") == 0 + + +def _grid_reference_storage_order(order_id: str, side: str, price: float, amount: float) -> dict: + return { + trading_constants.STORAGE_ORIGIN_VALUE: { + trading_enums.ExchangeConstantsOrderColumns.ID.value: order_id, + trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value: "BTC/USDC", + trading_enums.ExchangeConstantsOrderColumns.SIDE.value: side, + trading_enums.ExchangeConstantsOrderColumns.TYPE.value: trading_enums.TradeOrderType.LIMIT.value, + trading_enums.ExchangeConstantsOrderColumns.PRICE.value: price, + trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value: amount, + trading_enums.ExchangeConstantsOrderColumns.STATUS.value: trading_enums.OrderStatus.OPEN.value, + trading_enums.ExchangeConstantsOrderColumns.FILLED.value: 0.0, + trading_enums.ExchangeConstantsOrderColumns.REMAINING.value: amount, + trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value: time.time(), + trading_enums.ExchangeConstantsOrderColumns.SELF_MANAGED.value: False, + } + } + + +def _live_grid_reference_account(btc_usdc_close: float) -> copy_entities.Account: + """ + Same shape as the simulator grid_reference_account fixture: 2×2 BTC/USDC limits and half/half + portfolio ratios, with ladder prices anchored to the current market close. + """ + lowest_buy = btc_usdc_close - (spread / 2) - increment * 2 + 12.12 + order_amount = 0.004 + return copy_entities.Account( + updated_at=time.time(), + content={ + "BTC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("0.01"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("0.002"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "USDC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("1000"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("200"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[ + _grid_reference_storage_order( + "grid_ref_b0", trading_enums.TradeOrderSide.BUY.value, lowest_buy, order_amount + ), + _grid_reference_storage_order( + "grid_ref_b1", + trading_enums.TradeOrderSide.BUY.value, + lowest_buy + increment, + order_amount, + ), + _grid_reference_storage_order( + "grid_ref_s0", + trading_enums.TradeOrderSide.SELL.value, + lowest_buy + increment + spread, + order_amount, + ), + _grid_reference_storage_order( + "grid_ref_s1", + trading_enums.TradeOrderSide.SELL.value, + lowest_buy + increment + spread + increment, + order_amount, + ), + ], + positions=[], + ) + + +@pytest.fixture +def init_action(): + if not os.environ.get("BINANCE_KEY") or not os.environ.get("BINANCE_SECRET"): + pytest.skip( + "BINANCE_KEY and BINANCE_SECRET must be set in the .env file to run this test, skipping..." + ) + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDC": { + "available": 1000.0, + "total": 1000.0, + } + }, + }, + }, + }, + "exchange_account_details": { + "exchange_details": { + "internal_name": functionnal_tests.EXCHANGE_INTERNAL_NAME, + }, + "auth_details": { + "api_key": os.environ["BINANCE_KEY"], + "api_secret": os.environ["BINANCE_SECRET"], + }, + "portfolio": { + "unit": "USDC", + }, + }, + }, + } + + +@pytest.mark.asyncio +@pytest.mark.xdist_group(name=AUTHENTICATED_TEST_GROUP) +async def test_authenticated_grid_init_from_empty_state(init_action: dict): + """ + Same flow as the simulator grid test, but against a real authenticated account: current market price + anchors the ladder (no ticker/ohlcv mocks). + Requires spot USD/BTC balance sufficient for the grid on the configured exchange. + """ + all_actions = [init_action, grid_trading_mode_action(init_action)] + automation_state = automation_state_dict(resolved_actions(all_actions)) + emit_signals = True # always True in this test in order to test signal emission as well + set_emit_signals_metadata(automation_state, emit_signals) + + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock, + ): + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. run grid trading mode action (orders may exist on the exchange after this completes) + cleanup_dump: typing.Optional[dict] = None + try: + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + cleanup_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + after_grid_execution_dump = cleanup_dump + # scheduled next execution time at 1h after the current execution (1h is the default time when unspecified) + assert after_grid_execution_dump["automation"]["execution"]["previous_execution"][ + "triggered_at" + ] >= current_time + one_hour = ( + common_enums.TimeFramesMinutes[common_enums.TimeFrames.ONE_HOUR] + * common_constants.MINUTE_TO_SECONDS + ) + allowed_execution_time = 20 + schedule_delay = ( + after_grid_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_grid_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + # check portfolio and open grid orders (balances depend on the live account) + after_grid_portfolio_content = after_grid_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_grid_execution_dump, dict) + _assert_nonempty_btc_usdc_portfolio(after_grid_portfolio_content) + + after_grid_reference_account_portfolio_content = after_grid_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_grid_reference_account_portfolio_content, dict) + _assert_nonempty_btc_usdc_portfolio(after_grid_reference_account_portfolio_content) + + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + order_portfolio_types = ["exchange_account_elements"] + for portfolio_type in order_portfolio_types: + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_grid_execution_dump["automation"][portfolio_type]["orders"][ + "open_orders" + ] + ] + ladder_orders = _btc_usdc_limit_open_order_values(open_orders_origin_values) + buy_orders = sorted( + [ + o + for o in ladder_orders + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + o + for o in ladder_orders + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + _assert_grid_ladder_prices(buy_orders, sell_orders, price_col) + + # 3. trigger again: nothing to do + async with octobot_flow.AutomationJob(cleanup_dump, [], [], {}) as automation_job: + await automation_job.run() + cleanup_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + schedule_delay = ( + cleanup_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - cleanup_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + after_second_call_portfolio_content = cleanup_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + _assert_btc_usdc_balances_unchanged( + after_grid_portfolio_content, + after_second_call_portfolio_content, + ) + after_second_call_reference_account_portfolio_content = cleanup_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + _assert_btc_usdc_balances_unchanged( + after_grid_reference_account_portfolio_content, + after_second_call_reference_account_portfolio_content, + ) + + assert insert_trading_signal_mock.await_count == 2 + for await_args in insert_trading_signal_mock.await_args_list: + _assert_trading_signal_authenticated_grid_initial_placement(await_args.args[0], is_sub_portfolio=False) + finally: + if cleanup_dump is not None: + await _cancel_all_btc_usdc_orders_for_test(cleanup_dump) + + login_mock.assert_not_called() + assert insert_bot_logs_mock.await_count == 3 # called once per grid trading mode iteration + + +@pytest.mark.asyncio +@pytest.mark.xdist_group(name=AUTHENTICATED_TEST_GROUP) +async def test_authenticated_copy_grid(init_action: dict): + """ + Same flow as test_simulator_copy_grid: init, then copy a synthetic reference BTC/USDC grid onto the + account, then a no-op second copy run. Uses live BTC/USDC price to build valid limit prices (no mocks). + emit_signals=False. Requires spot USDC/BTC balance sufficient for mirrored limits on the exchange. + """ + btc_close = await fetch_last_price("BTC/USDC") + grid_reference_account = _live_grid_reference_account(btc_close) + reference_market = init_action["config"]["exchange_account_details"]["portfolio"]["unit"] + all_actions = [ + init_action, + functionnal_tests.copy_exchange_account_action(reference_market, grid_reference_account), + ] + automation_state = automation_state_dict(resolved_actions(all_actions)) + emit_signals = False + set_emit_signals_metadata(automation_state, emit_signals) + + allowed_execution_time = 20 + + with ( + functionnal_tests.mocked_community_authentication() as login_mock, + functionnal_tests.mocked_community_repository() as insert_bot_logs_mock, + trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock, + ): + cleanup_dump: typing.Optional[dict] = None + try: + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_initial_copy_execution_dump = automation_job.dump() + cleanup_dump = after_initial_copy_execution_dump + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + assert ( + after_initial_copy_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + >= current_time + ) + schedule_delay = ( + after_initial_copy_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_initial_copy_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert ( + copy_constants.DEFAULT_COPY_WAITING_TIME - allowed_execution_time + < schedule_delay + < copy_constants.DEFAULT_COPY_WAITING_TIME + allowed_execution_time + ) + + after_initial_portfolio_content = after_initial_copy_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_copy_execution_dump, dict) + _assert_nonempty_btc_usdc_portfolio(after_initial_portfolio_content) + + after_initial_reference_account_portfolio_content = after_initial_copy_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_reference_account_portfolio_content, dict) + _assert_nonempty_btc_usdc_portfolio(after_initial_reference_account_portfolio_content) + + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_initial_copy_execution_dump["automation"]["exchange_account_elements"]["orders"][ + "open_orders" + ] + ] + ladder_orders = _btc_usdc_limit_open_order_values(open_orders_origin_values) + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + buy_orders = sorted( + [ + order + for order in ladder_orders + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + order + for order in ladder_orders + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + _assert_grid_ladder_prices(buy_orders, sell_orders, price_col) + + async with octobot_flow.AutomationJob(after_initial_copy_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_second_call_execution_dump = automation_job.dump() + cleanup_dump = after_second_call_execution_dump + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + schedule_delay = ( + after_second_call_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_second_call_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert ( + copy_constants.DEFAULT_COPY_WAITING_TIME - allowed_execution_time + < schedule_delay + < copy_constants.DEFAULT_COPY_WAITING_TIME + allowed_execution_time + ) + + after_second_call_portfolio_content = after_second_call_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + _assert_btc_usdc_balances_unchanged( + after_initial_portfolio_content, + after_second_call_portfolio_content, + ) + after_second_call_reference_account_portfolio_content = after_second_call_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + _assert_btc_usdc_balances_unchanged( + after_initial_reference_account_portfolio_content, + after_second_call_reference_account_portfolio_content, + ) + + second_open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_second_call_execution_dump["automation"]["exchange_account_elements"]["orders"][ + "open_orders" + ] + ] + second_ladder_orders = _btc_usdc_limit_open_order_values(second_open_orders_origin_values) + second_buy_orders = sorted( + [ + order + for order in second_ladder_orders + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + second_sell_orders = sorted( + [ + order + for order in second_ladder_orders + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert [d_order_price(order[price_col]) for order in second_buy_orders] == [ + d_order_price(order[price_col]) for order in buy_orders + ] + assert [d_order_price(order[price_col]) for order in second_sell_orders] == [ + d_order_price(order[price_col]) for order in sell_orders + ] + + insert_trading_signal_mock.assert_not_awaited() + finally: + if cleanup_dump is not None: + await _cancel_all_btc_usdc_orders_for_test(cleanup_dump) + + login_mock.assert_not_called() + insert_bot_logs_mock.assert_not_called() # not called in copy action diff --git a/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_grid_trading_mode_action.py b/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_grid_trading_mode_action.py new file mode 100644 index 0000000000..cae4cbbdf0 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_grid_trading_mode_action.py @@ -0,0 +1,980 @@ +import contextlib +import decimal +import logging +import typing + +import mock +import pytest +import time + +import octobot_commons.enums as common_enums +import octobot_commons.constants as common_constants +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.exchanges.util.exchange_data as exchange_data +import octobot_flow +import octobot_flow.constants +import octobot_flow.entities +import octobot_flow.enums +import octobot_flow.repositories.community +import octobot_flow.repositories.exchange +import octobot_trading.util.test_tools.exchanges_test_tools as exchanges_test_tools + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + assert_emitted_signal_account_allocation_ratios, + automation_state_dict, + copy_exchange_account_action, + current_time, + d_order_price, + empty_copy_exchange_account_action, + resolved_actions, + set_emit_signals_metadata, + trading_signal_emission_patches, +) + +import tentacles.Trading.Mode.grid_trading_mode.grid_trading as grid_trading + +increment = 200 +spread = 600 +D_INCREMENT = decimal.Decimal(str(increment)) +D_SPREAD = decimal.Decimal(str(spread)) +# Stable quote for mirrored grid limits (test patches tickers to this close). +_FIXED_BTC_USDC_CLOSE = 100000.0 +GRID_REFERENCE_LOWEST_BUY = _FIXED_BTC_USDC_CLOSE - (spread / 2) - increment * 2 + 12.12 + +grid_pair_settings = [ + grid_trading.GridTradingMode.get_default_pair_config( + "BTC/USDC", + spread, + increment, + 2, + 2, + False, + False, + False, + ) +] + + +def grid_trading_mode_action(dependency_action: dict): + return { + "id": "action_1", + "dsl_script": ( + f"grid_trading_mode(pair_settings={dsl_interpreter.format_parameter_value(grid_pair_settings)})" + ), + "dependencies": [{"action_id": dependency_action["id"]}], + } + + +def _grid_reference_storage_order(order_id: str, side: str, price: float, amount: float) -> dict: + return { + trading_constants.STORAGE_ORIGIN_VALUE: { + trading_enums.ExchangeConstantsOrderColumns.ID.value: order_id, + trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value: "BTC/USDC", + trading_enums.ExchangeConstantsOrderColumns.SIDE.value: side, + trading_enums.ExchangeConstantsOrderColumns.TYPE.value: trading_enums.TradeOrderType.LIMIT.value, + trading_enums.ExchangeConstantsOrderColumns.PRICE.value: price, + trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value: amount, + trading_enums.ExchangeConstantsOrderColumns.STATUS.value: trading_enums.OrderStatus.OPEN.value, + trading_enums.ExchangeConstantsOrderColumns.FILLED.value: 0.0, + trading_enums.ExchangeConstantsOrderColumns.REMAINING.value: amount, + trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value: time.time(), + trading_enums.ExchangeConstantsOrderColumns.SELF_MANAGED.value: False, + } + } + + +def fetch_ohlcv_side_effect_for_close_price( + get_close_price: typing.Callable[[], typing.Union[int, float]], +): + """ + Async side effect for octobot_flow.repositories.exchange.OhlcvRepository.fetch_ohlcv: + every candle uses get_close_price() for open, high, low, and close. + """ + async def patched_fetch_ohlcv( + symbol: str, + time_frame: str, + limit: int, + _tickers: dict[str, dict[str, typing.Any]], + ): + time_frame_seconds = common_enums.TimeFramesMinutes[common_enums.TimeFrames(time_frame)] * 60 + close_price = float(get_close_price()) + n = max(int(limit or 1), 1) + local_time = time.time() + current_candle_open_time = local_time - (local_time % time_frame_seconds) + first_candle_open_time = current_candle_open_time - (n - 1) * time_frame_seconds + times = [float(first_candle_open_time + i * time_frame_seconds) for i in range(n)] + closes = [close_price] * n + ohlc = [close_price] * n + return exchange_data.MarketDetails( + symbol=symbol, + time_frame=time_frame, + close=closes, + open=ohlc, + high=ohlc, + low=ohlc, + volume=[0.0] * n, + time=times, + ) + + return patched_fetch_ohlcv + + +def tickers_repository_fetch_tickers_btc_usdc_close_override( + get_btc_usdc_close: typing.Callable[[], typing.Union[int, float]], + *, + btc_usdc_symbol: str = "BTC/USDC", +): + """ + TickersRepository.fetch_tickers replacement for tests: forces BTC/USDC close from get_btc_usdc_close() + and does not use the process-wide ticker cache on the fetch path. + """ + orig_get_all = exchanges_test_tools.get_all_currencies_price_ticker + orig_get_one = exchanges_test_tools.get_price_ticker + close_col = trading_enums.ExchangeConstantsTickersColumns.CLOSE.value + + async def patched_get_all_currencies_price_ticker(exchange_manager, **kwargs): + tickers = await orig_get_all(exchange_manager, **kwargs) + c = get_btc_usdc_close() + if btc_usdc_symbol in tickers: + tickers[btc_usdc_symbol] = {**tickers[btc_usdc_symbol], close_col: c} + else: + tickers[btc_usdc_symbol] = {close_col: c} + return tickers + + async def patched_get_price_ticker(exchange_manager, symbol: str, **kwargs): + if symbol == btc_usdc_symbol: + return {close_col: get_btc_usdc_close()} + return await orig_get_one(exchange_manager, symbol, **kwargs) + + async def patched_fetch_tickers(self, symbols): + if symbols == []: + return {} + if isinstance(symbols, list) and len(symbols) == 1: + return { + symbols[0]: await patched_get_price_ticker(self.exchange_manager, symbols[0]) + } + return await patched_get_all_currencies_price_ticker(self.exchange_manager, symbols=None) + + return patched_fetch_tickers + + +def _assert_trading_signal_grid_account_metadata(trading_signal: octobot_flow.entities.TradingSignal) -> None: + account = trading_signal.account + assert isinstance(account.updated_at, float) + assert current_time <= account.updated_at <= time.time() + assert account.positions == [] + assert account.historical_snapshots == [] + + +def _assert_signal_orders_two_by_two_grid_ladder( + orders: list[dict], +) -> None: + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] for order in orders + ] + buy_orders = sorted( + [ + order + for order in open_orders_origin_values + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + order + for order in open_orders_origin_values + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert len(buy_orders) == len(sell_orders) == 2 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + lowest_buy_price = d_order_price(buy_orders[0][price_col]) + assert d_order_price(buy_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + assert d_order_price(sell_orders[0][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + assert d_order_price(sell_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + D_INCREMENT + + +def _assert_trading_signal_grid_simulator_initial_placement(trading_signal: octobot_flow.entities.TradingSignal) -> None: + content = trading_signal.account.content + assert list(sorted(content.keys())) == ["BTC", "USDC"] + assert 450 < float(content["USDC"][common_constants.PORTFOLIO_TOTAL]) < 550 + assert float(content["USDC"][common_constants.PORTFOLIO_AVAILABLE]) < 200 + assert 0.001 < float(content["BTC"][common_constants.PORTFOLIO_TOTAL]) < 0.02 + assert float(content["BTC"][common_constants.PORTFOLIO_AVAILABLE]) < 0.001 + assert_emitted_signal_account_allocation_ratios(content) + assert len(trading_signal.account.orders) == 4 + _assert_signal_orders_two_by_two_grid_ladder(trading_signal.account.orders) + _assert_trading_signal_grid_account_metadata(trading_signal) + + +def _assert_trading_signal_grid_after_sell_fill_and_mirror( + trading_signal: octobot_flow.entities.TradingSignal, + *, + lowest_buy_price: decimal.Decimal, + first_sell_price: decimal.Decimal, + second_sell_price: decimal.Decimal, +) -> None: + btc_usdc = "BTC/USDC" + content = trading_signal.account.content + assert list(sorted(content.keys())) == ["BTC", "USDC"] + # After a sell fill, quote is mostly reserved for open buy limits (small free USDC). + assert float(content["USDC"][common_constants.PORTFOLIO_AVAILABLE]) < 50 + assert 450 < float(content["USDC"][common_constants.PORTFOLIO_TOTAL]) < 800 + assert float(content["BTC"][common_constants.PORTFOLIO_AVAILABLE]) < 0.001 + assert 0.001 < float(content["BTC"][common_constants.PORTFOLIO_TOTAL]) < 0.02 + assert_emitted_signal_account_allocation_ratios(content) + final_open = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] for order in trading_signal.account.orders + ] + buy_orders = sorted( + [ + order + for order in final_open + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + order + for order in final_open + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert len(buy_orders) == 3 + assert len(sell_orders) == 1 + expected_mirror_buy_price = first_sell_price - (D_SPREAD - D_INCREMENT) + expected_remaining_sell_price = second_sell_price + for order in buy_orders: + assert order[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == trading_enums.TradeOrderType.LIMIT.value + assert order[trading_enums.ExchangeConstantsOrderColumns.STATUS.value] == trading_enums.OrderStatus.OPEN.value + assert order[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == btc_usdc + assert 0.0024 <= float(order[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value]) <= 0.0026 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + assert d_order_price(buy_orders[0][price_col]) == lowest_buy_price + assert d_order_price(buy_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + assert d_order_price(buy_orders[2][price_col]) == expected_mirror_buy_price + assert d_order_price(sell_orders[0][price_col]) == expected_remaining_sell_price + assert sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == trading_enums.TradeOrderType.LIMIT.value + assert sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.STATUS.value] == trading_enums.OrderStatus.OPEN.value + assert sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == btc_usdc + assert 0.0024 <= float(sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value]) <= 0.0026 + _assert_trading_signal_grid_account_metadata(trading_signal) + + +def _assert_trading_signal_grid_copy_reference_shape(trading_signal: octobot_flow.entities.TradingSignal) -> None: + content = trading_signal.account.content + assert list(sorted(content.keys())) == ["BTC", "USDC"] + assert 450 < float(content["USDC"][common_constants.PORTFOLIO_TOTAL]) < 550 + assert 100 < float(content["USDC"][common_constants.PORTFOLIO_AVAILABLE]) < 150 + assert 0.0045 < float(content["BTC"][common_constants.PORTFOLIO_TOTAL]) < 0.055 + assert float(content["BTC"][common_constants.PORTFOLIO_AVAILABLE]) < 0.0015 + assert_emitted_signal_account_allocation_ratios(content) + assert len(trading_signal.account.orders) == 4 + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] for order in trading_signal.account.orders + ] + buy_orders = sorted( + [ + order + for order in open_orders_origin_values + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + order + for order in open_orders_origin_values + if order[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda order: order[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert len(buy_orders) == len(sell_orders) == 2 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + lowest_buy_price = d_order_price(buy_orders[0][price_col]) + assert lowest_buy_price == d_order_price(GRID_REFERENCE_LOWEST_BUY) + assert d_order_price(buy_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + assert d_order_price(sell_orders[0][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + assert d_order_price(sell_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + D_INCREMENT + _assert_trading_signal_grid_account_metadata(trading_signal) + + +@pytest.fixture +def grid_reference_account(): + """Spot snapshot matching a BTC/USDC grid: half USDC / half BTC by ratio, with 2+2 open limits.""" + lowest_buy = GRID_REFERENCE_LOWEST_BUY + order_amount = 0.004 + return copy_entities.Account( + updated_at=time.time(), + content={ + "BTC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("0.01"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("0.002"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "USDC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("1000"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("200"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[ + _grid_reference_storage_order( + "grid_ref_b0", trading_enums.TradeOrderSide.BUY.value, lowest_buy, order_amount + ), + _grid_reference_storage_order( + "grid_ref_b1", + trading_enums.TradeOrderSide.BUY.value, + lowest_buy + increment, + order_amount, + ), + _grid_reference_storage_order( + "grid_ref_s0", + trading_enums.TradeOrderSide.SELL.value, + lowest_buy + increment + spread, + order_amount, + ), + _grid_reference_storage_order( + "grid_ref_s1", + trading_enums.TradeOrderSide.SELL.value, + lowest_buy + increment + spread + increment, + order_amount, + ), + ], + positions=[], + # no historical snapshots + ) + + +@pytest.fixture +def init_action(): + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDC": { + "available": 1000.0, + "total": 1000.0, + } + }, + }, + }, + }, + "exchange_account_details": { + "exchange_details": { + "internal_name": functionnal_tests.EXCHANGE_INTERNAL_NAME, + }, + "auth_details": {}, + "portfolio": { + "unit": "USDC", + }, + }, + }, + } + + +@pytest.mark.parametrize("emit_signals", [False, True]) +@pytest.mark.asyncio +async def test_simulator_grid_init_from_empty_state(init_action: dict, emit_signals: bool): + patched_fetch_tickers = tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: _FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = fetch_ohlcv_side_effect_for_close_price(lambda: _FIXED_BTC_USDC_CLOSE) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + with trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock: + all_actions = [init_action, grid_trading_mode_action(init_action)] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. run grid trading mode action + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_grid_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + # action is reset: this is a trading mode action: it will be executed again at the next execution + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + # scheduled next execution time at 1h after the current execution (1h is the default time when unspecified) + assert after_grid_execution_dump["automation"]["execution"]["previous_execution"][ + "triggered_at" + ] >= current_time + one_hour = ( + common_enums.TimeFramesMinutes[common_enums.TimeFrames.ONE_HOUR] + * common_constants.MINUTE_TO_SECONDS + ) + allowed_execution_time = 20 + schedule_delay = ( + after_grid_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_grid_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + # ensure trades are saved + assert len(after_grid_execution_dump["automation"]["exchange_account_elements"]["trades"]) == 1 + + # check portfolio and open grid orders + after_grid_portfolio_content = after_grid_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_grid_execution_dump, dict) + assert list(sorted(after_grid_portfolio_content.keys())) == ["BTC", "USDC"] + # applied portfolio optimizations and created grid open orders + assert 450 < after_grid_portfolio_content["USDC"]["total"] < 550 # USDC holding split in half + assert after_grid_portfolio_content["USDC"]["available"] < 200 + assert 0.001 < after_grid_portfolio_content["BTC"]["total"] < 0.02 + assert after_grid_portfolio_content["BTC"]["available"] < 0.001 + + after_grid_reference_account_portfolio_content = after_grid_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_grid_reference_account_portfolio_content, dict) + assert list(sorted(after_grid_reference_account_portfolio_content.keys())) == ["BTC", "USDC"] + assert 450 < after_grid_reference_account_portfolio_content["USDC"]["total"] < 550 # USDC holding split in half + assert after_grid_reference_account_portfolio_content["USDC"]["available"] < 200 + assert 0.001 < after_grid_reference_account_portfolio_content["BTC"]["total"] < 0.02 + assert after_grid_reference_account_portfolio_content["BTC"]["available"] < 0.001 + + order_portfolio_types = ["exchange_account_elements"] + for portfolio_type in order_portfolio_types: + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_grid_execution_dump["automation"][portfolio_type]["orders"][ + "open_orders" + ] + ] + buy_orders = sorted([ + o for o in open_orders_origin_values if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + ], key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value]) + sell_orders = sorted([ + o for o in open_orders_origin_values if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.SELL.value + ], key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value]) + assert len(buy_orders) == len(sell_orders) == 2 + # check order prices are according to the grid settings + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + lowest_buy_price = d_order_price(buy_orders[0][price_col]) + assert d_order_price(buy_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + assert d_order_price(sell_orders[0][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + assert d_order_price(sell_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + D_INCREMENT + + # 3. trigger again: nothing to do + async with octobot_flow.AutomationJob(after_grid_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_second_call_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + schedule_delay = ( + after_second_call_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_second_call_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + # ensure trades are not erased + assert len(after_grid_execution_dump["automation"]["exchange_account_elements"]["trades"]) == 1 + + after_second_call_portfolio_content = after_second_call_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert after_second_call_portfolio_content == after_grid_portfolio_content + after_second_call_reference_account_portfolio_content = after_second_call_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert after_second_call_reference_account_portfolio_content == after_grid_reference_account_portfolio_content + + if emit_signals: + assert insert_trading_signal_mock.await_count == 2 + for await_args in insert_trading_signal_mock.await_args_list: + _assert_trading_signal_grid_simulator_initial_placement(await_args.args[0]) + else: + insert_trading_signal_mock.assert_not_awaited() + + +@pytest.mark.parametrize("emit_signals", [False, True]) +@pytest.mark.asyncio +async def test_simulator_grid_init_and_fill_sell_order(init_action: dict, emit_signals: bool): + """ + Initialize a grid at a fixed BTC/USDC price, move the market above the first sell limit so it fills, + then run the automation again from the saved state: staggered/grid mode should place a mirror buy + at (first_sell_price - (spread - increment)). + """ + btc_usdc = "BTC/USDC" + simulated_close = {"value": _FIXED_BTC_USDC_CLOSE} + patched_fetch_tickers = tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: simulated_close["value"] + ) + patched_fetch_ohlcv = fetch_ohlcv_side_effect_for_close_price(lambda: simulated_close["value"]) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + with trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock: + all_actions = [ + init_action, + grid_trading_mode_action(init_action), + ] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_grid_execution_dump = automation_job.dump() + + open_after_grid = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_grid_execution_dump["automation"]["exchange_account_elements"]["orders"][ + "open_orders" + ] + ] + buy_after_grid = sorted( + [ + o + for o in open_after_grid + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_after_grid = sorted( + [ + o + for o in open_after_grid + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert len(buy_after_grid) == len(sell_after_grid) == 2 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + lowest_buy_price = d_order_price(buy_after_grid[0][price_col]) + first_sell_price = d_order_price(sell_after_grid[0][price_col]) + second_sell_price = d_order_price(sell_after_grid[1][price_col]) + assert d_order_price(buy_after_grid[1][price_col]) == lowest_buy_price + D_INCREMENT + assert first_sell_price == lowest_buy_price + D_INCREMENT + D_SPREAD + assert second_sell_price == lowest_buy_price + D_INCREMENT + D_SPREAD + D_INCREMENT + + # Between first and second sell so the lowest sell limit fills but price stays inside the grid upper bound. + simulated_close["value"] = float(first_sell_price + D_INCREMENT / decimal.Decimal("2")) + + async with octobot_flow.AutomationJob(after_grid_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + final_dump = automation_job.dump() + for action in automation_job.automation_state.automation.actions_dag.actions: + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + + final_open = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in final_dump["automation"]["exchange_account_elements"]["orders"]["open_orders"] + ] + buy_orders = sorted( + [ + o + for o in final_open + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + o + for o in final_open + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert len(buy_orders) == 3 + assert len(sell_orders) == 1 + + expected_mirror_buy_price = first_sell_price - (D_SPREAD - D_INCREMENT) + expected_remaining_sell_price = second_sell_price + + for o in buy_orders: + assert o[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == trading_enums.TradeOrderType.LIMIT.value + assert o[trading_enums.ExchangeConstantsOrderColumns.STATUS.value] == trading_enums.OrderStatus.OPEN.value + assert o[trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == btc_usdc + # even mirrored order amount is close to the amount of initial orders + assert 0.0024 <= o[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] <= 0.0026 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + assert d_order_price(buy_orders[0][price_col]) == lowest_buy_price + assert d_order_price(buy_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + assert d_order_price(buy_orders[2][price_col]) == expected_mirror_buy_price + assert d_order_price(sell_orders[0][price_col]) == expected_remaining_sell_price + + assert sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.TYPE.value] == trading_enums.TradeOrderType.LIMIT.value + assert sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.STATUS.value] == trading_enums.OrderStatus.OPEN.value + assert sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value] == btc_usdc + assert 0.0024 <= sell_orders[0][trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] <= 0.0026 + + if emit_signals: + assert insert_trading_signal_mock.await_count == 2 + _assert_trading_signal_grid_simulator_initial_placement( + insert_trading_signal_mock.await_args_list[0].args[0] + ) + _assert_trading_signal_grid_after_sell_fill_and_mirror( + insert_trading_signal_mock.await_args_list[1].args[0], + lowest_buy_price=lowest_buy_price, + first_sell_price=first_sell_price, + second_sell_price=second_sell_price, + ) + else: + insert_trading_signal_mock.assert_not_awaited() + + +@pytest.mark.parametrize("start_as_uninitialized_copy_keyword", [ + False, + True, +]) +@pytest.mark.parametrize("emit_signals", [ + False, + True, +]) +@pytest.mark.asyncio +async def test_simulator_copy_grid( + init_action: dict, + grid_reference_account: copy_entities.Account, + emit_signals: bool, + start_as_uninitialized_copy_keyword: bool, +): + """ + Copy a reference spot account shaped like a BTC/USDC grid (portfolio + 2/2 open limits) + onto the client after init, then ensure a no-op second run keeps portfolio and ladder intact. + + When start_as_uninitialized_copy_keyword is True, the copy action is stored with empty reference_market / + reference_account until a fetched trading signal fills the DSL; otherwise the reference account + is embedded in the action from the start. + """ + patched_fetch_tickers = tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: _FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = fetch_ohlcv_side_effect_for_close_price(lambda: _FIXED_BTC_USDC_CLOSE) + + fetch_trading_signals_mock = mock.AsyncMock() + if start_as_uninitialized_copy_keyword: + fetch_trading_signals_mock.return_value = [ + octobot_flow.entities.TradingSignal( + strategy_id=functionnal_tests.FUNCTIONAL_TEST_COPY_STRATEGY_ID, + account=grid_reference_account, + ) + ] + + with contextlib.ExitStack() as patch_stack: + patch_stack.enter_context( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ) + ) + patch_stack.enter_context( + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ) + ) + patch_stack.enter_context( + mock.patch.object( + octobot_flow.repositories.community.TradingSignalsRepository, + "fetch_trading_signals", + fetch_trading_signals_mock, + ) + ) + if start_as_uninitialized_copy_keyword and not emit_signals: + + @contextlib.asynccontextmanager + async def _fake_maybe_authenticator(self): + yield mock.MagicMock() + + patch_stack.enter_context( + mock.patch.object( + octobot_flow.AutomationJob, + "_maybe_authenticator", + _fake_maybe_authenticator, + ) + ) + insert_trading_signal_mock = patch_stack.enter_context(trading_signal_emission_patches(emit_signals)) + + reference_market = init_action["config"]["exchange_account_details"]["portfolio"]["unit"] + if start_as_uninitialized_copy_keyword: + copy_action = empty_copy_exchange_account_action() + else: + copy_action = copy_exchange_account_action(reference_market, grid_reference_account) + all_actions = [ + init_action, + copy_action, + ] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. run copy exchange account action (rebalance + mirror reference grid orders) + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_initial_copy_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + if start_as_uninitialized_copy_keyword: + assert fetch_trading_signals_mock.await_count == 1 + fetch_call = fetch_trading_signals_mock.await_args + assert functionnal_tests.FUNCTIONAL_TEST_COPY_STRATEGY_ID in fetch_call.args[0] + assert fetch_call.args[1] == octobot_flow.constants.DEFAULT_COPY_TRADING_MISSED_SIGNALS_GRACE_ABORT_THRESHOLD + else: + fetch_trading_signals_mock.assert_not_awaited() + + # scheduled next execution: default copy interval (4h) + assert ( + after_initial_copy_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + >= current_time + ) + allowed_execution_time = 20 + schedule_delay = ( + after_initial_copy_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_initial_copy_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert ( + copy_constants.DEFAULT_COPY_WAITING_TIME - allowed_execution_time + < schedule_delay + < copy_constants.DEFAULT_COPY_WAITING_TIME + allowed_execution_time + ) + + after_initial_portfolio_content = after_initial_copy_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_copy_execution_dump, dict) + assert list(sorted(after_initial_portfolio_content.keys())) == ["BTC", "USDC"] + assert 450 < after_initial_portfolio_content["USDC"]["total"] < 550 + assert 100 < after_initial_portfolio_content["USDC"]["available"] < 150 + assert 0.0045 < after_initial_portfolio_content["BTC"]["total"] < 0.055 + assert after_initial_portfolio_content["BTC"]["available"] < 0.0015 + logging.getLogger("test_simulator_copy_grid").info( + f"after_copy_portfolio_content: {after_initial_portfolio_content}" + ) + + after_initial_reference_account_portfolio_content = after_initial_copy_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_reference_account_portfolio_content, dict) + assert list(sorted(after_initial_reference_account_portfolio_content.keys())) == ["BTC", "USDC"] + assert 450 < after_initial_reference_account_portfolio_content["USDC"]["total"] < 550 + assert 100 < after_initial_reference_account_portfolio_content["USDC"]["available"] < 150 + assert 0.0045 < after_initial_reference_account_portfolio_content["BTC"]["total"] < 0.055 + assert after_initial_reference_account_portfolio_content["BTC"]["available"] < 0.0015 + + open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_initial_copy_execution_dump["automation"]["exchange_account_elements"]["orders"][ + "open_orders" + ] + ] + buy_orders = sorted( + [ + o + for o in open_orders_origin_values + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + sell_orders = sorted( + [ + o + for o in open_orders_origin_values + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + assert len(buy_orders) == len(sell_orders) == 2 + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + lowest_buy_price = d_order_price(buy_orders[0][price_col]) + assert lowest_buy_price == d_order_price(GRID_REFERENCE_LOWEST_BUY) + assert d_order_price(buy_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + assert d_order_price(sell_orders[0][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + assert d_order_price(sell_orders[1][price_col]) == lowest_buy_price + D_INCREMENT + D_SPREAD + D_INCREMENT + + # 3. trigger again: portfolio and mirrored grid should be unchanged + async with octobot_flow.AutomationJob(after_initial_copy_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_second_call_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + schedule_delay = ( + after_second_call_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_second_call_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert ( + copy_constants.DEFAULT_COPY_WAITING_TIME - allowed_execution_time + < schedule_delay + < copy_constants.DEFAULT_COPY_WAITING_TIME + allowed_execution_time + ) + + after_second_call_portfolio_content = after_second_call_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert after_second_call_portfolio_content == after_initial_portfolio_content + after_second_call_reference_account_portfolio_content = after_second_call_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert after_second_call_reference_account_portfolio_content == after_initial_reference_account_portfolio_content + + second_open_orders_origin_values = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in after_second_call_execution_dump["automation"]["exchange_account_elements"]["orders"][ + "open_orders" + ] + ] + second_buy_orders = sorted( + [ + o + for o in second_open_orders_origin_values + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + second_sell_orders = sorted( + [ + o + for o in second_open_orders_origin_values + if o[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + == trading_enums.TradeOrderSide.SELL.value + ], + key=lambda o: o[trading_enums.ExchangeConstantsOrderColumns.PRICE.value], + ) + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + assert [d_order_price(o[price_col]) for o in second_buy_orders] == [ + d_order_price(o[price_col]) for o in buy_orders + ] + assert [d_order_price(o[price_col]) for o in second_sell_orders] == [ + d_order_price(o[price_col]) for o in sell_orders + ] + + if start_as_uninitialized_copy_keyword: + assert fetch_trading_signals_mock.call_count == 1 + else: + fetch_trading_signals_mock.assert_not_called() + + if emit_signals: + assert insert_trading_signal_mock.await_count == 2 + for await_args in insert_trading_signal_mock.await_args_list: + _assert_trading_signal_grid_copy_reference_shape(await_args.args[0]) + else: + insert_trading_signal_mock.assert_not_awaited() diff --git a/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_grid_trading_mode_sync_grace_period.py b/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_grid_trading_mode_sync_grace_period.py new file mode 100644 index 0000000000..b9d0d0e467 --- /dev/null +++ b/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_grid_trading_mode_sync_grace_period.py @@ -0,0 +1,1109 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3.0 of the License, or +# (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# OctoBot. If not, see . +import copy as copy_module +import dataclasses +import decimal +import time +import typing + +import mock +import pytest + +import octobot_commons.constants as common_constants +import octobot_commons.json_util as json_util +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums + +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_flow +import octobot_flow.entities as flow_entities +import octobot_flow.enums +import octobot_flow.logic.actions as flow_actions +import octobot_flow.repositories.exchange + +import octobot_commons.dsl_interpreter.operators.re_callable_operator_mixin as re_callable_operator_mixin + +import tests.functionnal_tests as functionnal_tests +import tests.functionnal_tests.trading_modes_actions.simulator.test_grid_trading_mode_action as grid_test +from tests.functionnal_tests import ( + automation_state_dict, + copy_exchange_account_action, + d_order_price, + resolved_actions, +) + +ORDER_AMOUNT = 0.004 +COPY_ACTION_ID = "action_copy_exchange_account" +GRACE_SECONDS = 5.0 + + +@pytest.fixture +def init_action(): + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDC": { + "available": 1000.0, + "total": 1000.0, + } + }, + }, + }, + }, + "exchange_account_details": { + "exchange_details": { + "internal_name": functionnal_tests.EXCHANGE_INTERNAL_NAME, + }, + "auth_details": {}, + "portfolio": { + "unit": "USDC", + }, + }, + }, + } + + +def _grace_account_copy_settings( + *, + missed_signals_grace_abort_threshold: typing.Optional[int] = None, +) -> copy_entities.AccountCopySettings: + kwargs: dict = { + "mirrored_orphan_cancel_grace_seconds": GRACE_SECONDS, + "mirrored_orphan_grace_abort_threshold": 2, + } + if missed_signals_grace_abort_threshold is not None: + kwargs["missed_signals_grace_abort_threshold"] = missed_signals_grace_abort_threshold + return copy_entities.AccountCopySettings(**kwargs) + + +def grid_reference_four_order_account() -> copy_entities.Account: + lowest_buy = grid_test.GRID_REFERENCE_LOWEST_BUY + inc = float(grid_test.increment) + spr = float(grid_test.spread) + return copy_entities.Account( + updated_at=time.time(), + content={ + "BTC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("0.01"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("0.002"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "USDC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("1000"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("200"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + }, + orders=[ + grid_test._grid_reference_storage_order( + "grid_ref_b0", trading_enums.TradeOrderSide.BUY.value, lowest_buy, ORDER_AMOUNT + ), + grid_test._grid_reference_storage_order( + "grid_ref_b1", trading_enums.TradeOrderSide.BUY.value, lowest_buy + inc, ORDER_AMOUNT + ), + grid_test._grid_reference_storage_order( + "grid_ref_s0", trading_enums.TradeOrderSide.SELL.value, lowest_buy + inc + spr, ORDER_AMOUNT + ), + grid_test._grid_reference_storage_order( + "grid_ref_s1", trading_enums.TradeOrderSide.SELL.value, lowest_buy + inc + spr + inc, ORDER_AMOUNT + ), + ], + positions=[], + ) + + +def reference_replace_highest_buy_with_sell( + reference_before: copy_entities.Account, +) -> copy_entities.Account: + """ + Remove the highest limit buy (grid_ref_b1), which is closest to the market and fills first, + and add the grid-equivalent sell one spread above that buy price. + """ + lowest_buy = grid_test.GRID_REFERENCE_LOWEST_BUY + inc = float(grid_test.increment) + spr = float(grid_test.spread) + highest_buy_price = lowest_buy + inc + new_sell_price = highest_buy_price + spr + new_orders: list = [] + for order_doc in reference_before.orders: + origin = order_doc[trading_constants.STORAGE_ORIGIN_VALUE] + if origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] == "grid_ref_b1": + continue + new_orders.append(order_doc) + new_orders.append( + grid_test._grid_reference_storage_order( + "grid_ref_s_from_b1", + trading_enums.TradeOrderSide.SELL.value, + new_sell_price, + ORDER_AMOUNT, + ) + ) + content_after_fill = copy_module.deepcopy(reference_before.content) + fill_quantity = decimal.Decimal(str(ORDER_AMOUNT)) + fill_price = decimal.Decimal(str(highest_buy_price)) + quote_spent = fill_quantity * fill_price + btc_holdings = content_after_fill["BTC"] + usdc_holdings = content_after_fill["USDC"] + btc_holdings[common_constants.PORTFOLIO_TOTAL] = ( + btc_holdings[common_constants.PORTFOLIO_TOTAL] + fill_quantity + ) + btc_holdings[common_constants.PORTFOLIO_AVAILABLE] = ( + btc_holdings[common_constants.PORTFOLIO_AVAILABLE] + fill_quantity + ) + usdc_holdings[common_constants.PORTFOLIO_TOTAL] = ( + usdc_holdings[common_constants.PORTFOLIO_TOTAL] - quote_spent + ) + mark_price = decimal.Decimal(str(grid_test._FIXED_BTC_USDC_CLOSE)) + btc_total_after = btc_holdings[common_constants.PORTFOLIO_TOTAL] + usdc_total_after = usdc_holdings[common_constants.PORTFOLIO_TOTAL] + value_btc = btc_total_after * mark_price + value_quote = usdc_total_after + value_total = value_btc + value_quote + if value_total > decimal.Decimal("0"): + btc_holdings[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = ( + value_btc / value_total + ) + usdc_holdings[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = ( + value_quote / value_total + ) + return copy_entities.Account( + updated_at=time.time(), + content=content_after_fill, + orders=new_orders, + positions=[], + historical_snapshots=[reference_before], + ) + + +def reference_replace_highest_buy_with_sell_missed_signals_history( + reference_before: copy_entities.Account, +) -> copy_entities.Account: + """ + Like ``reference_replace_highest_buy_with_sell`` but prepend two empty-order snapshots (newest + first) so the first compliant historical snapshot is at index 2 for missed-signals grace abort. + """ + base = reference_replace_highest_buy_with_sell(reference_before) + content_snapshot = copy_module.deepcopy(reference_before.content) + empty_newest = copy_entities.Account( + updated_at=time.time(), + content=content_snapshot, + orders=[], + positions=[], + ) + empty_mid = copy_entities.Account( + updated_at=time.time() - 1.0, + content=content_snapshot, + orders=[], + positions=[], + ) + compliant = copy_module.deepcopy(reference_before) + compliant = dataclasses.replace(compliant, updated_at=time.time() - 5.0) + return dataclasses.replace( + base, + historical_snapshots=[empty_newest, empty_mid, compliant], + ) + + +def reference_replace_both_buys_with_sells( + reference_before: copy_entities.Account, +) -> copy_entities.Account: + lowest_buy = grid_test.GRID_REFERENCE_LOWEST_BUY + inc = float(grid_test.increment) + spr = float(grid_test.spread) + new_orders: list = [] + for order_doc in reference_before.orders: + origin = order_doc[trading_constants.STORAGE_ORIGIN_VALUE] + oid = origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + if oid in ("grid_ref_b0", "grid_ref_b1"): + continue + new_orders.append(order_doc) + new_orders.append( + grid_test._grid_reference_storage_order( + "grid_ref_s_fill_b0", + trading_enums.TradeOrderSide.SELL.value, + lowest_buy + spr, + ORDER_AMOUNT, + ) + ) + new_orders.append( + grid_test._grid_reference_storage_order( + "grid_ref_s_fill_b1", + trading_enums.TradeOrderSide.SELL.value, + lowest_buy + inc + spr, + ORDER_AMOUNT, + ) + ) + content_after_fill = copy_module.deepcopy(reference_before.content) + fill_quantity = decimal.Decimal(str(ORDER_AMOUNT)) + lowest_buy_price = decimal.Decimal(str(lowest_buy)) + inc_decimal = decimal.Decimal(str(inc)) + quote_spent = fill_quantity * (2 * lowest_buy_price + inc_decimal) + btc_holdings = content_after_fill["BTC"] + usdc_holdings = content_after_fill["USDC"] + btc_received = fill_quantity * decimal.Decimal("2") + btc_holdings[common_constants.PORTFOLIO_TOTAL] = ( + btc_holdings[common_constants.PORTFOLIO_TOTAL] + btc_received + ) + btc_holdings[common_constants.PORTFOLIO_AVAILABLE] = ( + btc_holdings[common_constants.PORTFOLIO_AVAILABLE] + btc_received + ) + usdc_holdings[common_constants.PORTFOLIO_TOTAL] = ( + usdc_holdings[common_constants.PORTFOLIO_TOTAL] - quote_spent + ) + mark_price = decimal.Decimal(str(grid_test._FIXED_BTC_USDC_CLOSE)) + btc_total_after = btc_holdings[common_constants.PORTFOLIO_TOTAL] + usdc_total_after = usdc_holdings[common_constants.PORTFOLIO_TOTAL] + value_btc = btc_total_after * mark_price + value_quote = usdc_total_after + pair_value_total = value_btc + value_quote + if pair_value_total > decimal.Decimal("0"): + btc_holdings[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = ( + value_btc / pair_value_total + ) + usdc_holdings[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = ( + value_quote / pair_value_total + ) + return copy_entities.Account( + updated_at=time.time(), + content=content_after_fill, + orders=new_orders, + positions=[], + historical_snapshots=[reference_before], + ) + + +def update_state_reference_account_details( + dump: dict[str, typing.Any], + reference_market: str, + reference_account: copy_entities.Account, + account_copy_settings: copy_entities.AccountCopySettings, +) -> None: + dsl_details = flow_actions.create_copy_exchange_account_action( + functionnal_tests.FUNCTIONAL_TEST_COPY_STRATEGY_ID, + reference_market, + reference_account, + account_copy_settings, + ) + dsl_details.id = COPY_ACTION_ID + automation_state = flow_entities.AutomationState.from_dict(dump) + if COPY_ACTION_ID not in automation_state.automation.actions_dag.get_actions_by_id(): + raise AssertionError(f"DAG action {COPY_ACTION_ID!r} not found") + automation_state.upsert_automation_actions([dsl_details]) + dump["automation"]["actions_dag"]["actions"] = json_util.sanitize( + automation_state.to_dict(include_default_values=False)["automation"]["actions_dag"]["actions"] + ) + + +def age_grace_started_at_in_dump( + dump: dict[str, typing.Any], + reference_market: str, + reference_account: copy_entities.Account, + account_copy_settings: copy_entities.AccountCopySettings, + grace_seconds: float, + margin_seconds: float = 15.0, +) -> None: + """ + Make mirrored-orphan grace appear elapsed by setting reference Account.updated_at in the copy + action DSL (grace start is derived from reference history + updated_at). + """ + aged_updated_at = time.time() - grace_seconds - margin_seconds + aged_account = dataclasses.replace(reference_account, updated_at=aged_updated_at) + update_state_reference_account_details( + dump, reference_market, aged_account, account_copy_settings + ) + + +def _open_orders_origins(dump: dict[str, typing.Any]) -> list[dict]: + return [ + o[trading_constants.STORAGE_ORIGIN_VALUE] + for o in dump["automation"]["exchange_account_elements"]["orders"]["open_orders"] + ] + + +def _assert_copy_action_last_run_has_no_rebalance_orders(dump: dict[str, typing.Any]) -> None: + """ + Unwrap the copy action's ReCallingOperatorResult last execution state and assert + ``created_orders`` contains no rows tagged with REBALANCER_ORDER_TAG. + """ + copy_result = None + recall_name = re_callable_operator_mixin.ReCallingOperatorResult.__name__ + last_key = re_callable_operator_mixin.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY + for action in dump["automation"]["actions_dag"]["actions"]: + if action.get("id") != COPY_ACTION_ID: + continue + result = action.get("result") or action.get("previous_execution_result") + if not result: + continue + if not isinstance(result, dict) or recall_name not in result: + continue + recall = result[recall_name] + if isinstance(recall, dict): + last = recall.get(last_key) + if isinstance(last, dict): + copy_result = last.get("state") + break + + created_raw: list = [] + if isinstance(copy_result, dict): + created_raw = copy_result.get("created_orders") or [] + + tag_col = trading_enums.ExchangeConstantsOrderColumns.TAG.value + limit_rebalance_like = [ + row + for row in created_raw + if isinstance(row, dict) and row.get(tag_col) == copy_constants.REBALANCER_ORDER_TAG + ] + assert len(limit_rebalance_like) == 0 + + +def _orders_by_side_with_id(origins: list[dict], side: str) -> list[tuple[str, float]]: + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + id_col = trading_enums.ExchangeConstantsOrderColumns.ID.value + side_col = trading_enums.ExchangeConstantsOrderColumns.SIDE.value + return sorted( + [ + (o[id_col], float(o[price_col])) + for o in origins + if o[side_col] == side + ], + key=lambda row: row[1], + ) + + +def mutate_client_dump_simulate_early_fill_of_grid_ref_b1( + dump: dict[str, typing.Any], + _reference_r1: copy_entities.Account, +) -> None: + """ + Simulate the client having already filled the mirrored ``grid_ref_b1`` buy while the embedded + reference snapshot can still list that order as open (late-reference-fill alignment). + Base/quote portfolio balances are adjusted using that order's amount and price (same economics + as ``reference_replace_highest_buy_with_sell`` on the reference account). + """ + storage = trading_constants.STORAGE_ORIGIN_VALUE + id_col = trading_enums.ExchangeConstantsOrderColumns.ID.value + amount_col = trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value + price_col = trading_enums.ExchangeConstantsOrderColumns.PRICE.value + automation = dump["automation"] + open_orders = automation["exchange_account_elements"]["orders"]["open_orders"] + filled_wrapped: typing.Optional[dict] = None + for wrapped in open_orders: + if wrapped[storage][id_col] == "grid_ref_b1": + filled_wrapped = wrapped + break + if filled_wrapped is None: + raise AssertionError("expected open mirrored order grid_ref_b1") + filled_origin = filled_wrapped[storage] + fill_quantity = decimal.Decimal(str(filled_origin[amount_col])) + fill_price = decimal.Decimal(str(filled_origin[price_col])) + quote_spent = fill_quantity * fill_price + + automation["exchange_account_elements"]["orders"]["open_orders"] = [ + o for o in open_orders if o[storage][id_col] != "grid_ref_b1" + ] + + content = automation["exchange_account_elements"]["portfolio"]["content"] + base_currency = "BTC" + quote_currency = "USDC" + btc_entry = content.setdefault(base_currency, {}) + usdc_entry = content.setdefault(quote_currency, {}) + + btc_total = decimal.Decimal(str(btc_entry.get("total", 0))) + btc_available = decimal.Decimal(str(btc_entry.get("available", btc_total))) + usdc_total = decimal.Decimal(str(usdc_entry.get("total", 0))) + usdc_available = decimal.Decimal(str(usdc_entry.get("available", usdc_total))) + + btc_total += fill_quantity + btc_available += fill_quantity + usdc_total -= quote_spent + usdc_available -= quote_spent + + btc_entry["total"] = float(btc_total) + btc_entry["available"] = float(btc_available) + usdc_entry["total"] = float(usdc_total) + usdc_entry["available"] = float(usdc_available) + + mark_price = decimal.Decimal(str(grid_test._FIXED_BTC_USDC_CLOSE)) + value_base = btc_total * mark_price + value_quote = usdc_total + value_total = value_base + value_quote + if value_total > decimal.Decimal("0"): + btc_entry[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = float(value_base / value_total) + usdc_entry[copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO] = float(value_quote / value_total) + + +@pytest.mark.asyncio +async def test_grid_copy_trigger_grace_period_for_unfilled_client_order(init_action: dict): + reference_market = "USDC" + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + reference_r1 = grid_reference_four_order_account() + settings = _grace_account_copy_settings() + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + reference_r2 = reference_replace_highest_buy_with_sell(reference_r1) + update_state_reference_account_details(after_copy_r1, reference_market, reference_r2, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_grace_trigger = job.dump() + + orphan_still_buy = [ + origin + for origin in _open_orders_origins(after_grace_trigger) + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + and origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] == "grid_ref_b1" + ] + assert len(orphan_still_buy) == 1 + + +@pytest.mark.asyncio +async def test_grid_copy_missed_signals_abort_cancels_orphan_immediately(init_action: dict): + reference_market = "USDC" + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + reference_r1 = grid_reference_four_order_account() + settings = _grace_account_copy_settings(missed_signals_grace_abort_threshold=2) + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + origins_r1 = _open_orders_origins(after_copy_r1) + assert len(origins_r1) == 4 + buy_ids_r1 = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in origins_r1 + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + } + assert "grid_ref_b0" in buy_ids_r1 + assert "grid_ref_b1" in buy_ids_r1 + sell_ids_r1 = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in origins_r1 + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.SELL.value + } + assert "grid_ref_s0" in sell_ids_r1 + assert "grid_ref_s1" in sell_ids_r1 + + reference_r2 = reference_replace_highest_buy_with_sell_missed_signals_history(reference_r1) + update_state_reference_account_details(after_copy_r1, reference_market, reference_r2, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_missed_abort = job.dump() + + open_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in _open_orders_origins(after_missed_abort) + } + assert "grid_ref_b1" not in open_ids + + +@pytest.mark.asyncio +async def test_grid_copy_trigger_grace_period_for_early_filled_client_order(init_action: dict): + reference_market = "USDC" + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + reference_r1 = grid_reference_four_order_account() + settings = _grace_account_copy_settings() + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + mutate_client_dump_simulate_early_fill_of_grid_ref_b1(after_copy_r1, reference_r1) + update_state_reference_account_details(after_copy_r1, reference_market, reference_r1, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_grace_trigger = job.dump() + + final_origins = _open_orders_origins(after_grace_trigger) + assert len(final_origins) == 3 + open_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] for origin in final_origins + } + assert "grid_ref_b1" not in open_ids + + +@pytest.mark.asyncio +async def test_grid_copy_grace_elapses_then_orphan_cancelled_and_sell_mirrored(init_action: dict): + reference_market = "USDC" + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + reference_r1 = grid_reference_four_order_account() + settings = _grace_account_copy_settings() + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + for action in after_copy_r1["automation"]["actions_dag"]["actions"]: + assert isinstance(action, dict) + if action.get("id") == "action_init": + assert action.get("executed_at") + elif action.get("id") == COPY_ACTION_ID: + assert action.get("executed_at") is None + assert isinstance(action.get("previous_execution_result"), dict) + + origins_r1 = _open_orders_origins(after_copy_r1) + buy_ids_r1 = {origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] for origin in origins_r1 if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value} + assert "grid_ref_b0" in buy_ids_r1 + assert "grid_ref_b1" in buy_ids_r1 + + reference_r2 = reference_replace_highest_buy_with_sell(reference_r1) + update_state_reference_account_details(after_copy_r1, reference_market, reference_r2, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_orphan_grace_started = job.dump() + + orphan_still_buy = [ + origin + for origin in _open_orders_origins(after_orphan_grace_started) + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + and origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] == "grid_ref_b1" + ] + assert len(orphan_still_buy) == 1 + + age_grace_started_at_in_dump( + after_orphan_grace_started, + reference_market, + reference_r2, + settings, + GRACE_SECONDS, + ) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_orphan_grace_started, [], [], {}) as job: + await job.run() + after_grace_elapsed = job.dump() + + final_origins = _open_orders_origins(after_grace_elapsed) + buy_ids_final = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in final_origins + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + } + assert len(buy_ids_final) == 1 + assert "grid_ref_b1" not in buy_ids_final + sell_pairs = _orders_by_side_with_id(final_origins, trading_enums.TradeOrderSide.SELL.value) + sell_ids = {row[0] for row in sell_pairs} + assert len(sell_ids) == 3 + assert "grid_ref_s_from_b1" in sell_ids + + +@pytest.mark.asyncio +async def test_grid_copy_grace_aborted_when_second_orphan_exceeds_threshold(init_action: dict): + reference_market = "USDC" + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + reference_r1 = grid_reference_four_order_account() + reference_r2 = reference_replace_both_buys_with_sells(reference_r1) + settings = _grace_account_copy_settings() + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + buy_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in _open_orders_origins(after_copy_r1) + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + } + assert "grid_ref_b0" in buy_ids + assert "grid_ref_b1" in buy_ids + + update_state_reference_account_details(after_copy_r1, reference_market, reference_r2, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_threshold = job.dump() + + final_origins = _open_orders_origins(after_threshold) + buy_ids_final = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in final_origins + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.BUY.value + } + assert len(buy_ids_final) == 0 + sell_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in final_origins + if origin[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] == trading_enums.TradeOrderSide.SELL.value + } + assert len(sell_ids) == 4 + assert "grid_ref_s_fill_b0" in sell_ids + assert "grid_ref_s_fill_b1" in sell_ids + + +@pytest.mark.asyncio +async def test_grid_copy_orphan_resolved_by_client_fill_without_rebalance_orders(init_action: dict): + """ + After copying the reference grid, switch the embedded reference to R2 at the original close: + mirrored-orphan grace defers cancel of the stale mirrored buy. Then lower BTC/USDC below the + highest mirrored buy so the client buy can fill; the next run clears the grace episode and + mirrors the new reference sell without rebalance limit creations. + """ + reference_market = "USDC" + simulated_close = {"value": float(grid_test._FIXED_BTC_USDC_CLOSE)} + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: simulated_close["value"] + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: simulated_close["value"] + ) + reference_r1 = grid_reference_four_order_account() + settings = _grace_account_copy_settings() + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + reference_r2 = reference_replace_highest_buy_with_sell(reference_r1) + update_state_reference_account_details(after_copy_r1, reference_market, reference_r2, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_grace_started = job.dump() + + highest_buy_price = d_order_price( + grid_test.GRID_REFERENCE_LOWEST_BUY + grid_test.increment + ) + simulated_close["value"] = float( + highest_buy_price - grid_test.D_INCREMENT / decimal.Decimal("2") + ) + update_state_reference_account_details( + after_grace_started, reference_market, reference_r2, settings + ) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_grace_started, [], [], {}) as job: + await job.run() + after_fill_sync = job.dump() + + _assert_copy_action_last_run_has_no_rebalance_orders(after_fill_sync) + + final_origins = _open_orders_origins(after_fill_sync) + side_col = trading_enums.ExchangeConstantsOrderColumns.SIDE.value + buy_origins = [ + origin + for origin in final_origins + if origin[side_col] == trading_enums.TradeOrderSide.BUY.value + ] + sell_origins = [ + origin + for origin in final_origins + if origin[side_col] == trading_enums.TradeOrderSide.SELL.value + ] + assert len(buy_origins) == 1 + assert len(sell_origins) == 3 + sell_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in sell_origins + } + assert "grid_ref_s_from_b1" in sell_ids + + +@pytest.mark.asyncio +async def test_grid_copy_early_filled_client_order_grace_period_resolved_by_reference_fill_without_rebalance_orders( + init_action: dict, +): + """ + Like test_grid_copy_orphan_resolved_by_client_fill_without_rebalance_orders, but the mirrored + orphan grace is triggered by an early client fill of grid_ref_b1 while the embedded reference + is still R1. A second run with R1 unchanged leaves the grace episode active (no mirrored R2 + sell yet). The following run embeds R2 so the reference reflects the fill; sync completes + without rebalance limits, not by lowering price to fill the client. + """ + # --- Bootstrap: fixed market, copy the reference grid (R1) onto the client --- + reference_market = "USDC" + patched_fetch_tickers = grid_test.tickers_repository_fetch_tickers_btc_usdc_close_override( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + patched_fetch_ohlcv = grid_test.fetch_ohlcv_side_effect_for_close_price( + lambda: grid_test._FIXED_BTC_USDC_CLOSE + ) + reference_r1 = grid_reference_four_order_account() + settings = _grace_account_copy_settings() + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, reference_r1, settings), + ] + automation_state_template = automation_state_dict(resolved_actions(all_actions)) + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(automation_state_template, [], [], {}) as job: + await job.run() + after_init = job.dump() + async with octobot_flow.AutomationJob(after_init, [], [], {}) as job: + await job.run() + after_copy_r1 = job.dump() + + # --- Client filled mirrored grid_ref_b1 first; embedded reference still lists R1 (late ref) --- + mutate_client_dump_simulate_early_fill_of_grid_ref_b1(after_copy_r1, reference_r1) + update_state_reference_account_details(after_copy_r1, reference_market, reference_r1, settings) + + # --- Iteration n: first run after setup — grace episode starts (early fill vs reference R1) --- + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_copy_r1, [], [], {}) as job: + await job.run() + after_grace_n = job.dump() + + # Check n: same shape as test_grid_copy_trigger_grace_period_for_early_filled_client_order + after_grace_n_origins = _open_orders_origins(after_grace_n) + assert len(after_grace_n_origins) == 3 + after_grace_n_open_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] for origin in after_grace_n_origins + } + assert "grid_ref_b1" not in after_grace_n_open_ids + + # --- Iteration n+1: re-embed reference R1 only; grace still active, no R2 mirror yet --- + update_state_reference_account_details(after_grace_n, reference_market, reference_r1, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_grace_n, [], [], {}) as job: + await job.run() + after_grace_n1 = job.dump() + + # Check n+1: still three opens; no premature grid_ref_s_from_b1; b1 stays off the book + after_grace_n1_origins = _open_orders_origins(after_grace_n1) + assert len(after_grace_n1_origins) == 3 + after_grace_n1_open_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] for origin in after_grace_n1_origins + } + assert "grid_ref_s_from_b1" not in after_grace_n1_open_ids + assert "grid_ref_b1" not in after_grace_n1_open_ids + + # --- Iteration n+2: reference advances to R2 (fill reflected as sell); sync can complete --- + reference_r2 = reference_replace_highest_buy_with_sell(reference_r1) + update_state_reference_account_details(after_grace_n1, reference_market, reference_r2, settings) + + with ( + mock.patch.object( + octobot_flow.repositories.exchange.TickersRepository, + "fetch_tickers", + new=patched_fetch_tickers, + ), + mock.patch.object( + octobot_flow.repositories.exchange.OhlcvRepository, + "fetch_ohlcv", + side_effect=patched_fetch_ohlcv, + ), + ): + async with octobot_flow.AutomationJob(after_grace_n1, [], [], {}) as job: + await job.run() + after_resolved = job.dump() + + _assert_copy_action_last_run_has_no_rebalance_orders(after_resolved) + + # Final book: aligned with R2 — one buy, three sells including grid_ref_s_from_b1 + final_origins = _open_orders_origins(after_resolved) + side_col = trading_enums.ExchangeConstantsOrderColumns.SIDE.value + buy_origins = [ + origin + for origin in final_origins + if origin[side_col] == trading_enums.TradeOrderSide.BUY.value + ] + sell_origins = [ + origin + for origin in final_origins + if origin[side_col] == trading_enums.TradeOrderSide.SELL.value + ] + assert len(buy_origins) == 1 + assert len(sell_origins) == 3 + sell_ids = { + origin[trading_enums.ExchangeConstantsOrderColumns.ID.value] + for origin in sell_origins + } + assert "grid_ref_s_from_b1" in sell_ids diff --git a/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_index_trading_mode_action.py b/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_index_trading_mode_action.py new file mode 100644 index 0000000000..886739f0ff --- /dev/null +++ b/packages/flow/tests/functionnal_tests/trading_modes_actions/simulator/test_index_trading_mode_action.py @@ -0,0 +1,650 @@ +import pytest +import logging +import json +import mock +import decimal +import copy +import time + +import octobot_copy.entities as copy_entities +import octobot_commons.enums as common_enums +import octobot_commons.constants as common_constants +import octobot_trading.dsl as trading_dsl +import octobot_trading.exchanges.exchange_channels as exchange_channels +import octobot_copy.rebalancing as rebalancing +import octobot_flow +import octobot_flow.entities +import octobot_flow.enums +import octobot_copy.constants as copy_constants + +import tentacles.Trading.Mode.index_trading_mode as index_trading_mode + +import tests.functionnal_tests as functionnal_tests +from tests.functionnal_tests import ( + assert_emitted_signal_account_allocation_ratios, + automation_state_dict, + copy_exchange_account_action, + current_time, + resolved_actions, + set_emit_signals_metadata, + trading_signal_emission_patches, +) + +import octobot_copy.enums as rebalancer_enums + +index_content = [ + { + rebalancer_enums.DistributionKeys.NAME: "BTC", + rebalancer_enums.DistributionKeys.VALUE: 1, + }, + { + rebalancer_enums.DistributionKeys.NAME: "ETH", + rebalancer_enums.DistributionKeys.VALUE: 1, + }, +] + +index_content_btc_sol = [ + { + rebalancer_enums.DistributionKeys.NAME: "BTC", + rebalancer_enums.DistributionKeys.VALUE: 1, + }, + { + rebalancer_enums.DistributionKeys.NAME: "SOL", + rebalancer_enums.DistributionKeys.VALUE: 1, + }, +] + + +def _replace_index_trading_mode_dsl_in_dump(automation_dump: dict, new_index_content: list) -> None: + for action in automation_dump["automation"]["actions_dag"]["actions"]: + if action.get("id") != "action_1": + continue + action["dsl_script"] = ( + f"index_trading_mode(index_content={json.dumps(new_index_content)}, rebalance_trigger_min_percent=5)" + ) + action.pop("resolved_dsl_script", None) + return + raise AssertionError("action_1 not found in automation dump") + + +def index_trading_mode_action(dependency_action: dict): + return { + "id": "action_1", + "dsl_script": f"index_trading_mode(index_content={json.dumps(index_content)}, rebalance_trigger_min_percent=5)", + "dependencies": [{"action_id": dependency_action["id"]}], + } + + +def _assert_trading_signal_account_fields(trading_signal: octobot_flow.entities.TradingSignal) -> None: + """ + Matches octobot_flow.logic.actions.account_copy_util.reference_exchange_elements_to_account: + updated_at from time.time(), orders from exchange elements' open_orders (empty in these tests), + positions and historical_snapshots default to empty (not set by the helper). + """ + account = trading_signal.account + assert isinstance(account.updated_at, float) + assert current_time <= account.updated_at <= time.time() + assert account.orders == [] + assert account.positions == [] + assert account.historical_snapshots == [] + + +def _assert_trading_signal_btc_eth_usdt_index_portfolio( + trading_signal: octobot_flow.entities.TradingSignal, + *, + allow_zero_ratio_assets: frozenset[str] = frozenset(), +) -> None: + content = trading_signal.account.content + assert list(sorted(content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < float(content["USDT"][common_constants.PORTFOLIO_AVAILABLE]) < 5 + assert 0.1 < float(content["ETH"][common_constants.PORTFOLIO_AVAILABLE]) < 0.4 + assert 0.001 < float(content["BTC"][common_constants.PORTFOLIO_AVAILABLE]) < 0.01 + assert 0 < float(content["USDT"][common_constants.PORTFOLIO_TOTAL]) < 5 + assert 0.1 < float(content["ETH"][common_constants.PORTFOLIO_TOTAL]) < 0.4 + assert 0.001 < float(content["BTC"][common_constants.PORTFOLIO_TOTAL]) < 0.01 + assert_emitted_signal_account_allocation_ratios(content, allow_zero_ratio_assets=allow_zero_ratio_assets) + _assert_trading_signal_account_fields(trading_signal) + + +def _assert_trading_signal_btc_eth_sol_usdt_after_btc_sol_rebalance( + trading_signal: octobot_flow.entities.TradingSignal, + *, + allow_zero_ratio_assets: frozenset[str] = frozenset(), +) -> None: + content = trading_signal.account.content + assert list(sorted(content.keys())) == ["BTC", "ETH", "SOL", "USDT"] + assert 0 < float(content["USDT"][common_constants.PORTFOLIO_AVAILABLE]) < 5 + assert 0.001 < float(content["BTC"][common_constants.PORTFOLIO_AVAILABLE]) < 0.02 + assert 0.5 < float(content["SOL"][common_constants.PORTFOLIO_AVAILABLE]) < 20 + assert 0 < float(content["ETH"][common_constants.PORTFOLIO_AVAILABLE]) < 0.001 + assert 0 < float(content["USDT"][common_constants.PORTFOLIO_TOTAL]) < 5 + assert 0.001 < float(content["BTC"][common_constants.PORTFOLIO_TOTAL]) < 0.02 + assert 0.5 < float(content["SOL"][common_constants.PORTFOLIO_TOTAL]) < 20 + assert 0 < float(content["ETH"][common_constants.PORTFOLIO_TOTAL]) < 0.001 + assert_emitted_signal_account_allocation_ratios( + content, + allow_negligible_ratio_assets=allow_zero_ratio_assets, + ) + _assert_trading_signal_account_fields(trading_signal) + + +@pytest.fixture +def index_reference_account(): + return copy_entities.Account( + updated_at=time.time(), + content={ + "BTC": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("1"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("1"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.5"), + }, + "ETH": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("20"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("20"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.4999"), + }, + "USDT": { + common_constants.PORTFOLIO_TOTAL: decimal.Decimal("10"), + common_constants.PORTFOLIO_AVAILABLE: decimal.Decimal("10"), + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("0.0001"), + }, + }, + orders=[], + positions=[], + # no historical snapshots + ) + + +@pytest.fixture +def init_action(): + return { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": { + "metadata": { + "automation_id": "automation_1", + }, + "exchange_account_elements": { + "portfolio": { + "content": { + "USDT": { + "available": 1000.0, + "total": 1000.0, + }, + }, + }, + }, + }, + "exchange_account_details": { + "exchange_details": { + "internal_name": functionnal_tests.EXCHANGE_INTERNAL_NAME, + }, + "auth_details": {}, + "portfolio": { + "unit": "USDT", + }, + }, + }, + } + + +@pytest.mark.parametrize("emit_signals", [False, True]) +@pytest.mark.asyncio +async def test_simulator_index_init_from_empty_state(init_action: dict, emit_signals: bool): + all_actions = [init_action, index_trading_mode_action(init_action)] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + with trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock: + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. run index trading mode action + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_initial_rebalance_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + # action is reset: this is a trading mode action: it will be executed again at the next execution + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + # scheduled next execution time at 1h after the current execution (1h is the default time when unspecified) + assert after_initial_rebalance_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + one_hour = common_enums.TimeFramesMinutes[common_enums.TimeFrames.ONE_HOUR] * common_constants.MINUTE_TO_SECONDS + allowed_execution_time = 20 + schedule_delay = ( + after_initial_rebalance_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_initial_rebalance_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + # check portfolio content + after_initial_rebalance_portfolio_content = after_initial_rebalance_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_initial_rebalance_execution_dump, dict) + assert list(sorted(after_initial_rebalance_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_initial_rebalance_portfolio_content["USDT"]["available"] < 5 + assert 0.1 < after_initial_rebalance_portfolio_content["ETH"]["available"] < 0.4 + assert 0.001 < after_initial_rebalance_portfolio_content["BTC"]["available"] < 0.01 + logging.getLogger("test_update_simulated_basket_bot").info(f"after_execution_portfolio_content: {after_initial_rebalance_portfolio_content}") + + after_initial_rebalance_reference_account_portfolio_content = after_initial_rebalance_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_rebalance_reference_account_portfolio_content, dict) + assert list(sorted(after_initial_rebalance_reference_account_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_initial_rebalance_reference_account_portfolio_content["USDT"]["available"] < 5 + assert 0.1 < after_initial_rebalance_reference_account_portfolio_content["ETH"]["available"] < 0.4 + assert 0.001 < after_initial_rebalance_reference_account_portfolio_content["BTC"]["available"] < 0.01 + + # 3. trigger again: nothing to do + async with octobot_flow.AutomationJob(after_initial_rebalance_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_second_call_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + # action is reset: this is a trading mode action: it will be executed again at the next execution + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + # ensure schedule delay is the same as the first call + schedule_delay = ( + after_second_call_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_second_call_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + # portfolio already follows the index content: ensure portfolio content is the same as the first call + after_second_call_portfolio_content = after_second_call_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert after_second_call_portfolio_content == after_initial_rebalance_portfolio_content + after_second_call_reference_account_portfolio_content = after_second_call_execution_dump["automation"]["exchange_account_elements"][ + "portfolio" + ]["content"] + assert after_second_call_reference_account_portfolio_content == after_initial_rebalance_reference_account_portfolio_content + + if emit_signals: + assert insert_trading_signal_mock.await_count == 2 + for await_args in insert_trading_signal_mock.await_args_list: + trading_signal_arg = await_args.args[0] + _assert_trading_signal_btc_eth_usdt_index_portfolio(trading_signal_arg) + else: + insert_trading_signal_mock.assert_not_awaited() + + +@pytest.mark.parametrize("emit_signals", [False, True]) +@pytest.mark.asyncio +async def test_simulator_index_rebalance_after_index_content_switch_btc_eth_to_btc_sol( + init_action: dict, + emit_signals: bool, +): + all_actions = [init_action, index_trading_mode_action(init_action)] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + with trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock: + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. first index run: BTC + ETH (base index_content) + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_btc_eth_execution_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + after_btc_eth_portfolio = after_btc_eth_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert list(sorted(after_btc_eth_portfolio.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_btc_eth_portfolio["USDT"]["available"] < 5 + assert 0.1 < after_btc_eth_portfolio["ETH"]["available"] < 0.4 + assert 0.001 < after_btc_eth_portfolio["BTC"]["available"] < 0.01 + assert "SOL" not in after_btc_eth_portfolio + + after_btc_eth_reference_portfolio = after_btc_eth_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert list(sorted(after_btc_eth_reference_portfolio.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_btc_eth_reference_portfolio["USDT"]["available"] < 5 + assert 0.1 < after_btc_eth_reference_portfolio["ETH"]["available"] < 0.4 + assert 0.001 < after_btc_eth_reference_portfolio["BTC"]["available"] < 0.01 + assert "SOL" not in after_btc_eth_reference_portfolio + + one_hour = common_enums.TimeFramesMinutes[common_enums.TimeFrames.ONE_HOUR] * common_constants.MINUTE_TO_SECONDS + allowed_execution_time = 20 + + # 3. switch index definition to BTC + SOL and rebalance + dump_after_index_switch = copy.deepcopy(after_btc_eth_execution_dump) + _replace_index_trading_mode_dsl_in_dump(dump_after_index_switch, index_content_btc_sol) + async with octobot_flow.AutomationJob(dump_after_index_switch, [], [], {}) as automation_job: + await automation_job.run() + after_btc_sol_execution_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + # portfolio should be updated to BTC + SOL, ETH should be removed + after_btc_sol_portfolio = after_btc_sol_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert list(sorted(after_btc_sol_portfolio.keys())) == ["BTC", "ETH", "SOL", "USDT"] + assert 0 < after_btc_sol_portfolio["USDT"]["available"] < 5 + assert 0.001 < after_btc_sol_portfolio["BTC"]["available"] < 0.02 + assert 0.5 < after_btc_sol_portfolio["SOL"]["available"] < 20 + assert 0 < after_btc_sol_portfolio["ETH"]["available"] < 0.001 # sold close to all ETH + + after_btc_sol_reference_portfolio = after_btc_sol_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert list(sorted(after_btc_sol_reference_portfolio.keys())) == ["BTC", "ETH", "SOL", "USDT"] + assert 0 < after_btc_sol_reference_portfolio["USDT"]["available"] < 5 + assert 0.001 < after_btc_sol_reference_portfolio["BTC"]["available"] < 0.02 + assert 0.5 < after_btc_sol_reference_portfolio["SOL"]["available"] < 20 + assert 0 < after_btc_sol_reference_portfolio["ETH"]["available"] < 0.001 # sold close to all ETH + + schedule_delay = ( + after_btc_sol_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_btc_sol_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + # 4. trigger again: portfolio already matches BTC + SOL index + async with octobot_flow.AutomationJob(after_btc_sol_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_second_call_execution_dump = automation_job.dump() + + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + schedule_delay = ( + after_second_call_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_second_call_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert one_hour - allowed_execution_time < schedule_delay < one_hour + allowed_execution_time + + after_second_call_portfolio = after_second_call_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert after_second_call_portfolio == after_btc_sol_portfolio + after_second_call_reference_portfolio = after_second_call_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert after_second_call_reference_portfolio == after_btc_sol_reference_portfolio + + if emit_signals: + assert insert_trading_signal_mock.await_count == 3 + _assert_trading_signal_btc_eth_usdt_index_portfolio(insert_trading_signal_mock.await_args_list[0].args[0]) + _assert_trading_signal_btc_eth_sol_usdt_after_btc_sol_rebalance(insert_trading_signal_mock.await_args_list[1].args[0]) + _assert_trading_signal_btc_eth_sol_usdt_after_btc_sol_rebalance(insert_trading_signal_mock.await_args_list[2].args[0]) + else: + insert_trading_signal_mock.assert_not_awaited() + + +@pytest.mark.parametrize("emit_signals", [False, True]) +@pytest.mark.asyncio +async def test_simulator_index_with_added_traded_pairs(init_action: dict, emit_signals: bool): + all_actions = [init_action, index_trading_mode_action(init_action)] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + with trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock: + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. run index trading mode action + with ( + mock.patch.object( + index_trading_mode.IndexTradingMode, "get_dsl_dependencies", + # ETH/USDT won't be identified as dependency but is in index config: it will be added dynamically + return_value=[trading_dsl.SymbolDependency(symbol="BTC/USDT")] + ) as mock_get_dsl_dependencies, + mock.patch.object( + rebalancing.BaseRebalanceActionsPlanner, "_get_supported_distribution", + return_value=rebalancing.get_uniform_distribution(["BTC", "ETH"]) + ) as mock_get_supported_distribution, + mock.patch.object( + rebalancing.BaseRebalanceActionsPlanner, "_get_filtered_traded_coins", + return_value=["BTC", "ETH"] + ) as mock_get_filtered_traded_coins, + mock.patch.object( + exchange_channels, "create_minimal_dynamic_symbols_env_producers_if_needed", + mock.AsyncMock(wraps=exchange_channels.create_minimal_dynamic_symbols_env_producers_if_needed) + ) as mock_create_minimal_dynamic_symbols_env_producers_if_needed, + ): + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + assert mock_get_dsl_dependencies.call_count > 1 + # ensure the ETH/USDC pairs is really added as a dynamic symbol + mock_create_minimal_dynamic_symbols_env_producers_if_needed.assert_awaited_once() + expected_call_count = 1 + assert mock_get_supported_distribution.call_count == expected_call_count + assert mock_get_filtered_traded_coins.call_count == expected_call_count + after_initial_rebalance_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + # action is reset: this is a trading mode action: it will be executed again at the next execution + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + after_initial_rebalance_portfolio_content = after_initial_rebalance_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_initial_rebalance_execution_dump, dict) + assert list(sorted(after_initial_rebalance_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_initial_rebalance_portfolio_content["USDT"]["available"] < 5 + assert 0.1 < after_initial_rebalance_portfolio_content["ETH"]["available"] < 0.4 + assert 0.001 < after_initial_rebalance_portfolio_content["BTC"]["available"] < 0.01 + + after_initial_rebalance_reference_account_portfolio_content = after_initial_rebalance_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_rebalance_reference_account_portfolio_content, dict) + assert list(sorted(after_initial_rebalance_reference_account_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_initial_rebalance_reference_account_portfolio_content["USDT"]["available"] < 5 + assert 0.1 < after_initial_rebalance_reference_account_portfolio_content["ETH"]["available"] < 0.4 + assert 0.001 < after_initial_rebalance_reference_account_portfolio_content["BTC"]["available"] < 0.01 + + if emit_signals: + assert insert_trading_signal_mock.await_count == 1 + _assert_trading_signal_btc_eth_usdt_index_portfolio( + insert_trading_signal_mock.await_args_list[0].args[0], + allow_zero_ratio_assets=frozenset({"ETH"}), + ) + else: + insert_trading_signal_mock.assert_not_awaited() + + +@pytest.mark.parametrize("emit_signals", [False, True]) +@pytest.mark.asyncio +async def test_simulator_copy_index( + init_action: dict, + index_reference_account: copy_entities.Account, + emit_signals: bool, +): + reference_market = init_action["config"]["exchange_account_details"]["portfolio"]["unit"] + all_actions = [ + init_action, + copy_exchange_account_action(reference_market, index_reference_account) + ] + automation_state = automation_state_dict(resolved_actions(all_actions)) + set_emit_signals_metadata(automation_state, emit_signals) + + with trading_signal_emission_patches(emit_signals) as insert_trading_signal_mock: + # 1. run init action + async with octobot_flow.AutomationJob(automation_state, [], [], {}) as automation_job: + await automation_job.run() + after_init_execution_dump = automation_job.dump() + + # check bot actions execution + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at and action.executed_at >= current_time + assert action.previous_execution_result is None + else: + assert action.executed_at is None + assert action.previous_execution_result is None + + # 2. run copy exchange account action + async with octobot_flow.AutomationJob(after_init_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_initial_rebalance_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + # action is reset: this is a trading mode action: it will be executed again at the next execution + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + # scheduled next execution time 4h after the current execution (4h is the default time when unspecified when copying an account) + assert after_initial_rebalance_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] >= current_time + allowed_execution_time = 20 + schedule_delay = ( + after_initial_rebalance_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_initial_rebalance_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert copy_constants.DEFAULT_COPY_WAITING_TIME - allowed_execution_time < schedule_delay < copy_constants.DEFAULT_COPY_WAITING_TIME + allowed_execution_time + # check portfolio content + after_initial_rebalance_portfolio_content = after_initial_rebalance_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert isinstance(after_initial_rebalance_execution_dump, dict) + assert list(sorted(after_initial_rebalance_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_initial_rebalance_portfolio_content["USDT"]["available"] < 5 + assert 0.1 < after_initial_rebalance_portfolio_content["ETH"]["available"] < 0.4 + assert 0.001 < after_initial_rebalance_portfolio_content["BTC"]["available"] < 0.01 + logging.getLogger("test_update_simulated_basket_bot").info(f"after_execution_portfolio_content: {after_initial_rebalance_portfolio_content}") + + after_initial_rebalance_reference_account_portfolio_content = after_initial_rebalance_execution_dump["automation"][ + "exchange_account_elements" + ]["portfolio"]["content"] + assert isinstance(after_initial_rebalance_reference_account_portfolio_content, dict) + assert list(sorted(after_initial_rebalance_reference_account_portfolio_content.keys())) == ["BTC", "ETH", "USDT"] + assert 0 < after_initial_rebalance_reference_account_portfolio_content["USDT"]["available"] < 5 + assert 0.1 < after_initial_rebalance_reference_account_portfolio_content["ETH"]["available"] < 0.4 + assert 0.001 < after_initial_rebalance_reference_account_portfolio_content["BTC"]["available"] < 0.01 + + # 3. trigger again: nothing to do + async with octobot_flow.AutomationJob(after_initial_rebalance_execution_dump, [], [], {}) as automation_job: + await automation_job.run() + after_second_call_execution_dump = automation_job.dump() + assert len(automation_job.automation_state.automation.actions_dag.actions) == len(all_actions) + for index, action in enumerate(automation_job.automation_state.automation.actions_dag.actions): + assert isinstance(action, octobot_flow.entities.AbstractActionDetails) + assert action.error_status == octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + assert action.result is None + if index == 0: + assert action.executed_at is not None + assert action.previous_execution_result is None + else: + # action is reset: this is a trading mode action: it will be executed again at the next execution + assert action.executed_at is None + assert isinstance(action.previous_execution_result, dict) + + # ensure schedule delay is the same as the first call + schedule_delay = ( + after_second_call_execution_dump["automation"]["execution"]["current_execution"]["scheduled_to"] + - after_second_call_execution_dump["automation"]["execution"]["previous_execution"]["triggered_at"] + ) + assert copy_constants.DEFAULT_COPY_WAITING_TIME - allowed_execution_time < schedule_delay < copy_constants.DEFAULT_COPY_WAITING_TIME + allowed_execution_time + + # portfolio already follows the index content: ensure portfolio content is the same as the first call + after_second_call_portfolio_content = after_second_call_execution_dump["automation"]["exchange_account_elements"]["portfolio"]["content"] + assert after_second_call_portfolio_content == after_initial_rebalance_portfolio_content + after_second_call_reference_account_portfolio_content = after_second_call_execution_dump["automation"]["exchange_account_elements"][ + "portfolio" + ]["content"] + assert after_second_call_reference_account_portfolio_content == after_initial_rebalance_reference_account_portfolio_content + + if emit_signals: + assert insert_trading_signal_mock.await_count == 2 + for await_args in insert_trading_signal_mock.await_args_list: + _assert_trading_signal_btc_eth_usdt_index_portfolio(await_args.args[0]) + else: + insert_trading_signal_mock.assert_not_awaited() diff --git a/packages/flow/tests/jobs/test_automations_job.py b/packages/flow/tests/jobs/test_automations_job.py new file mode 100644 index 0000000000..11f45c94b4 --- /dev/null +++ b/packages/flow/tests/jobs/test_automations_job.py @@ -0,0 +1,15 @@ +import pytest + +import octobot_flow +import octobot_flow.entities +import octobot_flow.errors + +from tests.functionnal_tests import global_state, auth_details + + +@pytest.mark.asyncio +async def test_not_automations_configured(global_state: dict, auth_details: octobot_flow.entities.UserAuthentication): + global_state["automation"] = {} + with pytest.raises(octobot_flow.errors.NoAutomationError): + async with octobot_flow.AutomationJob(global_state, [], [], auth_details): + pass diff --git a/packages/flow/tests/logic/actions/test_account_copy_util.py b/packages/flow/tests/logic/actions/test_account_copy_util.py new file mode 100644 index 0000000000..f4698ad012 --- /dev/null +++ b/packages/flow/tests/logic/actions/test_account_copy_util.py @@ -0,0 +1,154 @@ +import decimal +import json +import time + +import pytest + +import octobot_commons.constants as commons_constants +import octobot_commons.errors as commons_errors +import octobot_copy.constants as copy_constants +import octobot_copy.entities as copy_entities +import octobot_flow.entities +import octobot_flow.errors +import octobot_flow.logic.actions.account_copy_util as account_copy_util +import octobot_flow.logic.actions.actions_factory as actions_factory + +STRATEGY_ID = "test-copy-strategy" +REFERENCE_MARKET = "USDT" + + +def _minimal_account(*, btc_total: str = "0.01") -> copy_entities.Account: + total = decimal.Decimal(btc_total) + return copy_entities.Account( + updated_at=time.time(), + content={ + "BTC": { + copy_constants.PORTFOLIO_ASSET_ALLOCATION_RATIO: decimal.Decimal("1"), + commons_constants.PORTFOLIO_TOTAL: total, + commons_constants.PORTFOLIO_AVAILABLE: total, + }, + }, + ) + + +class TestUpdateActionTradingSignalIfRelevant: + def test_updates_reference_account_when_strategy_matches(self): + original_account = _minimal_account(btc_total="0.01") + action = actions_factory.create_copy_exchange_account_action( + STRATEGY_ID, + REFERENCE_MARKET, + original_account, + None, + ) + action.id = "action_copy_exchange_account" + original_script = action.dsl_script + new_account = _minimal_account(btc_total="0.99") + signal = octobot_flow.entities.TradingSignal( + strategy_id=STRATEGY_ID, + account=new_account, + ) + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) + assert action.dsl_script != original_script + assert action.dsl_script == action.resolved_dsl_script + assert action.id == "action_copy_exchange_account" + assert "0.99" in action.dsl_script + assert "0.01" not in action.dsl_script + + def test_raises_when_strategy_id_mismatches(self): + action = actions_factory.create_copy_exchange_account_action( + STRATEGY_ID, + REFERENCE_MARKET, + _minimal_account(), + None, + ) + action.id = "a1" + signal = octobot_flow.entities.TradingSignal( + strategy_id="other-strategy", + account=_minimal_account(btc_total="2"), + ) + with pytest.raises(octobot_flow.errors.CommunityTradingSignalError): + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) + + def test_no_op_when_top_is_not_copy_exchange_account(self): + action = octobot_flow.entities.DSLScriptActionDetails( + id="lit", + dsl_script="42", + resolved_dsl_script="42", + ) + signal = octobot_flow.entities.TradingSignal( + strategy_id=STRATEGY_ID, + account=_minimal_account(), + ) + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) + assert action.dsl_script == "42" + + def test_no_op_for_non_dsl_action(self): + action = octobot_flow.entities.ConfiguredActionDetails( + id="cfg", + action="apply_configuration", + config={}, + ) + signal = octobot_flow.entities.TradingSignal( + strategy_id=STRATEGY_ID, + account=_minimal_account(), + ) + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) + assert action.action == "apply_configuration" + + def test_raises_when_dsl_script_empty(self): + action = octobot_flow.entities.DSLScriptActionDetails( + id="x", + dsl_script="", + resolved_dsl_script="", + ) + signal = octobot_flow.entities.TradingSignal( + strategy_id=STRATEGY_ID, + account=_minimal_account(), + ) + with pytest.raises(octobot_flow.errors.InvalidAutomationActionError): + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) + + def test_raises_when_unresolved_placeholder(self): + action = octobot_flow.entities.DSLScriptActionDetails( + id="x", + dsl_script=( + f"copy_exchange_account(strategy_id={json.dumps(STRATEGY_ID)}, " + f"reference_market='{REFERENCE_MARKET}', " + f"reference_account='{commons_constants.UNRESOLVED_PARAMETER_PLACEHOLDER}', " + f"account_copy_settings='{{}}')" + ), + resolved_dsl_script="", + ) + signal = octobot_flow.entities.TradingSignal( + strategy_id=STRATEGY_ID, + account=_minimal_account(), + ) + with pytest.raises(octobot_flow.errors.UnresolvedDSLScriptError): + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) + + def test_raises_on_unknown_operator(self): + action = octobot_flow.entities.DSLScriptActionDetails( + id="x", + dsl_script="not_a_registered_dsl_operator_xyz_123()", + resolved_dsl_script="", + ) + signal = octobot_flow.entities.TradingSignal( + strategy_id=STRATEGY_ID, + account=_minimal_account(), + ) + with pytest.raises(commons_errors.UnsupportedOperatorError): + account_copy_util.update_action_trading_signal_if_relevant( + action, signal, REFERENCE_MARKET + ) diff --git a/packages/flow/tests/parsers/test_actions_dag_parser.py b/packages/flow/tests/parsers/test_actions_dag_parser.py new file mode 100644 index 0000000000..19a2460c5f --- /dev/null +++ b/packages/flow/tests/parsers/test_actions_dag_parser.py @@ -0,0 +1,105 @@ +import pytest + +import octobot_flow.errors +import octobot_flow.parsers.actions_dag_parser as actions_dag_parser + + +class TestResolveParamDependencies: + """`_resolve_param_dependencies`: top-level dataclass fields.""" + + def test_alias_resolves_to_order_amount(self): + params = actions_dag_parser.ActionsDAGParserParams.from_dict({ + "ORDER_AMOUNT": 1.5, + "BLOCKCHAIN_FROM_AMOUNT": f"{actions_dag_parser.PARAM_DEPENDENCY_PREFIX}ORDER_AMOUNT", + }) + assert params.BLOCKCHAIN_FROM_AMOUNT == 1.5 + assert params.ORDER_AMOUNT == 1.5 + + def test_two_hop_chain(self): + params = actions_dag_parser.ActionsDAGParserParams.from_dict({ + "ORDER_AMOUNT": 2.0, + "BLOCKCHAIN_FROM_AMOUNT": f"{actions_dag_parser.PARAM_DEPENDENCY_PREFIX}ORDER_AMOUNT", + "BLOCKCHAIN_TO_AMOUNT": f"{actions_dag_parser.PARAM_DEPENDENCY_PREFIX}BLOCKCHAIN_FROM_AMOUNT", + }) + assert params.BLOCKCHAIN_TO_AMOUNT == 2.0 + assert params.BLOCKCHAIN_FROM_AMOUNT == 2.0 + + def test_target_name_is_case_insensitive(self): + prefix = actions_dag_parser.PARAM_DEPENDENCY_PREFIX + params = actions_dag_parser.ActionsDAGParserParams.from_dict({ + "ORDER_AMOUNT": 4.0, + "BLOCKCHAIN_FROM_AMOUNT": f"{prefix}order_amount", + "BLOCKCHAIN_TO_AMOUNT": f"{prefix}Blockchain_From_Amount", + }) + assert params.BLOCKCHAIN_FROM_AMOUNT == 4.0 + assert params.BLOCKCHAIN_TO_AMOUNT == 4.0 + + +class TestResolveParamDependenciesInMapping: + """`_resolve_param_dependencies_in_mapping`: dict-valued fields (nested dicts).""" + + def test_resolves_inside_dict_field(self): + prefix = actions_dag_parser.PARAM_DEPENDENCY_PREFIX + params = actions_dag_parser.ActionsDAGParserParams.from_dict({ + "BLOCKCHAIN_BALANCE_ADDRESS": "0xabc", + "ORDER_EXTRA_PARAMS": { + "address_to": f"{prefix}BLOCKCHAIN_BALANCE_ADDRESS", + }, + }) + assert params.ORDER_EXTRA_PARAMS == {"address_to": "0xabc"} + + def test_resolves_inside_dict_field_case_insensitive_target(self): + prefix = actions_dag_parser.PARAM_DEPENDENCY_PREFIX + params = actions_dag_parser.ActionsDAGParserParams.from_dict({ + "BLOCKCHAIN_BALANCE_ADDRESS": "0xdef", + "ORDER_EXTRA_PARAMS": { + "address_to": f"{prefix}blockchain_balance_address", + }, + }) + assert params.ORDER_EXTRA_PARAMS == {"address_to": "0xdef"} + + def test_resolves_nested_dict_values(self): + prefix = actions_dag_parser.PARAM_DEPENDENCY_PREFIX + params = actions_dag_parser.ActionsDAGParserParams.from_dict({ + "ORDER_AMOUNT": 3.0, + "CONTENT": { + "nested": { + "amount": f"{prefix}ORDER_AMOUNT", + }, + }, + }) + assert params.CONTENT == {"nested": {"amount": 3.0}} + + +class TestResolveParamDependencyStringValue: + """`_resolve_param_dependency_string_value`: invalid or deferred resolution.""" + + def test_empty_suffix_raises(self): + with pytest.raises(octobot_flow.errors.InvalidAutomationActionError): + actions_dag_parser.ActionsDAGParserParams.from_dict({ + "BLOCKCHAIN_FROM_AMOUNT": actions_dag_parser.PARAM_DEPENDENCY_PREFIX, + }) + + def test_malformed_extra_segments_raises(self): + with pytest.raises(octobot_flow.errors.InvalidAutomationActionError): + actions_dag_parser.ActionsDAGParserParams.from_dict({ + "BLOCKCHAIN_FROM_AMOUNT": ( + f"{actions_dag_parser.PARAM_DEPENDENCY_PREFIX}ORDER_AMOUNT::extra" + ), + }) + + def test_unknown_target_raises(self): + with pytest.raises(octobot_flow.errors.InvalidAutomationActionError) as raised: + actions_dag_parser.ActionsDAGParserParams.from_dict({ + "BLOCKCHAIN_FROM_AMOUNT": f"{actions_dag_parser.PARAM_DEPENDENCY_PREFIX}NOT_A_FIELD", + }) + assert "NOT_A_FIELD" in str(raised.value) + + def test_two_node_cycle_raises(self): + prefix = actions_dag_parser.PARAM_DEPENDENCY_PREFIX + with pytest.raises(octobot_flow.errors.InvalidAutomationActionError) as raised: + actions_dag_parser.ActionsDAGParserParams.from_dict({ + "BLOCKCHAIN_FROM_AMOUNT": f"{prefix}BLOCKCHAIN_TO_AMOUNT", + "BLOCKCHAIN_TO_AMOUNT": f"{prefix}BLOCKCHAIN_FROM_AMOUNT", + }) + assert "cycle" in str(raised.value).lower() or "unresolved" in str(raised.value).lower() diff --git a/packages/flow/tests/parsers/test_automation_state_reader.py b/packages/flow/tests/parsers/test_automation_state_reader.py new file mode 100644 index 0000000000..518deba573 --- /dev/null +++ b/packages/flow/tests/parsers/test_automation_state_reader.py @@ -0,0 +1,85 @@ +import mock + +import octobot_flow.entities +import octobot_flow.logic.dsl +import octobot_flow.parsers.automation_state_reader as automation_state_reader +import octobot_trading.dsl + + +def _minimal_state_empty_dag() -> octobot_flow.entities.AutomationState: + return octobot_flow.entities.AutomationState.from_dict({ + "automation": { + "metadata": {"automation_id": "automation_1"}, + "actions_dag": {"actions": []}, + }, + }) + + +class TestGetAutomationCopiedStrategyIds: + def test_empty_when_no_executable_actions(self): + reader = automation_state_reader.AutomationStateReader(_minimal_state_empty_dag()) + assert reader.get_automation_copied_strategy_ids() == [] + + def test_returns_unique_strategy_ids(self): + dependency_alpha = octobot_trading.dsl.CopyTradingDependency(strategy_id="strategy_alpha", refresh_required=False) + dependency_beta = octobot_trading.dsl.CopyTradingDependency(strategy_id="strategy_beta", refresh_required=False) + duplicate_alpha = octobot_trading.dsl.CopyTradingDependency(strategy_id="strategy_alpha", refresh_required=False) + with mock.patch.object( + octobot_flow.logic.dsl, + "get_copy_trading_dependencies", + return_value=[dependency_alpha, dependency_beta, duplicate_alpha], + ): + reader = automation_state_reader.AutomationStateReader(_minimal_state_empty_dag()) + assert set(reader.get_automation_copied_strategy_ids()) == {"strategy_alpha", "strategy_beta"} + + def test_ignores_priority_actions_uses_dag_executable_only(self): + captured_actions: list[list[octobot_flow.entities.AbstractActionDetails]] = [] + + def capture_copy_dependencies( + actions: list[octobot_flow.entities.AbstractActionDetails], + minimal_profile_data, + ): + captured_actions.append(actions) + return [] + + dag_action = octobot_flow.entities.DSLScriptActionDetails( + id="dag_action", + dsl_script="True", + ) + priority_action = octobot_flow.entities.DSLScriptActionDetails( + id="priority_action", + dsl_script="True", + ) + state = octobot_flow.entities.AutomationState( + automation=octobot_flow.entities.AutomationDetails( + metadata=octobot_flow.entities.AutomationMetadata(automation_id="automation_1"), + actions_dag=octobot_flow.entities.ActionsDAG(actions=[dag_action]), + ), + priority_actions=[priority_action], + ) + with mock.patch.object( + octobot_flow.logic.dsl, + "get_copy_trading_dependencies", + side_effect=capture_copy_dependencies, + ): + reader = automation_state_reader.AutomationStateReader(state) + assert reader.get_automation_copied_strategy_ids() == [] + assert len(captured_actions) == 1 + assert len(captured_actions[0]) == 1 + assert captured_actions[0][0].id == "dag_action" + + +class TestGetExecutableActions: + def test_delegates_to_dag(self): + dag_action = octobot_flow.entities.DSLScriptActionDetails( + id="dag_action", + dsl_script="True", + ) + state = octobot_flow.entities.AutomationState( + automation=octobot_flow.entities.AutomationDetails( + metadata=octobot_flow.entities.AutomationMetadata(automation_id="automation_1"), + actions_dag=octobot_flow.entities.ActionsDAG(actions=[dag_action]), + ), + ) + reader = automation_state_reader.AutomationStateReader(state) + assert reader.get_executable_actions() == state.automation.actions_dag.get_executable_actions() diff --git a/packages/flow/tests/repositories/community/test_trading_signals_channel.py b/packages/flow/tests/repositories/community/test_trading_signals_channel.py new file mode 100644 index 0000000000..ec5b0f5480 --- /dev/null +++ b/packages/flow/tests/repositories/community/test_trading_signals_channel.py @@ -0,0 +1,65 @@ +import asyncio +import mock +import pytest + +import async_channel.channels as async_channel_channels + +import octobot_copy.entities as copy_entities + +import octobot_flow.entities +import octobot_flow.repositories.community.trading_signals_channel as trading_signals_channel +import octobot_flow.repositories.community.trading_signals_repository as trading_signals_repository + + +@pytest.fixture +def internal_channel_name(): + return trading_signals_channel.InternalTradingSignalChannel.get_name() + + +@pytest.fixture(autouse=True) +def reset_internal_trading_signal_channel(internal_channel_name): + async_channel_channels.del_chan(internal_channel_name) + yield + async_channel_channels.del_chan(internal_channel_name) + + +@pytest.mark.asyncio +async def test_insert_trading_signal_delivers_to_subscriber(internal_channel_name): + received: list[octobot_flow.entities.TradingSignal] = [] + + async def capture_callback(trading_signal: octobot_flow.entities.TradingSignal) -> None: + received.append(trading_signal) + + channel = await trading_signals_channel.get_or_create_internal_trading_signal_channel() + await channel.new_consumer(capture_callback) + + account = copy_entities.Account() + signal = octobot_flow.entities.TradingSignal(account=account, strategy_id="test-strategy-id") + repository = trading_signals_repository.TradingSignalsRepository(mock.MagicMock()) + await repository.insert_trading_signal(signal) + + await asyncio.sleep(0.05) + assert len(received) == 1 + assert received[0] is signal + + await trading_signals_channel.shutdown_internal_trading_signal_channel() + + +@pytest.mark.asyncio +async def test_get_or_create_internal_trading_signal_channel_is_idempotent(internal_channel_name): + first = await trading_signals_channel.get_or_create_internal_trading_signal_channel() + second = await trading_signals_channel.get_or_create_internal_trading_signal_channel() + assert first is second + await trading_signals_channel.shutdown_internal_trading_signal_channel() + + +@pytest.mark.asyncio +async def test_shutdown_internal_trading_signal_channel_allows_recreate(internal_channel_name): + await trading_signals_channel.get_or_create_internal_trading_signal_channel() + await trading_signals_channel.shutdown_internal_trading_signal_channel() + with pytest.raises(KeyError): + async_channel_channels.get_chan(internal_channel_name) + + new_channel = await trading_signals_channel.get_or_create_internal_trading_signal_channel() + assert new_channel is not None + await trading_signals_channel.shutdown_internal_trading_signal_channel() diff --git a/packages/launcher/README.md b/packages/launcher/README.md new file mode 100644 index 0000000000..b6d2b071c7 --- /dev/null +++ b/packages/launcher/README.md @@ -0,0 +1,144 @@ +# octobot-launcher + +A lightweight, self-contained daemon that manages OctoBot trading-bot instances on a single host. It handles the full lifecycle — start, stop, update, restart — across three different runtimes, and exposes a local HTTP API that the bundled CLI (and mobile apps) talk to. + +## Why it exists + +Running OctoBot in production requires more than just `python start.py`. Instances need to survive reboots, recover after crashes, update themselves without manual SSH sessions, and be reachable from a phone without opening the host to the internet. The launcher solves all of that as a single static binary with no external runtime dependencies. + +## How it works + +The launcher runs as an OS service (systemd on Linux, launchd on macOS, SCM on Windows). On first boot it writes a one-time bootstrap token to disk, then starts a local HTTP API bound to `127.0.0.1:7531` by default. All subsequent management — from the CLI or a mobile app — goes through that API with bearer-token authentication. + +Instances are stored as JSON records in the data directory. The launcher supervises them directly: it spawns processes, holds PIDs, sends signals on stop/restart, and optionally probes an HTTP health endpoint to track liveness. When a Docker instance is stopped or updated, the launcher calls the Docker daemon rather than managing a process itself. All three backends — Docker, native binary, and Python virtualenv — implement the same `Backend` trait, so the rest of the system doesn't need to know which one it's talking to. + +## Install + +**macOS / Linux** + +```sh +curl -fsSL https://raw.githubusercontent.com/Drakkar-Software/OctoBot/master/packages/launcher/install.sh | sh +``` + +Pass `-- --service` to also register the OS service automatically: + +```sh +curl -fsSL https://raw.githubusercontent.com/Drakkar-Software/OctoBot/master/packages/launcher/install.sh | sh -s -- --service +``` + +**Windows (PowerShell)** + +```powershell +irm https://raw.githubusercontent.com/Drakkar-Software/OctoBot/master/packages/launcher/install.ps1 | iex +``` + +Both installers place the binary in `~/.local/bin` (macOS/Linux) or `%LOCALAPPDATA%\Programs\OctoBot-Launcher` (Windows) and add it to your `PATH`. + +**Environment variables** + +| Variable | Default | Description | +|---|---|---| +| `OCTOBOT_LAUNCHER_VERSION` | latest | Pin a specific release (e.g. `0.1.0`) | +| `OCTOBOT_LAUNCHER_INSTALL_DIR` | platform default | Override the install directory | + +**Uninstall** + +```sh +octobot-launcher service uninstall +rm ~/.local/bin/octobot-launcher # adjust path if customised +``` + +> **macOS / Windows note:** v1 binaries are unsigned. macOS will show a Gatekeeper warning on first run. Work around it with: +> `xattr -d com.apple.quarantine $(which octobot-launcher)` +> Windows SmartScreen can be bypassed via "More info → Run anyway" in the dialog. + +Pre-built binaries for all platforms are listed on the [GitHub Releases page](https://github.com/Drakkar-Software/OctoBot/releases?q=launcher-v). + +## Getting started + +After installing (see above), register the OS service and start it: + +```sh +octobot-launcher service install +octobot-launcher service start +``` + +On first start the bootstrap token is printed to stdout and written to `bootstrap_token.txt` in the data directory. Use it immediately to create a persistent token: + +```sh +octobot-launcher token create --label my-token +``` + +The bootstrap token is automatically removed from the store after it is first used; the new token is what you keep. + +## Instances + +An instance represents one running OctoBot. Add one with the runtime of your choice: + +```sh +# Docker (default image drakkar/octobot:) +octobot-launcher instance add --name mybot --runtime docker --version 2.4.42 + +# Pre-built binary +octobot-launcher instance add --name mybot --runtime binary --version 2.4.42 + +# Python virtualenv (managed by the launcher) +octobot-launcher instance add --name mybot --runtime python --version 2.4.42 +``` + +Starting an instance tells the supervisor to bring it up according to its runtime. Stopping it gracefully signals the process (or stops the container) and waits for the configured timeout before escalating to SIGKILL. + +```sh +octobot-launcher instance start +octobot-launcher instance stop +octobot-launcher instance status +``` + +IDs can be abbreviated to their 8-character prefix shown in `instance list`. + +## Auto-update + +The launcher checks a signed manifest at regular intervals (default: every 6 hours). The manifest is verified with an Ed25519 key baked into the binary at build time, so a compromised distribution server cannot push malicious updates. When an update is available, the launcher replaces itself on disk using an atomic rename and schedules a restart. + +Instance updates follow a blue-green pattern: pull the new image/binary, stop and remove the old container/process, start the new one. Downgrades are blocked unless explicitly requested. + +```sh +octobot-launcher update check +octobot-launcher update apply +``` + +## Tokens and access control + +Every API request requires a bearer token. Tokens carry a set of scopes (`instances:read`, `instances:write`, `updates:apply`, `tokens:manage`, etc.) and can have an expiry. The mobile-app pairing flow uses a QR code that encodes the API address and token together. + +```sh +octobot-launcher token list +octobot-launcher token create --label ci-runner --scope instances:read,instances:write +octobot-launcher token revoke +octobot-launcher token rotate # revoke old, issue new with same scopes +``` + +## Configuration + +The launcher looks for a TOML file at the platform default location (`~/.config/OctoBotLauncher/config.toml` on Linux, `~/Library/Application Support/software.Drakkar.OctoBotLauncher/config.toml` on macOS). Any key can be overridden with an environment variable using the `OCTOBOT_LAUNCHER__` prefix and `__` as the section separator: + +```sh +OCTOBOT_LAUNCHER__LAUNCHER__API_BIND=0.0.0.0:7531 +OCTOBOT_LAUNCHER__UPDATE__CHANNEL=beta +``` + +The `octobot-launcher doctor` command checks that all backends are reachable, the data directory is writable, and at least one token is configured. + +## Building + +The workspace requires Rust 1.81 or later. The release profile produces a stripped, LTO-optimised binary: + +```sh +cargo build --release -p octobot-launcher-cli +``` + +For cross-compiled targets (e.g. `aarch64-unknown-linux-musl`) use `cross`: + +```sh +cross build --release -p octobot-launcher-cli --target aarch64-unknown-linux-musl +``` diff --git a/packages/launcher/crates/octobot-launcher-api/Cargo.toml b/packages/launcher/crates/octobot-launcher-api/Cargo.toml new file mode 100644 index 0000000000..a0ad9fc070 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "octobot-launcher-api" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +octobot-launcher-config = { path = "../octobot-launcher-config" } +axum = { version = "0.8", features = ["macros"] } +tokio = { version = "1", features = ["full"] } +tower = "0.5" +tower-http = { version = "0.6", features = ["trace", "limit"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +tracing = "0.1" +rand = { version = "0.8", features = ["getrandom"] } +subtle = "2" +parking_lot = "0.12" +argon2 = "0.5" +sha2 = "0.10" +hex = "0.4" +governor = "0.6" +chrono = { version = "0.4", features = ["serde"] } +tokio-stream = "0.1" +futures-util = { version = "0.3", default-features = false } +uuid = { version = "1", features = ["v4"] } +thiserror = "2" +base64 = "0.22" + +[target.'cfg(unix)'.dependencies] +hyperlocal = "0.9" + +[dev-dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core", features = ["testing"] } +axum-test = "20" +tempfile = "3" +tokio = { version = "1", features = ["full"] } diff --git a/packages/launcher/crates/octobot-launcher-api/src/auth.rs b/packages/launcher/crates/octobot-launcher-api/src/auth.rs new file mode 100644 index 0000000000..5e91a74485 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/auth.rs @@ -0,0 +1,118 @@ +use std::sync::Arc; + +use axum::{ + body::Body, + extract::Request, + middleware::Next, + response::Response, +}; + +use crate::{ + error::ApiError, + lockout::Lockout, + token_store::{TOKEN_PREFIX, TokenRecord, TokenStore, token_has_scope}, +}; + +#[derive(Debug, Clone)] +pub struct LocalSocketMarker; + +#[derive(Debug, Clone)] +pub struct AuthLayer { + pub token_store: Arc, + pub lockout: Arc, + pub required_scope: Option, +} + +impl AuthLayer { + pub fn new( + token_store: Arc, + lockout: Arc, + required_scope: Option<&str>, + ) -> Self { + Self { + token_store, + lockout, + required_scope: required_scope.map(str::to_string), + } + } +} + +pub async fn auth_middleware( + axum::extract::State(layer): axum::extract::State, + mut request: Request, + next: Next, +) -> Result { + if request.extensions().get::().is_some() { + return Ok(next.run(request).await); + } + + let source_ip = extract_source_ip(&request); + + let auth_header = request + .headers() + .get(axum::http::header::AUTHORIZATION) + .and_then(|v| v.to_str().ok()) + .map(str::to_string); + + let raw_token = match auth_header { + Some(ref h) if h.starts_with("Bearer ") => h[7..].to_string(), + _ => return Err(ApiError::Unauthorized), + }; + + if !raw_token.starts_with(TOKEN_PREFIX) { + return Err(ApiError::Unauthorized); + } + + if !layer.lockout.check(&source_ip) { + return Err(ApiError::TooManyRequests); + } + + layer.token_store.reload(); + let record = layer.token_store.verify(&raw_token); + let Some(record) = record else { + layer.lockout.record_failure(&source_ip); + return Err(ApiError::Unauthorized); + }; + + layer.lockout.record_success(&source_ip); + + if let Some(ref scope) = layer.required_scope { + if !token_has_scope(&record, scope) { + return Err(ApiError::Forbidden); + } + } + + let token_id = record.id.clone(); + tracing::debug!(token_id = %token_id, "authenticated request"); + + { + let store = Arc::clone(&layer.token_store); + let id = token_id.clone(); + tokio::spawn(async move { + store.update_last_used(&id); + }); + } + + request.extensions_mut().insert(record); + Ok(next.run(request).await) +} + +fn extract_source_ip(request: &Request) -> String { + request + .headers() + .get("x-forwarded-for") + .and_then(|v| v.to_str().ok()) + .and_then(|s| s.split(',').next()) + .map(|s| s.trim().to_string()) + .or_else(|| { + request + .extensions() + .get::>() + .map(|ci| ci.0.ip().to_string()) + }) + .unwrap_or_else(|| "unknown".to_string()) +} + +pub fn extract_token_record(request: &Request) -> Option<&TokenRecord> { + request.extensions().get::() +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/error.rs b/packages/launcher/crates/octobot-launcher-api/src/error.rs new file mode 100644 index 0000000000..6225904e6a --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/error.rs @@ -0,0 +1,96 @@ +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + Json, +}; +use serde_json::json; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ApiError { + #[error("not found: {0}")] + NotFound(String), + #[error("unauthorized")] + Unauthorized, + #[error("forbidden")] + Forbidden, + #[error("too many requests")] + TooManyRequests, + #[error("bad request: {0}")] + BadRequest(String), + #[error("internal error: {0}")] + Internal(String), + #[error("conflict: {0}")] + Conflict(String), + #[error("service unavailable: {0}")] + ServiceUnavailable(String), +} + +impl IntoResponse for ApiError { + fn into_response(self) -> Response { + let (status, type_uri, title, detail) = match &self { + ApiError::NotFound(d) => ( + StatusCode::NOT_FOUND, + "https://octobot.software/errors/not-found", + "Not Found", + d.clone(), + ), + ApiError::Unauthorized => ( + StatusCode::UNAUTHORIZED, + "https://octobot.software/errors/unauthorized", + "Unauthorized", + "Authentication required or invalid credentials".to_string(), + ), + ApiError::Forbidden => ( + StatusCode::FORBIDDEN, + "https://octobot.software/errors/forbidden", + "Forbidden", + "Insufficient scope for this operation".to_string(), + ), + ApiError::TooManyRequests => ( + StatusCode::TOO_MANY_REQUESTS, + "https://octobot.software/errors/too-many-requests", + "Too Many Requests", + "Too many failed authentication attempts".to_string(), + ), + ApiError::BadRequest(d) => ( + StatusCode::BAD_REQUEST, + "https://octobot.software/errors/bad-request", + "Bad Request", + d.clone(), + ), + ApiError::Internal(d) => ( + StatusCode::INTERNAL_SERVER_ERROR, + "https://octobot.software/errors/internal", + "Internal Server Error", + d.clone(), + ), + ApiError::Conflict(d) => ( + StatusCode::CONFLICT, + "https://octobot.software/errors/conflict", + "Conflict", + d.clone(), + ), + ApiError::ServiceUnavailable(d) => ( + StatusCode::SERVICE_UNAVAILABLE, + "https://octobot.software/errors/service-unavailable", + "Service Unavailable", + d.clone(), + ), + }; + + let body = json!({ + "type": type_uri, + "title": title, + "status": status.as_u16(), + "detail": detail, + }); + + let mut response = (status, Json(body)).into_response(); + response.headers_mut().insert( + axum::http::header::CONTENT_TYPE, + axum::http::HeaderValue::from_static("application/problem+json"), + ); + response + } +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/lib.rs b/packages/launcher/crates/octobot-launcher-api/src/lib.rs new file mode 100644 index 0000000000..280f74f7c1 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/lib.rs @@ -0,0 +1,1019 @@ +#![deny(clippy::unwrap_used, clippy::expect_used)] + +pub mod auth; +pub mod error; +pub mod lockout; +pub mod routes; +pub mod scopes; +pub mod token_store; + +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::Arc; + +use axum::{ + Router, + body::Body, + extract::Request, + middleware, + response::{IntoResponse, Response}, + routing::{delete, get, patch, post}, +}; +use tower_http::{limit::RequestBodyLimitLayer, trace::TraceLayer}; + +use octobot_launcher_core::backend::Backend; + +use crate::{ + auth::{AuthLayer, auth_middleware}, + error::ApiError, + lockout::Lockout, + routes::{health, instances, tokens, updates}, + scopes::{ + SCOPE_INSTANCES_READ, SCOPE_INSTANCES_WRITE, SCOPE_TOKENS_MANAGE, SCOPE_UPDATES_APPLY, + SCOPE_UPDATES_READ, + }, + token_store::TokenStore, +}; + +pub use octobot_launcher_config::Store; +pub use token_store::{TokenRecord, default_scopes}; + +#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct ApiState { + pub store: Arc, + pub token_store: Arc, + pub lockout: Arc, + pub backends: Arc>>, +} + +#[derive(Debug, Clone)] +pub struct ListenerConfig { + pub tcp_bind: SocketAddr, + pub unix_socket_path: Option, +} + +#[allow(missing_debug_implementations)] +pub struct ApiServer { + router: Router, + pub config: ListenerConfig, +} + +impl ApiServer { + pub fn new(state: ApiState, config: ListenerConfig) -> Self { + let router = router(state); + Self { router, config } + } + + pub async fn serve(self) -> std::io::Result<()> { + let addr = self.config.tcp_bind; + if !addr.ip().is_loopback() { + tracing::warn!( + bind = %addr, + "API server binding to non-loopback address — ensure firewall rules restrict access" + ); + } + + let unix_socket_path = self.config.unix_socket_path.clone(); + + #[cfg(unix)] + let unix_router = self.router.clone(); + + let tcp_future = { + let router = self.router; + async move { + let listener = tokio::net::TcpListener::bind(addr).await?; + tracing::info!(addr = %addr, "API listening"); + axum::serve(listener, router).await + } + }; + + #[cfg(unix)] + { + if let Some(socket_path) = unix_socket_path { + let unix_future = async move { + if socket_path.exists() { + std::fs::remove_file(&socket_path)?; + } + let router = unix_router.layer(middleware::from_fn(inject_local_marker)); + let listener = tokio::net::UnixListener::bind(&socket_path)?; + tracing::info!(path = %socket_path.display(), "Unix socket listening"); + axum::serve(listener, router).await + }; + return tokio::try_join!(tcp_future, unix_future).map(|_| ()); + } + } + + #[cfg(not(unix))] + let _ = unix_socket_path; + + tcp_future.await + } +} + +#[cfg(unix)] +async fn inject_local_marker(mut request: Request, next: middleware::Next) -> Response { + request.extensions_mut().insert(auth::LocalSocketMarker); + next.run(request).await +} + +async fn strip_server_header(request: Request, next: middleware::Next) -> Response { + let mut response = next.run(request).await; + response.headers_mut().remove(axum::http::header::SERVER); + response +} + +async fn fallback_404() -> impl IntoResponse { + ApiError::NotFound("route not found".to_string()) +} + +fn make_auth_layer( + token_store: &Arc, + lockout: &Arc, + scope: &str, +) -> AuthLayer { + AuthLayer::new(Arc::clone(token_store), Arc::clone(lockout), Some(scope)) +} + +pub fn router(state: ApiState) -> Router { + let ApiState { store, token_store, lockout, backends } = state; + let ts = &token_store; + let lk = &lockout; + + let read = make_auth_layer(ts, lk, SCOPE_INSTANCES_READ); + let write_crud = make_auth_layer(ts, lk, SCOPE_INSTANCES_WRITE); + let write_action = make_auth_layer(ts, lk, SCOPE_INSTANCES_WRITE); + let upd_read = make_auth_layer(ts, lk, SCOPE_UPDATES_READ); + let upd_apply = make_auth_layer(ts, lk, SCOPE_UPDATES_APPLY); + let upd_apply_inst = make_auth_layer(ts, lk, SCOPE_UPDATES_APPLY); + let tok_mgr = make_auth_layer(ts, lk, SCOPE_TOKENS_MANAGE); + let inst_read_version = make_auth_layer(ts, lk, SCOPE_INSTANCES_READ); + + let instance_store = Arc::clone(&store); + let token_store_arc = Arc::clone(&token_store); + + let instances_read_router = Router::new() + .route("/v1/instances", get(instances::list_instances)) + .route("/v1/instances/{id}", get(instances::get_instance)) + .route("/v1/instances/{id}/status", get(instances::instance_status)) + .with_state(Arc::clone(&instance_store)) + .route_layer(middleware::from_fn_with_state(read, auth_middleware)); + + let instances_crud_router = Router::new() + .route("/v1/instances", post(instances::create_instance)) + .route("/v1/instances/{id}", patch(instances::patch_instance)) + .route("/v1/instances/{id}", delete(instances::delete_instance)) + .with_state(Arc::clone(&instance_store)) + .route_layer(middleware::from_fn_with_state(write_crud, auth_middleware)); + + let instances_action_router = Router::new() + .route("/v1/instances/{id}/start", post(instances::start_instance)) + .route("/v1/instances/{id}/stop", post(instances::stop_instance)) + .route("/v1/instances/{id}/restart", post(instances::restart_instance)) + .with_state(instances::InstanceActionState { + store: Arc::clone(&store), + backends: Arc::clone(&backends), + }) + .route_layer(middleware::from_fn_with_state(write_action, auth_middleware)); + + let instances_update_router = Router::new() + .route("/v1/instances/{id}/update", post(instances::update_instance)) + .with_state(instances::InstanceActionState { + store: Arc::clone(&store), + backends: Arc::clone(&backends), + }) + .route_layer(middleware::from_fn_with_state(upd_apply_inst, auth_middleware)); + + let updates_check_router = Router::new() + .route("/v1/updates/check", get(updates::check_updates)) + .route_layer(middleware::from_fn_with_state(upd_read, auth_middleware)); + + let updates_apply_router = Router::new() + .route("/v1/updates/launcher", post(updates::update_launcher)) + .route_layer(middleware::from_fn_with_state(upd_apply, auth_middleware)); + + let tokens_router = Router::new() + .route("/v1/tokens", get(tokens::list_tokens)) + .route("/v1/tokens", post(tokens::create_token)) + .route("/v1/tokens/{id}", delete(tokens::revoke_token)) + .with_state(token_store_arc) + .route_layer(middleware::from_fn_with_state(tok_mgr, auth_middleware)); + + let version_router = Router::new() + .route("/v1/version", get(health::version)) + .route_layer(middleware::from_fn_with_state(inst_read_version, auth_middleware)); + + Router::new() + .route("/v1/health", get(health::health)) + .merge(version_router) + .merge(instances_read_router) + .merge(instances_crud_router) + .merge(instances_action_router) + .merge(instances_update_router) + .merge(updates_check_router) + .merge(updates_apply_router) + .merge(tokens_router) + .fallback(fallback_404) + .layer(middleware::from_fn(strip_server_header)) + .layer(RequestBodyLimitLayer::new(256 * 1024)) + .layer(TraceLayer::new_for_http()) +} + +#[cfg(test)] +mod tests { + #![allow(clippy::unwrap_used, clippy::expect_used)] + + use std::sync::Arc; + use std::time::Duration; + + use axum_test::TestServer; + use serde_json::{Value, json}; + use tempfile::TempDir; + + use crate::{ + ApiState, + lockout::Lockout, + router, + scopes::*, + token_store::{TokenStore, default_scopes}, + }; + use octobot_launcher_config::Store; + use octobot_launcher_core::backend::Backend; + + fn make_state() -> (ApiState, TempDir) { + let dir = TempDir::new().expect("tempdir"); + let store = Arc::new(Store::new(dir.path().to_path_buf()).expect("store")); + let token_store = Arc::new(TokenStore::load(dir.path().join("tokens.json"))); + let lockout = Arc::new(Lockout::new()); + let backends: Arc>> = Arc::new(vec![ + Box::new(octobot_launcher_core::backend::mock::MockBackend::default()), + ]); + ( + ApiState { + store, + token_store, + lockout, + backends, + }, + dir, + ) + } + + fn make_server(state: ApiState) -> TestServer { + TestServer::new(router(state)) + } + + mod auth { + use super::*; + + #[tokio::test] + async fn missing_header_returns_401() { + let (state, _dir) = make_state(); + let server = make_server(state); + let resp = server.get("/v1/instances").await; + assert_eq!(resp.status_code(), 401); + } + + #[tokio::test] + async fn wrong_prefix_returns_401_fast() { + let (state, _dir) = make_state(); + let server = make_server(state); + let start = std::time::Instant::now(); + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + "Bearer wrongprefix_abc123".parse::().unwrap(), + ) + .await; + let elapsed = start.elapsed(); + assert_eq!(resp.status_code(), 401); + assert!( + elapsed < Duration::from_millis(50), + "should be fast, was {:?}", + elapsed + ); + } + + #[tokio::test] + async fn valid_token_authenticates() { + let (state, _dir) = make_state(); + let (_, raw) = state.token_store.create("test".into(), default_scopes(), None).unwrap(); + let server = make_server(state); + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 200); + } + + #[tokio::test] + async fn revoked_token_rejected() { + let (state, _dir) = make_state(); + let (record, raw) = + state.token_store.create("test".into(), default_scopes(), None).unwrap(); + state.token_store.revoke(&record.id).unwrap(); + let server = make_server(state); + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 401); + } + + #[tokio::test] + async fn expired_token_rejected() { + let (state, _dir) = make_state(); + let past = chrono::Utc::now() - chrono::Duration::seconds(1); + let (_, raw) = + state.token_store.create("test".into(), default_scopes(), Some(past)).unwrap(); + let server = make_server(state); + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 401); + } + + #[tokio::test] + async fn scope_required_for_route() { + let (state, _dir) = make_state(); + let (_, raw) = state.token_store.create( + "limited".into(), + vec![SCOPE_INSTANCES_READ.to_string()], + None, + ).unwrap(); + let server = make_server(state); + let resp = server + .post("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .json(&json!({ + "name": "test", + "runtime": "Docker", + "version": "1.0", + "data_dir": "/data", + "config_dir": "/config" + })) + .await; + assert_eq!(resp.status_code(), 403); + } + + #[tokio::test] + async fn wildcard_scope_grants_all() { + let (state, _dir) = make_state(); + let (_, raw) = state + .token_store + .create("admin".into(), vec![SCOPE_WILDCARD.to_string()], None).unwrap(); + let server = make_server(state); + let hval: axum::http::HeaderValue = + format!("Bearer {}", raw).parse::().unwrap(); + + let resp = server + .get("/v1/instances") + .add_header("authorization".parse::().unwrap(), hval.clone()) + .await; + assert_eq!(resp.status_code(), 200); + + let resp = server + .get("/v1/tokens") + .add_header("authorization".parse::().unwrap(), hval.clone()) + .await; + assert_eq!(resp.status_code(), 200); + + let resp = server + .get("/v1/updates/check") + .add_header("authorization".parse::().unwrap(), hval) + .await; + assert_eq!(resp.status_code(), 200); + } + + #[tokio::test] + async fn last_used_updated_async() { + let (state, _dir) = make_state(); + let (record, raw) = + state.token_store.create("test".into(), default_scopes(), None).unwrap(); + let token_store = Arc::clone(&state.token_store); + let server = make_server(state); + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 200); + + tokio::time::sleep(Duration::from_millis(50)).await; + let tokens = token_store.list(); + let updated = tokens.iter().find(|r| r.id == record.id).unwrap(); + assert!(updated.last_used_at.is_some()); + } + + #[tokio::test] + async fn file_change_reloaded() { + let dir = TempDir::new().expect("tempdir"); + let token_path = dir.path().join("tokens.json"); + let store = + Arc::new(Store::new(dir.path().to_path_buf()).expect("store")); + let token_store = Arc::new(TokenStore::load(token_path.clone())); + let lockout = Arc::new(Lockout::new()); + let state = ApiState { + store, + token_store: Arc::clone(&token_store), + lockout, + backends: Arc::new(vec![]), + }; + + let new_store = TokenStore::load(token_path); + let (_, raw) = + new_store.create("external".into(), default_scopes(), None).unwrap(); + new_store.save().expect("save"); + + token_store.reload(); + + let server = make_server(state); + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 200); + } + } + + mod socket { + use super::*; + use crate::auth::LocalSocketMarker; + use axum::{body::Body, extract::Request, middleware, response::Response}; + + async fn inject_local(mut req: Request, next: middleware::Next) -> Response { + req.extensions_mut().insert(LocalSocketMarker); + next.run(req).await + } + + #[tokio::test] + async fn unix_socket_skips_auth() { + let (state, _dir) = make_state(); + let app = router(state).layer(middleware::from_fn(inject_local)); + let server = TestServer::new(app); + let resp = server.get("/v1/instances").await; + assert_eq!(resp.status_code(), 200); + } + } + + mod tokens_tests { + use super::*; + + fn admin_token(state: &ApiState) -> String { + let (_, raw) = state + .token_store + .create("admin".into(), vec![SCOPE_WILDCARD.to_string()], None).unwrap(); + raw + } + + #[tokio::test] + async fn create_returns_raw_once() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let resp = server + .post("/v1/tokens") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .json(&json!({ "label": "new-token" })) + .await; + assert_eq!(resp.status_code(), 201); + let body: Value = resp.json(); + assert!(body.get("raw_token").is_some()); + assert!(body["raw_token"].as_str().unwrap().starts_with("oblch_")); + + let list_resp = server + .get("/v1/tokens") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + let list_body: Value = list_resp.json(); + let arr = list_body.as_array().unwrap(); + for item in arr { + assert!(item.get("raw_token").is_none()); + } + } + + #[tokio::test] + async fn list_omits_hash_and_raw() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let resp = server + .get("/v1/tokens") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 200); + let body: Value = resp.json(); + let arr = body.as_array().unwrap(); + for item in arr { + assert!(item.get("argon2_hash").is_none()); + assert!(item.get("raw_token").is_none()); + } + } + + #[tokio::test] + async fn default_scopes_applied() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let resp = server + .post("/v1/tokens") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .json(&json!({ "label": "no-scopes-token" })) + .await; + assert_eq!(resp.status_code(), 201); + let body: Value = resp.json(); + let scopes = body["token"]["scopes"].as_array().unwrap(); + assert!(scopes.iter().any(|s| s == SCOPE_INSTANCES_READ)); + assert!(scopes.iter().any(|s| s == SCOPE_INSTANCES_WRITE)); + assert!(scopes.iter().any(|s| s == SCOPE_UPDATES_READ)); + } + + #[tokio::test] + async fn admin_scope_required_to_grant_admin() { + let (state, _dir) = make_state(); + let (_, limited_raw) = state.token_store.create( + "limited".into(), + vec![SCOPE_INSTANCES_READ.to_string()], + None, + ).unwrap(); + let server = make_server(state); + let resp = server + .post("/v1/tokens") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", limited_raw).parse::().unwrap(), + ) + .json(&json!({ "label": "new" })) + .await; + assert_eq!(resp.status_code(), 403); + } + } + + mod bootstrap_tests { + use super::*; + + #[test] + fn first_boot_creates_admin_token() { + let dir = TempDir::new().expect("tempdir"); + let store = TokenStore::load(dir.path().join("tokens.json")); + let raw = store.bootstrap_if_empty().unwrap(); + assert!(raw.is_some()); + let raw = raw.unwrap(); + let record = store.verify(&raw).expect("should verify"); + assert!(record.scopes.contains(&SCOPE_WILDCARD.to_string())); + } + } + + mod lockout_tests { + use super::*; + + #[tokio::test] + async fn ten_failed_attempts_locks_ip() { + let (state, _dir) = make_state(); + let server = make_server(state); + let bad_token = "oblch_wrongtoken12345678901234567890123456"; + let attacker_ip = "10.99.0.1"; + + for _ in 0..10 { + let _ = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", bad_token).parse::().unwrap(), + ) + .add_header("x-forwarded-for".parse::().unwrap(), attacker_ip.parse::().unwrap()) + .await; + } + + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", bad_token).parse::().unwrap(), + ) + .add_header("x-forwarded-for".parse::().unwrap(), attacker_ip.parse::().unwrap()) + .await; + assert_eq!(resp.status_code(), 429); + } + + #[test] + fn expires_after_5_minutes() { + let lockout = Lockout::new(); + let ip = "127.0.0.1"; + for _ in 0..10 { + lockout.record_failure(ip); + } + assert!(!lockout.check(ip)); + lockout.unlock(ip); + assert!(lockout.check(ip)); + } + + #[test] + fn lru_capped() { + let lockout = Lockout::new(); + for i in 0..2000_u32 { + let ip = format!("10.{}.{}.{}", i / 65536, (i / 256) % 256, i % 256); + lockout.record_failure(&ip); + } + assert!(lockout.len() <= 1024); + } + + #[tokio::test] + async fn separate_ips_dont_interfere() { + let (state, _dir) = make_state(); + let (_, valid_raw) = + state.token_store.create("test".into(), default_scopes(), None).unwrap(); + let server = make_server(state); + let bad_token = "oblch_wrongtoken12345678901234567890123456"; + let attacker_ip = "10.100.0.1"; + let good_ip = "10.200.0.1"; + + for _ in 0..10 { + let _ = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", bad_token).parse::().unwrap(), + ) + .add_header("x-forwarded-for".parse::().unwrap(), attacker_ip.parse::().unwrap()) + .await; + } + + let resp = server + .get("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", valid_raw).parse::().unwrap(), + ) + .add_header("x-forwarded-for".parse::().unwrap(), good_ip.parse::().unwrap()) + .await; + assert_eq!(resp.status_code(), 200); + } + } + + mod routes_tests { + use super::*; + + #[tokio::test] + async fn health_no_auth() { + let (state, _dir) = make_state(); + let server = make_server(state); + let resp = server.get("/v1/health").await; + assert_eq!(resp.status_code(), 200); + } + + #[tokio::test] + async fn health_no_leak() { + let (state, _dir) = make_state(); + let server = make_server(state); + let resp = server.get("/v1/health").await; + let body: Value = resp.json(); + assert_eq!(body, json!({ "status": "ok" })); + } + + #[tokio::test] + async fn server_header_stripped() { + let (state, _dir) = make_state(); + let server = make_server(state); + let resp = server.get("/v1/health").await; + assert!(resp.headers().get("server").is_none()); + } + + #[tokio::test] + async fn body_limit_enforced() { + let (state, _dir) = make_state(); + let (_, raw) = state + .token_store + .create("admin".into(), vec![SCOPE_WILDCARD.to_string()], None).unwrap(); + let server = make_server(state); + let big_body = "x".repeat(256 * 1024 + 1); + let resp = server + .post("/v1/instances") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .content_type("application/json") + .text(big_body) + .await; + assert!(resp.status_code() == 413 || resp.status_code() == 415); + } + + #[tokio::test] + async fn create_then_get_status_by_id() { + let (state, _dir) = make_state(); + let (_, raw) = state + .token_store + .create("admin".into(), vec![SCOPE_WILDCARD.to_string()], None).unwrap(); + let server = make_server(state); + let auth_val: axum::http::HeaderValue = + format!("Bearer {}", raw).parse::().unwrap(); + + let create_resp = server + .post("/v1/instances") + .add_header("authorization".parse::().unwrap(), auth_val.clone()) + .json(&json!({ + "name": "my-bot", + "runtime": "Docker", + "version": "1.0.0", + "data_dir": "/tmp/data", + "config_dir": "/tmp/config" + })) + .await; + assert_eq!(create_resp.status_code(), 201); + let created: Value = create_resp.json(); + let id = created["spec"]["id"].as_str().unwrap().to_string(); + + let status_resp = server + .get(&format!("/v1/instances/{}/status", id)) + .add_header("authorization".parse::().unwrap(), auth_val.clone()) + .await; + assert_eq!(status_resp.status_code(), 200); + let status: Value = status_resp.json(); + assert_eq!(status["id"].as_str().unwrap(), id); + } + + #[tokio::test] + async fn problem_details_on_error() { + let (state, _dir) = make_state(); + let (_, raw) = state + .token_store + .create("admin".into(), vec![SCOPE_WILDCARD.to_string()], None).unwrap(); + let server = make_server(state); + let resp = server + .get("/v1/instances/nonexistent-id-abc") + .add_header( + "authorization".parse::().unwrap(), + format!("Bearer {}", raw).parse::().unwrap(), + ) + .await; + assert_eq!(resp.status_code(), 404); + let ct = resp + .headers() + .get("content-type") + .unwrap() + .to_str() + .unwrap(); + assert!(ct.contains("application/problem+json")); + } + } + + mod audit_tests { + use super::*; + + #[tokio::test] + async fn token_id_logged_not_raw() { + let (state, _dir) = make_state(); + let (record, raw) = + state.token_store.create("test".into(), default_scopes(), None).unwrap(); + assert!(record.id.starts_with("tok_")); + assert!(!record.id.starts_with("oblch_")); + assert!(raw.starts_with("oblch_")); + assert_ne!(record.id, raw); + } + } + + mod instance_crud_tests { + use super::*; + + fn admin_token(state: &ApiState) -> String { + let (_, raw) = state + .token_store + .create("admin".into(), vec![SCOPE_WILDCARD.to_string()], None).unwrap(); + raw + } + + fn parse_sse_done(body: &[u8]) -> serde_json::Value { + let text = std::str::from_utf8(body).unwrap(); + for line in text.lines() { + if let Some(data) = line.strip_prefix("data: ") { + if let Ok(val) = serde_json::from_str::(data) { + if val.get("done").is_some() { + return val; + } + } + } + } + panic!("no done event in SSE body:\n{text}"); + } + + fn auth_header(raw: &str) -> (axum::http::header::HeaderName, axum::http::HeaderValue) { + ( + "authorization".parse().unwrap(), + format!("Bearer {raw}").parse().unwrap(), + ) + } + + #[tokio::test] + async fn add_then_list_returns_instance() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let create = server + .post("/v1/instances") + .add_header(name.clone(), val.clone()) + .json(&json!({ "name": "my-bot", "runtime": "Docker" })) + .await; + assert_eq!(create.status_code(), 201); + + let list = server.get("/v1/instances").add_header(name, val).await; + assert_eq!(list.status_code(), 200); + let body: Vec = list.json(); + assert_eq!(body.len(), 1); + assert_eq!(body[0]["spec"]["name"], "my-bot"); + } + + #[tokio::test] + async fn add_defaults_data_dir_under_store_root() { + let (state, dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let resp = server + .post("/v1/instances") + .add_header(name, val) + .json(&json!({ "name": "bot", "runtime": "Binary" })) + .await; + assert_eq!(resp.status_code(), 201); + let body: Value = resp.json(); + let data_dir = body["spec"]["data_dir"].as_str().unwrap(); + assert!( + data_dir.starts_with(dir.path().to_str().unwrap()), + "data_dir should be under store root, got {data_dir}" + ); + } + + #[tokio::test] + async fn add_then_start_desired_state_running() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let create = server + .post("/v1/instances") + .add_header(name.clone(), val.clone()) + .json(&json!({ "name": "bot", "runtime": "Binary" })) + .await; + assert_eq!(create.status_code(), 201); + let id = create.json::()["spec"]["id"].as_str().unwrap().to_string(); + + let start = server + .post(&format!("/v1/instances/{id}/start")) + .add_header(name.clone(), val.clone()) + .json(&json!({})) + .await; + assert_eq!(start.status_code(), 200); + + let status = server + .get(&format!("/v1/instances/{id}/status")) + .add_header(name, val) + .await; + assert_eq!(status.status_code(), 200); + assert_eq!(status.json::()["desired_state"], "Running"); + } + + #[tokio::test] + async fn add_then_delete_then_start_is_404() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let create = server + .post("/v1/instances") + .add_header(name.clone(), val.clone()) + .json(&json!({ "name": "bot", "runtime": "Python" })) + .await; + let id = create.json::()["spec"]["id"].as_str().unwrap().to_string(); + + let del = server + .delete(&format!("/v1/instances/{id}")) + .add_header(name.clone(), val.clone()) + .await; + assert_eq!(del.status_code(), 204); + + let start = server + .post(&format!("/v1/instances/{id}/start")) + .add_header(name, val) + .json(&json!({})) + .await; + assert_eq!(start.status_code(), 200); + let body = start.text(); + let done = parse_sse_done(body.as_bytes()); + assert!(done["done"]["err"].is_string(), "expected done.err: {done}"); + } + + #[tokio::test] + async fn start_nonexistent_returns_sse_err() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let resp = server + .post("/v1/instances/no-such-id/start") + .add_header(name, val) + .json(&json!({})) + .await; + assert_eq!(resp.status_code(), 200); + let body = resp.text(); + let done = parse_sse_done(body.as_bytes()); + assert!(done["done"]["err"].is_string(), "expected done.err: {done}"); + } + + #[tokio::test] + async fn short_id_prefix_resolves() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let create = server + .post("/v1/instances") + .add_header(name.clone(), val.clone()) + .json(&json!({ "name": "bot", "runtime": "Docker" })) + .await; + let full_id = create.json::()["spec"]["id"].as_str().unwrap().to_string(); + let short = &full_id[..8]; + + let status = server + .get(&format!("/v1/instances/{short}/status")) + .add_header(name, val) + .await; + assert_eq!(status.status_code(), 200); + } + + #[tokio::test] + async fn delete_then_list_is_empty() { + let (state, _dir) = make_state(); + let raw = admin_token(&state); + let server = make_server(state); + let (name, val) = auth_header(&raw); + + let create = server + .post("/v1/instances") + .add_header(name.clone(), val.clone()) + .json(&json!({ "name": "bot", "runtime": "Docker" })) + .await; + let id = create.json::()["spec"]["id"].as_str().unwrap().to_string(); + + server + .delete(&format!("/v1/instances/{id}")) + .add_header(name.clone(), val.clone()) + .await; + + let list = server.get("/v1/instances").add_header(name, val).await; + let body: Vec = list.json(); + assert!(body.is_empty()); + } + } + + mod bind_tests { + use super::*; + use std::net::SocketAddr; + + #[tokio::test] + async fn non_loopback_warns_on_startup() { + let addr: SocketAddr = "0.0.0.0:0".parse().unwrap(); + assert!( + !addr.ip().is_loopback(), + "0.0.0.0 should not be loopback" + ); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/lockout.rs b/packages/launcher/crates/octobot-launcher-api/src/lockout.rs new file mode 100644 index 0000000000..fe687079de --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/lockout.rs @@ -0,0 +1,162 @@ +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +use parking_lot::Mutex; + +const MAX_FAILURES: u32 = 10; +const WINDOW_SECS: u64 = 60; +const LOCKOUT_SECS: u64 = 300; +const MAX_ENTRIES: usize = 1024; + +#[derive(Debug)] +struct LockoutEntry { + fail_count: u32, + window_start: Instant, + locked_until: Option, +} + +#[derive(Debug)] +pub struct Lockout { + inner: Mutex>, +} + +impl Lockout { + pub fn new() -> Self { + Self { + inner: Mutex::new(HashMap::new()), + } + } + + pub fn check(&self, ip: &str) -> bool { + let map = self.inner.lock(); + match map.get(ip) { + Some(entry) => { + if let Some(until) = entry.locked_until { + Instant::now() >= until + } else { + true + } + } + None => true, + } + } + + pub fn record_failure(&self, ip: &str) { + let mut map = self.inner.lock(); + let now = Instant::now(); + + if map.len() >= MAX_ENTRIES && !map.contains_key(ip) { + let oldest_key = map + .iter() + .min_by_key(|(_, v)| v.window_start) + .map(|(k, _)| k.clone()); + if let Some(key) = oldest_key { + map.remove(&key); + } + } + + let entry = map.entry(ip.to_string()).or_insert_with(|| LockoutEntry { + fail_count: 0, + window_start: now, + locked_until: None, + }); + + if now.duration_since(entry.window_start) > Duration::from_secs(WINDOW_SECS) { + entry.fail_count = 0; + entry.window_start = now; + entry.locked_until = None; + } + + entry.fail_count += 1; + + if entry.fail_count >= MAX_FAILURES { + entry.locked_until = Some(now + Duration::from_secs(LOCKOUT_SECS)); + } + } + + pub fn record_success(&self, ip: &str) { + let mut map = self.inner.lock(); + map.remove(ip); + } + + pub fn unlock(&self, ip: &str) { + let mut map = self.inner.lock(); + if let Some(entry) = map.get_mut(ip) { + entry.locked_until = None; + entry.fail_count = 0; + } + } + + pub fn len(&self) -> usize { + self.inner.lock().len() + } + + pub fn is_empty(&self) -> bool { + self.inner.lock().is_empty() + } +} + +impl Default for Lockout { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + #![allow(clippy::unwrap_used, clippy::expect_used)] + use super::*; + + #[test] + fn ten_failed_attempts_locks_ip() { + let lockout = Lockout::new(); + let ip = "192.168.1.1"; + + assert!(lockout.check(ip)); + + for _ in 0..10 { + lockout.record_failure(ip); + } + + assert!(!lockout.check(ip)); + } + + #[test] + fn lru_capped() { + let lockout = Lockout::new(); + + for i in 0..2000_u32 { + let ip = format!("10.0.{}.{}", i / 256, i % 256); + lockout.record_failure(&ip); + } + + assert!(lockout.len() <= MAX_ENTRIES); + } + + #[test] + fn separate_ips_dont_interfere() { + let lockout = Lockout::new(); + let ip_a = "10.0.0.1"; + + for _ in 0..10 { + lockout.record_failure(ip_a); + } + + assert!(!lockout.check(ip_a)); + assert!(lockout.check("10.0.0.2")); + } + + #[test] + fn expires_after_lockout_manual_unlock() { + let lockout = Lockout::new(); + let ip = "10.1.2.3"; + + for _ in 0..10 { + lockout.record_failure(ip); + } + + assert!(!lockout.check(ip)); + lockout.unlock(ip); + assert!(lockout.check(ip)); + } +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/routes/health.rs b/packages/launcher/crates/octobot-launcher-api/src/routes/health.rs new file mode 100644 index 0000000000..6fed77bf5d --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/routes/health.rs @@ -0,0 +1,10 @@ +use axum::{Json, response::IntoResponse}; +use serde_json::json; + +pub async fn health() -> impl IntoResponse { + Json(json!({ "status": "ok" })) +} + +pub async fn version() -> impl IntoResponse { + Json(json!({ "version": env!("CARGO_PKG_VERSION") })) +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/routes/instances.rs b/packages/launcher/crates/octobot-launcher-api/src/routes/instances.rs new file mode 100644 index 0000000000..16c764026d --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/routes/instances.rs @@ -0,0 +1,565 @@ +use std::collections::BTreeMap; +use std::convert::Infallible; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Arc; +use std::time::Duration; + +use axum::{ + Json, + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Response, sse::{Event, KeepAlive, Sse}}, +}; +use futures_util::{FutureExt, Stream}; +use octobot_launcher_config::{DesiredState, InstanceRecord, Store}; +use octobot_launcher_core::backend::Backend; +use octobot_launcher_core::error::LauncherError; +use octobot_launcher_core::{InstanceId, InstanceSpec, InstanceState, RuntimeKind}; +use octobot_launcher_core::model::PortMapping; +use serde::Deserialize; +use serde_json::json; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_stream::StreamExt as TokioStreamExt; + +use crate::error::ApiError; + +#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct InstanceActionState { + pub store: Arc, + pub backends: Arc>>, +} + +#[derive(Debug, Deserialize)] +pub struct CreateInstanceBody { + pub name: String, + pub runtime: RuntimeKind, + #[serde(default = "default_version")] + pub version: String, + pub data_dir: Option, + pub config_dir: Option, + #[serde(default)] + pub env: BTreeMap, + #[serde(default)] + pub ports: Vec, + #[serde(default)] + pub auto_restart: bool, + #[serde(default)] + pub auto_update: bool, + #[serde(default)] + pub runtime_options: serde_json::Value, +} + +fn default_version() -> String { + "latest".to_string() +} + +#[derive(Debug, Deserialize, Default)] +pub struct PatchInstanceBody { + pub name: Option, + pub version: Option, + pub auto_restart: Option, + pub auto_update: Option, + pub env: Option>, + pub runtime_options: Option, +} + +type EventStream = Pin> + Send>>; + +fn error_stream(msg: impl Into) -> impl IntoResponse { + let data = json!({"done": {"err": msg.into()}}).to_string(); + let stream: EventStream = + Box::pin(tokio_stream::once(Ok(Event::default().data(data)))); + Sse::new(stream) +} + +fn progress_stream(rx: tokio::sync::mpsc::UnboundedReceiver) -> impl IntoResponse { + let stream: EventStream = Box::pin( + UnboundedReceiverStream::new(rx) + .map(|data| Ok(Event::default().data(data))), + ); + Sse::new(stream).keep_alive( + KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("heartbeat"), + ) +} + +fn panic_message(p: &Box) -> String { + if let Some(s) = p.downcast_ref::<&'static str>() { + return (*s).to_string(); + } + if let Some(s) = p.downcast_ref::() { + return s.clone(); + } + "unknown panic".to_string() +} + +pub async fn list_instances( + State(store): State>, +) -> Result { + let records = store + .list_instance_records() + .map_err(|e| ApiError::Internal(e.to_string()))?; + Ok(Json(records)) +} + +pub async fn create_instance( + State(store): State>, + Json(body): Json, +) -> Result { + let id = InstanceId::new(); + let data_dir = body.data_dir.unwrap_or_else(|| { + store.data_root().join("instances").join(id.0.to_string()) + }); + let config_dir = body.config_dir.unwrap_or_else(|| data_dir.join("config")); + let spec = InstanceSpec { + id, + name: body.name, + runtime: body.runtime, + version: body.version, + data_dir, + config_dir, + env: body.env, + ports: body.ports, + auto_restart: body.auto_restart, + auto_update: body.auto_update, + runtime_options: body.runtime_options, + }; + let record = InstanceRecord { + spec: spec.clone(), + desired_state: DesiredState::Stopped, + last_known_state: InstanceState::Stopped, + created_at: chrono::Utc::now(), + updated_at: chrono::Utc::now(), + }; + + let path = store.instances_dir().join(format!("{}.json", spec.id.0)); + store + .write_atomic(&path, &record) + .map_err(|e| ApiError::Internal(e.to_string()))?; + + Ok((StatusCode::CREATED, Json(record))) +} + +pub async fn get_instance( + State(store): State>, + Path(id): Path, +) -> Result { + let record = find_instance(&store, &id)?; + Ok(Json(record)) +} + +pub async fn patch_instance( + State(store): State>, + Path(id): Path, + Json(body): Json, +) -> Result { + let mut record = find_instance(&store, &id)?; + + if let Some(name) = body.name { + record.spec.name = name; + } + if let Some(version) = body.version { + record.spec.version = version; + } + if let Some(auto_restart) = body.auto_restart { + record.spec.auto_restart = auto_restart; + } + if let Some(auto_update) = body.auto_update { + record.spec.auto_update = auto_update; + } + if let Some(env) = body.env { + record.spec.env = env; + } + if let Some(runtime_options) = body.runtime_options { + record.spec.runtime_options = runtime_options; + } + record.updated_at = chrono::Utc::now(); + + let path = store.instances_dir().join(format!("{}.json", record.spec.id.0)); + store + .write_atomic(&path, &record) + .map_err(|e| ApiError::Internal(e.to_string()))?; + + Ok(Json(record)) +} + +pub async fn delete_instance( + State(store): State>, + Path(id): Path, +) -> Result { + let record = find_instance(&store, &id)?; + let path = store.instances_dir().join(format!("{}.json", record.spec.id.0)); + store + .delete(&path) + .map_err(|e| ApiError::Internal(e.to_string()))?; + Ok(StatusCode::NO_CONTENT) +} + +pub async fn start_instance( + State(state): State, + Path(id): Path, +) -> Response { + let record = match find_instance(&state.store, &id) { + Ok(r) => r, + Err(e) => return error_stream(e.to_string()).into_response(), + }; + let runtime = record.spec.runtime; + if !state.backends.iter().any(|b| b.kind() == runtime) { + return error_stream(format!("no backend for runtime {runtime:?}")).into_response(); + } + + let (tx, rx) = tokio::sync::mpsc::unbounded_channel::(); + let backends = state.backends.clone(); + let store = state.store.clone(); + + tokio::spawn(async move { + let backend = match backends.iter().find(|b| b.kind() == runtime) { + Some(b) => b, + None => { + let _ = tx.send(json!({"done": {"err": "no backend"}}).to_string()); + return; + } + }; + let spec_id = record.spec.id; + let result = std::panic::AssertUnwindSafe(backend.start(&record.spec, Some(&tx))) + .catch_unwind() + .await; + match result { + Ok(Ok(())) => { + let records = store.list_instance_records().unwrap_or_default(); + let mut record = match records.into_iter().find(|r| r.spec.id == spec_id) { + Some(r) => r, + None => { + let _ = tx.send(json!({"done": {"err": "instance not found after start"}}).to_string()); + return; + } + }; + record.desired_state = DesiredState::Running; + if let Ok(health) = backend.status(spec_id).await { + record.last_known_state = health.state; + } + record.updated_at = chrono::Utc::now(); + let path = store.instances_dir().join(format!("{}.json", record.spec.id.0)); + if let Err(e) = store.write_atomic(&path, &record) { + let _ = tx.send(json!({"done": {"err": e.to_string()}}).to_string()); + return; + } + let _ = tx.send(json!({"done": {"ok": record}}).to_string()); + } + Ok(Err(e)) => { + let _ = tx.send(json!({"done": {"err": e.to_string()}}).to_string()); + } + Err(panic) => { + let msg = panic_message(&panic); + let _ = tx.send(json!({"done": {"err": format!("daemon panic: {msg}")}}).to_string()); + } + } + }); + + progress_stream(rx).into_response() +} + +pub async fn stop_instance( + State(state): State, + Path(id): Path, +) -> Result { + let mut record = find_instance(&state.store, &id)?; + let backend = state + .backends + .iter() + .find(|b| b.kind() == record.spec.runtime) + .ok_or_else(|| ApiError::ServiceUnavailable(format!("no backend for runtime {:?}", record.spec.runtime)))?; + match backend.stop(&record.spec, Duration::from_secs(10)).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(ApiError::Internal(e.to_string())), + } + record.desired_state = DesiredState::Stopped; + record.updated_at = chrono::Utc::now(); + let path = state.store.instances_dir().join(format!("{}.json", record.spec.id.0)); + state + .store + .write_atomic(&path, &record) + .map_err(|e| ApiError::Internal(e.to_string()))?; + Ok(Json(record)) +} + +pub async fn restart_instance( + State(state): State, + Path(id): Path, +) -> Response { + let record = match find_instance(&state.store, &id) { + Ok(r) => r, + Err(e) => return error_stream(e.to_string()).into_response(), + }; + let runtime = record.spec.runtime; + if !state.backends.iter().any(|b| b.kind() == runtime) { + return error_stream(format!("no backend for runtime {runtime:?}")).into_response(); + } + + let (tx, rx) = tokio::sync::mpsc::unbounded_channel::(); + let backends = state.backends.clone(); + let store = state.store.clone(); + + tokio::spawn(async move { + let backend = match backends.iter().find(|b| b.kind() == runtime) { + Some(b) => b, + None => { + let _ = tx.send(json!({"done": {"err": "no backend"}}).to_string()); + return; + } + }; + let spec_id = record.spec.id; + let result = std::panic::AssertUnwindSafe( + backend.restart(&record.spec, Duration::from_secs(10), Some(&tx)), + ) + .catch_unwind() + .await; + match result { + Ok(Ok(())) => { + let records = store.list_instance_records().unwrap_or_default(); + let mut record = match records.into_iter().find(|r| r.spec.id == spec_id) { + Some(r) => r, + None => { + let _ = tx.send(json!({"done": {"err": "instance not found after restart"}}).to_string()); + return; + } + }; + record.desired_state = DesiredState::Running; + record.updated_at = chrono::Utc::now(); + let path = store.instances_dir().join(format!("{}.json", record.spec.id.0)); + if let Err(e) = store.write_atomic(&path, &record) { + let _ = tx.send(json!({"done": {"err": e.to_string()}}).to_string()); + return; + } + let _ = tx.send(json!({"done": {"ok": record}}).to_string()); + } + Ok(Err(e)) => { + let _ = tx.send(json!({"done": {"err": e.to_string()}}).to_string()); + } + Err(panic) => { + let msg = panic_message(&panic); + let _ = tx.send(json!({"done": {"err": format!("daemon panic: {msg}")}}).to_string()); + } + } + }); + + progress_stream(rx).into_response() +} + +pub async fn update_instance( + State(state): State, + Path(id): Path, + Json(body): Json, +) -> Response { + let record = match find_instance(&state.store, &id) { + Ok(r) => r, + Err(e) => return error_stream(e.to_string()).into_response(), + }; + let runtime = record.spec.runtime; + if !state.backends.iter().any(|b| b.kind() == runtime) { + return error_stream(format!("no backend for runtime {runtime:?}")).into_response(); + } + + let target_version = body + .get("version") + .and_then(|v| v.as_str()) + .unwrap_or(&record.spec.version) + .to_string(); + + let (tx, rx) = tokio::sync::mpsc::unbounded_channel::(); + let backends = state.backends.clone(); + let store = state.store.clone(); + + tokio::spawn(async move { + let backend = match backends.iter().find(|b| b.kind() == runtime) { + Some(b) => b, + None => { + let _ = tx.send(json!({"done": {"err": "no backend"}}).to_string()); + return; + } + }; + let spec_id = record.spec.id; + let result = std::panic::AssertUnwindSafe( + backend.update(&record.spec, &target_version, Some(&tx)), + ) + .catch_unwind() + .await; + match result { + Ok(Ok(())) => { + let records = store.list_instance_records().unwrap_or_default(); + let mut record = match records.into_iter().find(|r| r.spec.id == spec_id) { + Some(r) => r, + None => { + let _ = tx.send(json!({"done": {"err": "instance not found after update"}}).to_string()); + return; + } + }; + record.spec.version = target_version; + record.desired_state = DesiredState::Running; + record.updated_at = chrono::Utc::now(); + let path = store.instances_dir().join(format!("{}.json", record.spec.id.0)); + if let Err(e) = store.write_atomic(&path, &record) { + let _ = tx.send(json!({"done": {"err": e.to_string()}}).to_string()); + return; + } + let _ = tx.send(json!({"done": {"ok": record}}).to_string()); + } + Ok(Err(e)) => { + let _ = tx.send(json!({"done": {"err": e.to_string()}}).to_string()); + } + Err(panic) => { + let msg = panic_message(&panic); + let _ = tx.send(json!({"done": {"err": format!("daemon panic: {msg}")}}).to_string()); + } + } + }); + + progress_stream(rx).into_response() +} + +pub async fn instance_status( + State(store): State>, + Path(id): Path, +) -> Result { + let record = find_instance(&store, &id)?; + Ok(Json(json!({ + "id": record.spec.id.0, + "state": record.last_known_state, + "desired_state": record.desired_state, + }))) +} + +fn find_instance(store: &Store, id: &str) -> Result { + let records = store + .list_instance_records() + .map_err(|e| ApiError::Internal(e.to_string()))?; + + if let Some(record) = records.iter().find(|r| r.spec.id.0.to_string() == id) { + return Ok(record.clone()); + } + + let mut matches: Vec = records + .into_iter() + .filter(|r| r.spec.id.0.to_string().starts_with(id)) + .collect(); + + match matches.len() { + 0 => Err(ApiError::NotFound(format!("instance '{id}' not found"))), + 1 => Ok(matches.remove(0)), + _ => Err(ApiError::BadRequest(format!( + "ambiguous id prefix '{id}': multiple instances match" + ))), + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; + use axum::body::to_bytes; + use axum::http::Request; + use axum::Router; + use axum::routing::post; + use octobot_launcher_core::backend::mock::MockBackend; + use octobot_launcher_config::Store; + use std::collections::BTreeMap; + use std::path::PathBuf; + use tempfile::TempDir; + + fn make_state(dir: &TempDir) -> InstanceActionState { + let store = Arc::new(Store::new(dir.path().to_path_buf()).unwrap()); + let backends: Arc>> = Arc::new(vec![Box::new(MockBackend::default())]); + InstanceActionState { store, backends } + } + + fn make_record(store: &Store) -> InstanceRecord { + let id = InstanceId::new(); + let spec = InstanceSpec { + id, + name: "test".into(), + runtime: RuntimeKind::Binary, + version: "1.0.0".into(), + data_dir: PathBuf::from("/tmp/data"), + config_dir: PathBuf::from("/tmp/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + }; + let record = InstanceRecord { + spec: spec.clone(), + desired_state: DesiredState::Stopped, + last_known_state: InstanceState::Stopped, + created_at: chrono::Utc::now(), + updated_at: chrono::Utc::now(), + }; + let path = store.instances_dir().join(format!("{}.json", spec.id.0)); + store.write_atomic(&path, &record).unwrap(); + record + } + + fn parse_sse_done(body: &[u8]) -> serde_json::Value { + let text = std::str::from_utf8(body).unwrap(); + for line in text.lines() { + if let Some(data) = line.strip_prefix("data: ") { + let val: serde_json::Value = serde_json::from_str(data).unwrap(); + if val.get("done").is_some() { + return val; + } + } + } + panic!("no done event in SSE body:\n{text}"); + } + + #[tokio::test] + async fn start_instance_sse_emits_done_ok() { + let dir = TempDir::new().unwrap(); + let state = make_state(&dir); + let record = make_record(&state.store); + let id = record.spec.id.0.to_string(); + + let router = Router::new() + .route("/instances/{id}/start", post(start_instance)) + .with_state(state); + + let req = Request::builder() + .method("POST") + .uri(format!("/instances/{id}/start")) + .header("content-type", "application/json") + .body(axum::body::Body::from("{}")) + .unwrap(); + + let resp = tower::ServiceExt::oneshot(router, req).await.unwrap(); + assert_eq!(resp.status(), 200); + + let body = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let done = parse_sse_done(&body); + assert!(done["done"]["ok"].is_object(), "expected done.ok: {done}"); + } + + #[tokio::test] + async fn start_instance_sse_unknown_id_emits_done_err() { + let dir = TempDir::new().unwrap(); + let state = make_state(&dir); + + let router = Router::new() + .route("/instances/{id}/start", post(start_instance)) + .with_state(state); + + let req = Request::builder() + .method("POST") + .uri("/instances/00000000-0000-0000-0000-000000000000/start") + .header("content-type", "application/json") + .body(axum::body::Body::from("{}")) + .unwrap(); + + let resp = tower::ServiceExt::oneshot(router, req).await.unwrap(); + let body = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let done = parse_sse_done(&body); + assert!(done["done"]["err"].is_string(), "expected done.err: {done}"); + } +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/routes/mod.rs b/packages/launcher/crates/octobot-launcher-api/src/routes/mod.rs new file mode 100644 index 0000000000..274f3ca3a8 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/routes/mod.rs @@ -0,0 +1,4 @@ +pub mod health; +pub mod instances; +pub mod tokens; +pub mod updates; diff --git a/packages/launcher/crates/octobot-launcher-api/src/routes/tokens.rs b/packages/launcher/crates/octobot-launcher-api/src/routes/tokens.rs new file mode 100644 index 0000000000..555acb87aa --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/routes/tokens.rs @@ -0,0 +1,89 @@ +use std::sync::Arc; + +use axum::{ + Json, + extract::{Path, State}, + response::IntoResponse, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + + +use crate::error::ApiError; +use crate::token_store::{TokenRecord, TokenStore, default_scopes}; + +#[derive(Debug, Deserialize)] +pub struct CreateTokenBody { + pub label: String, + pub scopes: Option>, + pub expires_at: Option>, +} + +#[derive(Debug, Serialize)] +pub struct TokenResponse { + pub id: String, + pub label: String, + pub scopes: Vec, + pub created_at: DateTime, + pub last_used_at: Option>, + pub expires_at: Option>, + pub revoked: bool, +} + +impl From for TokenResponse { + fn from(r: TokenRecord) -> Self { + Self { + id: r.id, + label: r.label, + scopes: r.scopes, + created_at: r.created_at, + last_used_at: r.last_used_at, + expires_at: r.expires_at, + revoked: r.revoked, + } + } +} + +#[derive(Debug, Serialize)] +pub struct CreateTokenResponse { + pub token: TokenResponse, + pub raw_token: String, +} + +pub async fn list_tokens( + State(token_store): State>, +) -> impl IntoResponse { + let records = token_store.list(); + let responses: Vec = records.into_iter().map(TokenResponse::from).collect(); + Json(responses) +} + +pub async fn create_token( + State(token_store): State>, + Json(body): Json, +) -> Result { + let scopes = body.scopes.unwrap_or_else(default_scopes); + let (record, raw_token) = token_store + .create(body.label, scopes, body.expires_at) + .map_err(|e| ApiError::Internal(e.to_string()))?; + + let response = CreateTokenResponse { + token: TokenResponse::from(record), + raw_token, + }; + + Ok((axum::http::StatusCode::CREATED, Json(response))) +} + +pub async fn revoke_token( + State(token_store): State>, + Path(id): Path, +) -> Result { + match token_store + .revoke(&id) + .map_err(|e| ApiError::Internal(e.to_string()))? + { + true => Ok(axum::http::StatusCode::NO_CONTENT), + false => Err(ApiError::NotFound(format!("token '{id}' not found"))), + } +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/routes/updates.rs b/packages/launcher/crates/octobot-launcher-api/src/routes/updates.rs new file mode 100644 index 0000000000..e06a9be998 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/routes/updates.rs @@ -0,0 +1,13 @@ +use axum::{Json, response::IntoResponse}; +use serde_json::json; + +pub async fn check_updates() -> impl IntoResponse { + Json(json!({ + "status": "ok", + "updates_available": false, + })) +} + +pub async fn update_launcher() -> impl IntoResponse { + Json(json!({ "status": "accepted" })) +} diff --git a/packages/launcher/crates/octobot-launcher-api/src/scopes.rs b/packages/launcher/crates/octobot-launcher-api/src/scopes.rs new file mode 100644 index 0000000000..e28f3f47ad --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/scopes.rs @@ -0,0 +1,6 @@ +pub const SCOPE_INSTANCES_READ: &str = "instances:read"; +pub const SCOPE_INSTANCES_WRITE: &str = "instances:write"; +pub const SCOPE_UPDATES_READ: &str = "updates:read"; +pub const SCOPE_UPDATES_APPLY: &str = "updates:apply"; +pub const SCOPE_TOKENS_MANAGE: &str = "tokens:manage"; +pub const SCOPE_WILDCARD: &str = "*"; diff --git a/packages/launcher/crates/octobot-launcher-api/src/token_store.rs b/packages/launcher/crates/octobot-launcher-api/src/token_store.rs new file mode 100644 index 0000000000..37dd516e8c --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-api/src/token_store.rs @@ -0,0 +1,312 @@ +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use argon2::{Argon2, PasswordHash, PasswordHasher, PasswordVerifier}; +use base64::Engine as _; +use chrono::{DateTime, Utc}; +use parking_lot::RwLock; +use rand::RngCore; +use serde::{Deserialize, Serialize}; + +use crate::scopes::{ + SCOPE_INSTANCES_READ, SCOPE_INSTANCES_WRITE, SCOPE_UPDATES_READ, SCOPE_WILDCARD, +}; + +pub const TOKEN_PREFIX: &str = "oblch_"; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TokenRecord { + pub id: String, + pub label: String, + pub argon2_hash: String, + pub scopes: Vec, + pub created_at: DateTime, + pub last_used_at: Option>, + pub expires_at: Option>, + pub revoked: bool, +} + +#[derive(Debug)] +pub struct TokenStore { + tokens: RwLock>, + path: PathBuf, +} + +impl TokenStore { + pub fn load(path: PathBuf) -> Self { + let tokens = Self::load_from_file(&path).unwrap_or_default(); + Self { + tokens: RwLock::new(tokens), + path, + } + } + + fn load_from_file(path: &Path) -> Option> { + let content = std::fs::read_to_string(path).ok()?; + serde_json::from_str(&content).ok() + } + + pub fn verify(&self, raw_token: &str) -> Option { + if !raw_token.starts_with(TOKEN_PREFIX) { + return None; + } + + let now = Utc::now(); + let tokens = self.tokens.read(); + + for record in tokens.iter() { + if record.revoked { + continue; + } + if let Some(expires_at) = record.expires_at { + if now > expires_at { + continue; + } + } + + let Ok(parsed) = PasswordHash::new(&record.argon2_hash) else { + continue; + }; + let argon2 = build_argon2(); + if argon2 + .verify_password(raw_token.as_bytes(), &parsed) + .is_ok() + { + return Some(record.clone()); + } + } + + None + } + + pub fn create( + &self, + label: String, + scopes: Vec, + expires_at: Option>, + ) -> std::io::Result<(TokenRecord, String)> { + let raw_token = generate_raw_token(); + let id = generate_token_id(); + let argon2_hash = hash_token(&raw_token); + + let record = TokenRecord { + id, + label, + argon2_hash, + scopes, + created_at: Utc::now(), + last_used_at: None, + expires_at, + revoked: false, + }; + + self.tokens.write().push(record.clone()); + self.save()?; + + Ok((record, raw_token)) + } + + pub fn revoke(&self, id: &str) -> std::io::Result { + let mut tokens = self.tokens.write(); + if let Some(record) = tokens.iter_mut().find(|r| r.id == id) { + record.revoked = true; + drop(tokens); + self.save()?; + return Ok(true); + } + Ok(false) + } + + pub fn list(&self) -> Vec { + self.tokens.read().clone() + } + + pub fn save(&self) -> std::io::Result<()> { + let tokens = self.tokens.read(); + let content = serde_json::to_string_pretty(&*tokens) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + + if let Some(parent) = self.path.parent() { + std::fs::create_dir_all(parent)?; + } + + let tmp_path = self.path.with_extension("tmp"); + + #[cfg(unix)] + { + use std::io::Write; + use std::os::unix::fs::OpenOptionsExt; + let mut file = std::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .mode(0o600) + .open(&tmp_path)?; + file.write_all(content.as_bytes())?; + } + #[cfg(not(unix))] + { + std::fs::write(&tmp_path, content.as_bytes())?; + } + + std::fs::rename(&tmp_path, &self.path)?; + + Ok(()) + } + + pub fn reload(&self) { + if let Some(loaded) = Self::load_from_file(&self.path) { + *self.tokens.write() = loaded; + } + } + + pub fn update_last_used(&self, id: &str) { + let mut tokens = self.tokens.write(); + if let Some(record) = tokens.iter_mut().find(|r| r.id == id) { + record.last_used_at = Some(Utc::now()); + } + drop(tokens); + let _ = self.save(); + } + + pub fn bootstrap_if_empty(&self) -> std::io::Result> { + if !self.tokens.read().is_empty() { + return Ok(None); + } + + let (_, raw) = self.create( + "admin".to_string(), + vec![SCOPE_WILDCARD.to_string()], + None, + )?; + + Ok(Some(raw)) + } +} + +fn generate_raw_token() -> String { + let mut bytes = [0u8; 32]; + rand::rngs::OsRng.fill_bytes(&mut bytes); + format!( + "{}{}", + TOKEN_PREFIX, + base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes) + ) +} + +fn generate_token_id() -> String { + let mut bytes = [0u8; 4]; + rand::rngs::OsRng.fill_bytes(&mut bytes); + format!("tok_{}", hex::encode(bytes)) +} + +fn build_argon2() -> Argon2<'static> { + let params = argon2::Params::new(19456, 2, 1, None) + .unwrap_or_else(|_| argon2::Params::default()); + Argon2::new(argon2::Algorithm::Argon2id, argon2::Version::V0x13, params) +} + +fn hash_token(raw: &str) -> String { + use argon2::password_hash::SaltString; + let salt = SaltString::generate(&mut rand::rngs::OsRng); + let argon2 = build_argon2(); + argon2 + .hash_password(raw.as_bytes(), &salt) + .map(|h| h.to_string()) + .unwrap_or_default() +} + +pub fn default_scopes() -> Vec { + vec![ + SCOPE_INSTANCES_READ.to_string(), + SCOPE_INSTANCES_WRITE.to_string(), + SCOPE_UPDATES_READ.to_string(), + ] +} + +pub fn token_has_scope(record: &TokenRecord, required: &str) -> bool { + record.scopes.iter().any(|s| s == SCOPE_WILDCARD || s == required) +} + +pub fn make_token_store_arc(path: PathBuf) -> Arc { + Arc::new(TokenStore::load(path)) +} + +#[cfg(test)] +mod tests { + #![allow(clippy::unwrap_used, clippy::expect_used)] + use super::*; + use tempfile::TempDir; + + fn temp_store() -> (TokenStore, TempDir) { + let dir = TempDir::new().expect("tempdir"); + let path = dir.path().join("tokens.json"); + (TokenStore::load(path), dir) + } + + #[test] + fn create_and_verify() { + let (store, _dir) = temp_store(); + let (record, raw) = store.create("test".into(), default_scopes(), None).unwrap(); + let verified = store.verify(&raw); + assert!(verified.is_some()); + assert_eq!(verified.unwrap().id, record.id); + } + + #[test] + fn revoke_rejects_token() { + let (store, _dir) = temp_store(); + let (record, raw) = store.create("test".into(), default_scopes(), None).unwrap(); + store.revoke(&record.id).unwrap(); + assert!(store.verify(&raw).is_none()); + } + + #[test] + fn expired_token_rejected() { + let (store, _dir) = temp_store(); + let past = Utc::now() - chrono::Duration::seconds(1); + let (_, raw) = store.create("test".into(), default_scopes(), Some(past)).unwrap(); + assert!(store.verify(&raw).is_none()); + } + + #[test] + fn bootstrap_creates_wildcard_token() { + let (store, _dir) = temp_store(); + let raw = store.bootstrap_if_empty().unwrap(); + assert!(raw.is_some()); + let raw = raw.unwrap(); + let record = store.verify(&raw).expect("should verify"); + assert!(record.scopes.contains(&SCOPE_WILDCARD.to_string())); + } + + #[test] + fn bootstrap_does_nothing_if_not_empty() { + let (store, _dir) = temp_store(); + store.create("existing".into(), default_scopes(), None).unwrap(); + let raw = store.bootstrap_if_empty().unwrap(); + assert!(raw.is_none()); + } + + #[test] + fn wrong_prefix_returns_none() { + let (store, _dir) = temp_store(); + store.create("test".into(), default_scopes(), None).unwrap(); + assert!(store.verify("wrongprefix_abc").is_none()); + } + + #[test] + fn token_id_format() { + let (store, _dir) = temp_store(); + let (record, _) = store.create("test".into(), default_scopes(), None).unwrap(); + assert!(record.id.starts_with("tok_")); + assert_eq!(record.id.len(), 4 + 8); // "tok_" + 4 hex bytes = 12 chars + } + + #[test] + fn raw_token_format() { + let (store, _dir) = temp_store(); + let (_, raw) = store.create("test".into(), default_scopes(), None).unwrap(); + assert!(raw.starts_with(TOKEN_PREFIX)); + assert!(raw.len() > 40); + } +} diff --git a/packages/launcher/crates/octobot-launcher-binary/Cargo.toml b/packages/launcher/crates/octobot-launcher-binary/Cargo.toml new file mode 100644 index 0000000000..98cd074f05 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-binary/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "octobot-launcher-binary" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +octobot-launcher-update = { path = "../octobot-launcher-update" } +async-trait = "0.1" +tokio = { version = "1", features = ["full"] } +sysinfo = "0.32" +which = "7" +tracing = "0.1" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +chrono = { version = "0.4", features = ["serde"] } +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } + +[target.'cfg(unix)'.dependencies] +nix = { version = "0.29", features = ["signal", "process"] } + +[target.'cfg(windows)'.dependencies] +windows = { version = "0.58", features = ["Win32_System_Console", "Win32_System_Threading", "Win32_Foundation"] } + +[dev-dependencies] +tempfile = "3" +tokio = { version = "1", features = ["full"] } diff --git a/packages/launcher/crates/octobot-launcher-binary/src/binary.rs b/packages/launcher/crates/octobot-launcher-binary/src/binary.rs new file mode 100644 index 0000000000..cd64a42fae --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-binary/src/binary.rs @@ -0,0 +1,712 @@ +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use std::sync::Arc; +use std::time::Duration; + +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use octobot_launcher_core::backend::{send_progress, ProgressSender}; +use octobot_launcher_core::error::{LauncherError, Result}; +use octobot_launcher_core::model::{ + HealthStatus, InstanceId, InstanceSpec, InstanceState, RuntimeKind, +}; +use octobot_launcher_core::Backend; +use octobot_launcher_update::{ArtifactKind, Updater, fetch_latest_octobot_binary}; +use tokio::process::Child; +use tokio::sync::Mutex; +use tracing::{info, warn}; + +use crate::pidfile::{is_orphaned_pid, read_pid_file, remove_pid_file, write_pid_file}; +use crate::probe::run_http_probe; + +const OCTOBOT_DEFAULT_PORT: u16 = 5001; + +pub(crate) fn binary_path(data_dir: &Path) -> PathBuf { + #[cfg(windows)] + return data_dir.join("bin").join("octobot.exe"); + #[cfg(not(windows))] + return data_dir.join("bin").join("octobot"); +} + +pub(crate) fn resolve_binary(data_dir: &Path, runtime_options: &serde_json::Value) -> PathBuf { + if let Some(serde_json::Value::String(p)) = runtime_options.get("binary_path") { + return PathBuf::from(p); + } + binary_path(data_dir) +} + +pub(crate) fn new_binary_path(data_dir: &Path) -> PathBuf { + #[cfg(windows)] + return data_dir.join("bin").join("octobot.new.exe"); + #[cfg(not(windows))] + return data_dir.join("bin").join("octobot.new"); +} + +pub(crate) fn pid_file_path(data_dir: &Path) -> PathBuf { + data_dir.join("octobot.pid") +} + +pub(crate) fn stdio_log_path(data_dir: &Path) -> PathBuf { + data_dir.join("logs").join("launcher-stdio.log") +} + +struct Inner { + child: Option, + started_at: Option>, + pid: Option, + data_dir: Option, + runtime_options: Option, + last_http_check: Option, +} + +impl std::fmt::Debug for Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Inner") + .field("pid", &self.pid) + .field("started_at", &self.started_at) + .field("data_dir", &self.data_dir) + .finish_non_exhaustive() + } +} + +impl Inner { + fn new() -> Self { + Self { + child: None, + started_at: None, + pid: None, + data_dir: None, + runtime_options: None, + last_http_check: None, + } + } +} + +#[derive(Debug)] +pub struct BinaryBackendConfig { + pub http_probe_interval_secs: u64, +} + +impl Default for BinaryBackendConfig { + fn default() -> Self { + Self { + http_probe_interval_secs: 30, + } + } +} + +#[derive(Debug)] +pub struct BinaryBackend { + #[allow(dead_code)] + config: BinaryBackendConfig, + updater: Option>, + inner: Arc>, + start_lock: Arc>, +} + +impl BinaryBackend { + pub fn new(config: BinaryBackendConfig) -> Self { + Self { + config, + updater: None, + inner: Arc::new(Mutex::new(Inner::new())), + start_lock: Arc::new(Mutex::new(())), + } + } + + #[must_use] + pub fn with_updater(mut self, updater: Arc) -> Self { + self.updater = Some(updater); + self + } +} + +impl Default for BinaryBackend { + fn default() -> Self { + Self::new(BinaryBackendConfig::default()) + } +} + +#[async_trait] +impl Backend for BinaryBackend { + fn kind(&self) -> RuntimeKind { + RuntimeKind::Binary + } + + async fn probe(&self) -> Result<()> { + Ok(()) + } + + async fn prepare(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()> { + send_progress(progress, "Preparing data directory"); + std::fs::create_dir_all(spec.data_dir.join("bin"))?; + std::fs::create_dir_all(spec.data_dir.join("logs"))?; + + // If a custom binary_path is specified the caller manages the binary. + if spec.runtime_options.get("binary_path").is_some() { + return Ok(()); + } + + let bin = binary_path(&spec.data_dir); + if !bin.exists() { + let dest_dir = spec.data_dir.join("bin"); + send_progress(progress, format!("Downloading OctoBot {}", spec.version)); + + let downloaded = if let Some(updater) = self.updater.as_ref() { + // Use manifest-based updater when configured (production). + // TODO: re-enable updates.drakkar.software manifest once server is live. + updater + .fetch_artifact(ArtifactKind::OctoBotBinary, &dest_dir) + .await + .map_err(|e| LauncherError::Backend(e.to_string()))? + } else { + // Fallback: fetch latest release from GitHub. + fetch_latest_octobot_binary(&dest_dir) + .await + .map_err(|e| LauncherError::Backend(e.to_string()))? + }; + + send_progress(progress, "Installing binary"); + let new_path = new_binary_path(&spec.data_dir); + std::fs::rename(&downloaded, &new_path)?; + std::fs::rename(&new_path, &bin)?; + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + std::fs::set_permissions(&bin, std::fs::Permissions::from_mode(0o755))?; + } + } + + Ok(()) + } + + async fn start(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()> { + let _start_guard = self + .start_lock + .try_lock() + .map_err(|_| LauncherError::AlreadyRunning)?; + let pid_path = pid_file_path(&spec.data_dir); + + { + let mut inner = self.inner.lock().await; + if let Some(ref mut child) = inner.child { + if let Ok(Some(_)) = child.try_wait() { + inner.child = None; + inner.started_at = None; + inner.pid = None; + } else { + return Err(LauncherError::AlreadyRunning); + } + } + } + + if let Ok(Some(pid)) = read_pid_file(&pid_path) { + if !is_orphaned_pid(pid, "octobot") { + return Err(LauncherError::AlreadyRunning); + } + remove_pid_file(&pid_path)?; + } + + self.prepare(spec, progress).await?; + + let bin = resolve_binary(&spec.data_dir, &spec.runtime_options); + let mut cmd = tokio::process::Command::new(&bin); + cmd.current_dir(&spec.data_dir); + + for (k, v) in &spec.env { + cmd.env(k, v); + } + + #[cfg(windows)] + { + use std::os::windows::process::CommandExt; + const CREATE_NEW_PROCESS_GROUP: u32 = 0x0000_0200; + cmd.creation_flags(CREATE_NEW_PROCESS_GROUP); + } + + // Always log OctoBot stdio so startup errors are visible. + let log_path = stdio_log_path(&spec.data_dir); + if let Some(parent) = log_path.parent() { + std::fs::create_dir_all(parent)?; + } + let log_file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&log_path)?; + let log_file2 = log_file.try_clone()?; + cmd.stdout(Stdio::from(log_file)); + cmd.stderr(Stdio::from(log_file2)); + + cmd.kill_on_drop(false); + + send_progress(progress, "Starting OctoBot binary"); + let mut child = cmd.spawn()?; + let pid = child.id().ok_or_else(|| { + LauncherError::Backend("failed to get child PID after spawn".to_string()) + })?; + + write_pid_file(&pid_path, pid)?; + + // Wait for OctoBot's HTTP server to respond, or detect a startup crash. + // PyInstaller binaries take many seconds to extract + initialize before either + // serving HTTP or exiting; a fixed short delay is never sufficient. + let probe_url = spec + .runtime_options + .get("http_probe") + .and_then(|o| o.get("url")) + .and_then(|v| v.as_str()) + .map(str::to_string) + .unwrap_or_else(|| format!("http://127.0.0.1:{}/", OCTOBOT_DEFAULT_PORT)); + + const POLL_INTERVAL: Duration = Duration::from_millis(500); + let startup_timeout_secs = spec + .runtime_options + .get("startup_timeout_secs") + .and_then(|v| v.as_u64()) + .unwrap_or(60); + let deadline = tokio::time::Instant::now() + Duration::from_secs(startup_timeout_secs); + send_progress(progress, "Waiting for OctoBot to start"); + loop { + tokio::time::sleep(POLL_INTERVAL).await; + if let Ok(Some(status)) = child.try_wait() { + remove_pid_file(&pid_path).ok(); + return Err(LauncherError::Backend(format!( + "OctoBot exited during startup with {}", + status + ))); + } + let probe = run_http_probe(&probe_url).await; + if probe.status_code.is_some() { + send_progress(progress, "OctoBot is ready"); + break; + } + if tokio::time::Instant::now() >= deadline { + break; + } + } + + let mut inner = self.inner.lock().await; + inner.pid = Some(pid); + inner.started_at = Some(Utc::now()); + inner.data_dir = Some(spec.data_dir.clone()); + inner.runtime_options = Some(spec.runtime_options.clone()); + inner.child = Some(child); + + info!(pid, binary = %bin.display(), "octobot binary started"); + Ok(()) + } + + async fn stop(&self, spec: &InstanceSpec, timeout: Duration) -> Result<()> { + let pid = { + let inner = self.inner.lock().await; + if inner.child.is_none() { + let pid_path = pid_file_path(&spec.data_dir); + return if let Ok(Some(pid)) = read_pid_file(&pid_path) { + let _ = send_graceful_signal(pid); + tokio::time::sleep(Duration::from_millis(300)).await; + remove_pid_file(&pid_path).ok(); + Ok(()) + } else { + Err(LauncherError::NotRunning) + }; + } + inner.pid.unwrap_or(0) + }; + + send_graceful_signal(pid)?; + + let (mut child, data_dir) = { + let mut inner = self.inner.lock().await; + let child = match inner.child.take() { + Some(c) => c, + None => return Ok(()), + }; + let data_dir = inner.data_dir.take(); + inner.started_at = None; + inner.pid = None; + inner.runtime_options = None; + inner.last_http_check = None; + (child, data_dir) + }; + + if let Some(ref dir) = data_dir { + remove_pid_file(&pid_file_path(dir)).ok(); + } + + let wait_result = tokio::time::timeout(timeout, child.wait()).await; + + match wait_result { + Ok(Ok(status)) => { + info!(pid, ?status, "octobot binary stopped gracefully"); + } + Ok(Err(e)) => { + warn!(pid, error = %e, "error waiting for child process"); + } + Err(_) => { + warn!(pid, "graceful stop timed out, killing"); + let _ = child.kill().await; + let _ = child.wait().await; + } + } + + Ok(()) + } + + async fn restart( + &self, + spec: &InstanceSpec, + timeout: Duration, + progress: Option<&ProgressSender>, + ) -> Result<()> { + match self.stop(spec, timeout).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + remove_pid_file(&pid_file_path(&spec.data_dir)).ok(); + self.start(spec, progress).await + } + + async fn status(&self, _id: InstanceId) -> Result { + let (state, uptime_seconds, probe_url) = { + let inner = self.inner.lock().await; + + let probe_url = inner + .runtime_options + .as_ref() + .and_then(|o| o.get("http_probe")) + .and_then(|v| v.get("url")) + .and_then(|v| v.as_str()) + .map(std::string::ToString::to_string); + + match (inner.child.as_ref(), inner.started_at) { + (Some(_), Some(started_at)) => { + let pid = inner.pid.unwrap_or(0); + let uptime_secs = Utc::now() + .signed_duration_since(started_at) + .num_seconds() + .max(0); + let uptime = u64::try_from(uptime_secs).unwrap_or(0); + let state = InstanceState::Running { + pid_or_container: pid.to_string(), + started_at, + }; + (state, Some(uptime), probe_url) + } + _ => (InstanceState::Stopped, None, probe_url), + } + }; + + let last_http_check = if let Some(url) = probe_url { + let probe = run_http_probe(&url).await; + let mut inner = self.inner.lock().await; + inner.last_http_check = Some(probe.clone()); + Some(probe) + } else { + self.inner.lock().await.last_http_check.clone() + }; + + Ok(HealthStatus { + state, + uptime_seconds, + last_http_check, + }) + } + + async fn update( + &self, + spec: &InstanceSpec, + target_version: &str, + progress: Option<&ProgressSender>, + ) -> Result<()> { + match self.stop(spec, Duration::from_secs(10)).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + + let updater = self + .updater + .as_ref() + .ok_or_else(|| LauncherError::Backend("no updater configured".to_string()))?; + + let dest_dir = spec.data_dir.join("bin"); + std::fs::create_dir_all(&dest_dir)?; + + send_progress(progress, format!("Downloading OctoBot {target_version}")); + let downloaded = updater + .fetch_artifact(ArtifactKind::OctoBotBinary, &dest_dir) + .await + .map_err(|e| LauncherError::Backend(e.to_string()))?; + + send_progress(progress, "Installing binary"); + let final_path = binary_path(&spec.data_dir); + let new_path = new_binary_path(&spec.data_dir); + std::fs::rename(&downloaded, &new_path)?; + std::fs::rename(&new_path, &final_path)?; + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let perms = std::fs::Permissions::from_mode(0o755); + std::fs::set_permissions(&final_path, perms)?; + } + + self.start(spec, progress).await + } + + async fn remove(&self, spec: &InstanceSpec, purge: bool) -> Result<()> { + let data_dir = { + let inner = self.inner.lock().await; + inner.data_dir.clone().or_else(|| Some(spec.data_dir.clone())) + }; + + match self.stop(spec, Duration::from_secs(30)).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + + if let Some(dir) = data_dir { + remove_pid_file(&pid_file_path(&dir)).ok(); + + if purge { + let bin_dir = dir.join("bin"); + let log_dir = dir.join("logs"); + if bin_dir.exists() { + std::fs::remove_dir_all(&bin_dir)?; + } + if log_dir.exists() { + std::fs::remove_dir_all(&log_dir)?; + } + } + } + + Ok(()) + } +} + +fn send_graceful_signal(pid: u32) -> Result<()> { + #[cfg(unix)] + { + match nix::sys::signal::kill( + nix::unistd::Pid::from_raw(i32::try_from(pid).unwrap_or(i32::MAX)), + nix::sys::signal::Signal::SIGTERM, + ) { + Ok(()) | Err(nix::errno::Errno::ESRCH) => {} + Err(e) => return Err(LauncherError::Backend(format!("SIGTERM failed: {e}"))), + } + } + #[cfg(windows)] + { + use windows::Win32::System::Console::GenerateConsoleCtrlEvent; + unsafe { + GenerateConsoleCtrlEvent( + windows::Win32::System::Console::CTRL_BREAK_EVENT, + pid, + ) + .map_err(|e| LauncherError::Backend(format!("GenerateConsoleCtrlEvent failed: {e}")))?; + } + } + #[cfg(not(any(unix, windows)))] + let _ = pid; + Ok(()) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use std::collections::BTreeMap; + + fn make_spec(data_dir: &Path) -> InstanceSpec { + InstanceSpec { + id: InstanceId::new(), + name: "test-bot".into(), + runtime: RuntimeKind::Binary, + version: "1.0.0".into(), + data_dir: data_dir.to_path_buf(), + config_dir: data_dir.to_path_buf(), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::json!({"startup_timeout_secs": 2}), + } + } + + #[cfg(unix)] + fn write_fake_octobot(data_dir: &Path) { + use std::os::unix::fs::PermissionsExt; + let bin_dir = data_dir.join("bin"); + std::fs::create_dir_all(&bin_dir).unwrap(); + let bin_path = binary_path(data_dir); + std::fs::write(&bin_path, b"#!/bin/sh\nexec sleep 60\n").unwrap(); + let mut perms = std::fs::metadata(&bin_path).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&bin_path, perms).unwrap(); + } + + mod path { + use super::*; + + #[test] + fn binary_path_uses_correct_extension() { + let p = binary_path(Path::new("/data")); + if cfg!(windows) { + assert!(p.to_string_lossy().ends_with(".exe")); + } else { + assert!(!p.to_string_lossy().ends_with(".exe")); + } + } + } + + mod lifecycle { + use super::*; + + #[cfg(unix)] + #[tokio::test] + async fn start_when_already_running_returns_error() { + let tmp = tempfile::tempdir().unwrap(); + write_fake_octobot(tmp.path()); + let backend = BinaryBackend::default(); + let spec = make_spec(tmp.path()); + + backend.prepare(&spec, None).await.unwrap(); + backend.start(&spec, None).await.unwrap(); + + let result = backend.start(&spec, None).await; + assert!(matches!(result, Err(LauncherError::AlreadyRunning))); + + backend.stop(&spec, Duration::from_secs(5)).await.unwrap(); + } + + #[tokio::test] + async fn stop_when_not_running_returns_error() { + let tmp = tempfile::tempdir().unwrap(); + let backend = BinaryBackend::default(); + let spec = make_spec(tmp.path()); + + let result = backend.stop(&spec, Duration::from_secs(5)).await; + assert!(matches!(result, Err(LauncherError::NotRunning))); + } + } + + #[cfg(unix)] + mod signal { + use super::*; + + #[tokio::test] + async fn sigterm_then_sigkill() { + let tmp = tempfile::tempdir().unwrap(); + write_fake_octobot(tmp.path()); + let backend = BinaryBackend::default(); + let spec = make_spec(tmp.path()); + + backend.prepare(&spec, None).await.unwrap(); + backend.start(&spec, None).await.unwrap(); + + let before = std::time::Instant::now(); + backend + .stop(&spec, Duration::from_millis(100)) + .await + .unwrap(); + assert!(before.elapsed() < Duration::from_secs(5)); + } + + #[tokio::test] + async fn graceful_exit_within_timeout() { + use std::os::unix::fs::PermissionsExt; + let tmp = tempfile::tempdir().unwrap(); + let bin_dir = tmp.path().join("bin"); + std::fs::create_dir_all(&bin_dir).unwrap(); + let bin_path = binary_path(tmp.path()); + std::fs::write( + &bin_path, + b"#!/bin/sh\ntrap 'exit 0' TERM\nsleep 60 & wait\n", + ) + .unwrap(); + let mut perms = std::fs::metadata(&bin_path).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&bin_path, perms).unwrap(); + + let backend = BinaryBackend::default(); + let spec = make_spec(tmp.path()); + + backend.prepare(&spec, None).await.unwrap(); + backend.start(&spec, None).await.unwrap(); + + let before = std::time::Instant::now(); + backend + .stop(&spec, Duration::from_secs(5)) + .await + .unwrap(); + assert!(before.elapsed() < Duration::from_secs(5)); + } + } + + mod stdio { + use super::*; + + #[cfg(unix)] + #[tokio::test] + async fn stdio_always_logged() { + let tmp = tempfile::tempdir().unwrap(); + write_fake_octobot(tmp.path()); + let backend = BinaryBackend::default(); + let spec = make_spec(tmp.path()); + + backend.prepare(&spec, None).await.unwrap(); + backend.start(&spec, None).await.unwrap(); + backend.stop(&spec, Duration::from_secs(5)).await.unwrap(); + + assert!(stdio_log_path(tmp.path()).exists()); + } + + #[cfg(unix)] + #[tokio::test] + async fn capture_stdio_appends_to_log() { + use std::os::unix::fs::PermissionsExt; + let tmp = tempfile::tempdir().unwrap(); + let bin_dir = tmp.path().join("bin"); + std::fs::create_dir_all(&bin_dir).unwrap(); + let bin_path = binary_path(tmp.path()); + std::fs::write(&bin_path, b"#!/bin/sh\necho hello\nsleep 60\n").unwrap(); + let mut perms = std::fs::metadata(&bin_path).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&bin_path, perms).unwrap(); + + let backend = BinaryBackend::default(); + let mut spec = make_spec(tmp.path()); + spec.runtime_options = serde_json::json!({"capture_stdio": true, "startup_timeout_secs": 2}); + + backend.prepare(&spec, None).await.unwrap(); + backend.start(&spec, None).await.unwrap(); + tokio::time::sleep(Duration::from_millis(200)).await; + let _ = backend.stop(&spec, Duration::from_secs(5)).await; + + assert!(stdio_log_path(tmp.path()).exists()); + } + } + + mod update { + use super::*; + + #[test] + fn atomic_rename_replaces_binary() { + let tmp = tempfile::tempdir().unwrap(); + let data_dir = tmp.path(); + let bin_dir = data_dir.join("bin"); + std::fs::create_dir_all(&bin_dir).unwrap(); + + let bin_path = binary_path(data_dir); + let new_path = new_binary_path(data_dir); + + std::fs::write(&bin_path, b"old").unwrap(); + std::fs::write(&new_path, b"new").unwrap(); + + std::fs::rename(&new_path, &bin_path).unwrap(); + + let contents = std::fs::read(&bin_path).unwrap(); + assert_eq!(contents, b"new"); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-binary/src/lib.rs b/packages/launcher/crates/octobot-launcher-binary/src/lib.rs new file mode 100644 index 0000000000..64cbd27338 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-binary/src/lib.rs @@ -0,0 +1,5 @@ +pub mod binary; +pub mod pidfile; +pub mod probe; + +pub use binary::{BinaryBackend, BinaryBackendConfig}; diff --git a/packages/launcher/crates/octobot-launcher-binary/src/pidfile.rs b/packages/launcher/crates/octobot-launcher-binary/src/pidfile.rs new file mode 100644 index 0000000000..c9ceb8ef3f --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-binary/src/pidfile.rs @@ -0,0 +1,71 @@ +use std::io; +use std::path::Path; + +use sysinfo::{ProcessRefreshKind, RefreshKind, System}; + +pub(crate) fn write_pid_file(path: &Path, pid: u32) -> io::Result<()> { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(path, pid.to_string()) +} + +pub(crate) fn read_pid_file(path: &Path) -> io::Result> { + if !path.exists() { + return Ok(None); + } + let contents = std::fs::read_to_string(path)?; + let pid = contents + .trim() + .parse::() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + Ok(Some(pid)) +} + +pub(crate) fn remove_pid_file(path: &Path) -> io::Result<()> { + if path.exists() { + std::fs::remove_file(path)?; + } + Ok(()) +} + +pub(crate) fn is_orphaned_pid(pid: u32, expected_binary_name: &str) -> bool { + let sys = System::new_with_specifics( + RefreshKind::new().with_processes(ProcessRefreshKind::new()), + ); + match sys.process(sysinfo::Pid::from_u32(pid)) { + None => true, + Some(proc) => { + let name = proc.name().to_string_lossy(); + !name.contains(expected_binary_name) + } + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + #[test] + fn write_and_read_pid() { + let tmp = tempfile::tempdir().unwrap(); + let path = tmp.path().join("test.pid"); + write_pid_file(&path, 42).unwrap(); + let result = read_pid_file(&path).unwrap(); + assert_eq!(result, Some(42)); + } + + #[test] + fn read_missing_returns_none() { + let tmp = tempfile::tempdir().unwrap(); + let path = tmp.path().join("nonexistent.pid"); + let result = read_pid_file(&path).unwrap(); + assert_eq!(result, None); + } + + #[test] + fn orphan_detection() { + assert!(is_orphaned_pid(99999, "octobot")); + } +} diff --git a/packages/launcher/crates/octobot-launcher-binary/src/probe.rs b/packages/launcher/crates/octobot-launcher-binary/src/probe.rs new file mode 100644 index 0000000000..31ea8cf7c6 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-binary/src/probe.rs @@ -0,0 +1,53 @@ +use octobot_launcher_core::model::HttpProbe; + +pub(crate) async fn run_http_probe(url: &str) -> HttpProbe { + let start = std::time::Instant::now(); + let result = reqwest::Client::new() + .get(url) + .timeout(std::time::Duration::from_secs(5)) + .send() + .await; + let latency_ms = u32::try_from(start.elapsed().as_millis()).unwrap_or(u32::MAX); + HttpProbe { + url: url.to_string(), + status_code: result.as_ref().ok().map(|r| r.status().as_u16()), + latency_ms: Some(latency_ms), + at: chrono::Utc::now(), + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use std::net::SocketAddr; + use tokio::io::AsyncWriteExt; + use tokio::net::TcpListener; + + async fn spawn_http_200_server() -> SocketAddr { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + if let Ok((mut stream, _)) = listener.accept().await { + let response = b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"; + let _ = stream.write_all(response).await; + } + }); + addr + } + + #[tokio::test] + async fn http_probe_records_status() { + let addr = spawn_http_200_server().await; + let url = format!("http://{addr}/health"); + let probe = run_http_probe(&url).await; + assert_eq!(probe.status_code, Some(200)); + assert!(probe.latency_ms.is_some()); + } + + #[tokio::test] + async fn http_probe_handles_unreachable() { + let probe = run_http_probe("http://127.0.0.1:1/unreachable").await; + assert_eq!(probe.status_code, None); + } +} diff --git a/packages/launcher/crates/octobot-launcher-cli/Cargo.toml b/packages/launcher/crates/octobot-launcher-cli/Cargo.toml new file mode 100644 index 0000000000..4ecf3a76cb --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "octobot-launcher-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[[bin]] +name = "octobot-launcher" +path = "src/main.rs" + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +octobot-launcher-config = { path = "../octobot-launcher-config" } +octobot-launcher-service = { path = "../octobot-launcher-service" } +octobot-launcher-update = { path = "../octobot-launcher-update" } +octobot-launcher-api = { path = "../octobot-launcher-api" } +octobot-launcher-docker = { path = "../octobot-launcher-docker" } +octobot-launcher-binary = { path = "../octobot-launcher-binary" } +octobot-launcher-python = { path = "../octobot-launcher-python" } +clap = { version = "4", features = ["derive", "env"] } +tokio = { version = "1", features = ["full"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } +anyhow = "1" +serde_json = "1" +chrono = { version = "0.4", features = ["serde"] } +fs2 = "0.4" +qrcode = "0.14" +uuid = { version = "1", features = ["v4"] } +serde = { version = "1", features = ["derive"] } +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json", "stream"] } +futures-util = { version = "0.3", default-features = false, features = ["std"] } + +[target.'cfg(unix)'.dependencies] +hyper = { version = "1", features = ["http1"] } +hyper-util = { version = "0.1", features = ["tokio"] } +http-body-util = "0.1" +bytes = "1" + +[dev-dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core", features = ["testing"] } +assert_cmd = "2" +predicates = "3" +tempfile = "3" diff --git a/packages/launcher/crates/octobot-launcher-cli/src/cli.rs b/packages/launcher/crates/octobot-launcher-cli/src/cli.rs new file mode 100644 index 0000000000..53b58be75f --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/cli.rs @@ -0,0 +1,156 @@ +use std::path::PathBuf; + +use clap::{Args, Parser, Subcommand}; + +#[derive(Debug, Parser)] +#[command(name = "octobot-launcher", version, about = "OctoBot instance manager")] +pub struct Cli { + #[command(subcommand)] + pub command: Commands, + + #[arg(long, global = true, default_value = "info")] + pub log_level: String, + + #[arg(long, global = true)] + pub config: Option, + + #[arg(long, global = true, env = "OCTOBOT_LAUNCHER_TOKEN")] + pub token: Option, +} + +#[derive(Debug, Subcommand)] +pub enum Commands { + Service(ServiceArgs), + Instance(InstanceArgs), + Token(TokenArgs), + Update(UpdateArgs), + Doctor, + Version, +} + +#[derive(Debug, Args)] +pub struct ServiceArgs { + #[command(subcommand)] + pub command: ServiceCommands, +} + +#[derive(Debug, Subcommand)] +pub enum ServiceCommands { + Install { + #[arg(long)] + user: bool, + #[arg(long)] + system: bool, + }, + Uninstall, + Start, + Stop, + Restart, + Status, + #[command(hide = true)] + Run, +} + +#[derive(Debug, Args)] +pub struct InstanceArgs { + #[command(subcommand)] + pub command: InstanceCommands, +} + +#[derive(Debug, Subcommand)] +pub enum InstanceCommands { + List { + #[arg(long)] + json: bool, + }, + Add { + #[arg(long)] + name: String, + #[arg(long)] + runtime: String, + #[arg(long, default_value = "latest")] + version: String, + #[arg(long)] + data_dir: Option, + #[arg(long)] + binary_path: Option, + #[arg(long)] + source_dir: Option, + }, + Run { + #[arg(long)] + name: String, + #[arg(long)] + runtime: String, + #[arg(long, default_value = "latest")] + version: String, + #[arg(long)] + data_dir: Option, + #[arg(long)] + binary_path: Option, + #[arg(long)] + source_dir: Option, + }, + Remove { + id: String, + }, + Start { + id: String, + }, + Stop { + id: String, + }, + StopAll, + Restart { + id: String, + }, + Update { + id: String, + #[arg(long)] + version: Option, + }, + Status { + id: String, + }, +} + +#[derive(Debug, Args)] +pub struct TokenArgs { + #[command(subcommand)] + pub command: TokenCommands, +} + +#[derive(Debug, Subcommand)] +pub enum TokenCommands { + List, + Create { + #[arg(long)] + label: String, + #[arg(long, value_delimiter = ',')] + scope: Vec, + #[arg(long)] + expires_in: Option, + }, + Revoke { + id: String, + }, + Rotate { + id: String, + }, + ShowPairing { + id: String, + }, +} + +#[derive(Debug, Args)] +pub struct UpdateArgs { + #[command(subcommand)] + pub command: UpdateCommands, +} + +#[derive(Debug, Subcommand)] +pub enum UpdateCommands { + Check, + Apply, + SetChannel { channel: String }, +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/client.rs b/packages/launcher/crates/octobot-launcher-cli/src/client.rs new file mode 100644 index 0000000000..2ff57cfcc7 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/client.rs @@ -0,0 +1,286 @@ +use anyhow::{Context, Result}; +use serde::de::DeserializeOwned; + +pub struct ApiResponse { + pub status: u16, + body: String, +} + +impl ApiResponse { + pub fn is_success(&self) -> bool { + (200..300).contains(&self.status) + } + + pub fn text(self) -> String { + self.body + } + + pub fn json(self) -> Result { + serde_json::from_str(&self.body).context("parse JSON response") + } +} + +pub struct ApiClient { + base_url: String, + token: Option, + client: reqwest::Client, + #[cfg(unix)] + socket_path: Option, +} + +impl std::fmt::Debug for ApiClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ApiClient") + .field("base_url", &self.base_url) + .finish_non_exhaustive() + } +} + +impl ApiClient { + pub fn new(api_bind: &str, token: Option) -> Self { + Self { + base_url: format!("http://{api_bind}"), + token, + client: reqwest::Client::new(), + #[cfg(unix)] + socket_path: None, + } + } + + pub fn from_config(config: &octobot_launcher_config::LauncherConfig, token: Option) -> Self { + #[cfg(unix)] + { + let socket_path = config.launcher.data_root.join(octobot_launcher_config::SOCKET_FILENAME); + if socket_path.exists() { + return Self { + base_url: format!("http://{}", config.launcher.api_bind), + token, + client: reqwest::Client::new(), + socket_path: Some(socket_path), + }; + } + } + Self::new(&config.launcher.api_bind, token) + } + + pub async fn get(&self, path: &str) -> Result { + #[cfg(unix)] + if let Some(sock) = &self.socket_path { + return unix_request(sock, "GET", path, None).await; + } + self.tcp_get(path).await + } + + pub async fn post(&self, path: &str, body: serde_json::Value) -> Result { + #[cfg(unix)] + if let Some(sock) = &self.socket_path { + return unix_request(sock, "POST", path, Some(body)).await; + } + self.tcp_post(path, body).await + } + + pub async fn delete(&self, path: &str) -> Result { + #[cfg(unix)] + if let Some(sock) = &self.socket_path { + return unix_request(sock, "DELETE", path, None).await; + } + self.tcp_delete(path).await + } + + pub async fn post_sse( + &self, + path: &str, + body: serde_json::Value, + mut on_step: impl FnMut(&str), + ) -> Result { + #[cfg(unix)] + if let Some(sock) = &self.socket_path { + return unix_post_sse(sock, path, body, &mut on_step).await; + } + self.tcp_post_sse(path, body, &mut on_step).await + } + + async fn tcp_post_sse( + &self, + path: &str, + body: serde_json::Value, + on_step: &mut impl FnMut(&str), + ) -> Result { + use futures_util::StreamExt; + + let mut req = self.client + .post(format!("{}{}", self.base_url, path)) + .json(&body); + if let Some(t) = &self.token { + req = req.bearer_auth(t); + } + let resp = req.send().await.with_context(supervisor_not_running_msg)?; + if !resp.status().is_success() { + let status = resp.status().as_u16(); + let text = resp.text().await.unwrap_or_default(); + anyhow::bail!("HTTP {status}: {text}"); + } + + let mut stream = resp.bytes_stream(); + let mut buf = String::new(); + while let Some(chunk) = stream.next().await { + let chunk = chunk.context("read SSE chunk")?; + buf.push_str(&String::from_utf8_lossy(&chunk)); + if let Some(result) = drain_sse_events(&mut buf, on_step) { + return result; + } + } + anyhow::bail!("SSE stream ended without done event") + } + + async fn tcp_get(&self, path: &str) -> Result { + let mut req = self.client.get(format!("{}{}", self.base_url, path)); + if let Some(t) = &self.token { + req = req.bearer_auth(t); + } + let resp = req.send().await.with_context(supervisor_not_running_msg)?; + reqwest_to_api(resp).await + } + + async fn tcp_post(&self, path: &str, body: serde_json::Value) -> Result { + let mut req = self.client + .post(format!("{}{}", self.base_url, path)) + .json(&body); + if let Some(t) = &self.token { + req = req.bearer_auth(t); + } + let resp = req.send().await.with_context(supervisor_not_running_msg)?; + reqwest_to_api(resp).await + } + + async fn tcp_delete(&self, path: &str) -> Result { + let mut req = self.client.delete(format!("{}{}", self.base_url, path)); + if let Some(t) = &self.token { + req = req.bearer_auth(t); + } + let resp = req.send().await.with_context(supervisor_not_running_msg)?; + reqwest_to_api(resp).await + } +} + +async fn reqwest_to_api(resp: reqwest::Response) -> Result { + let status = resp.status().as_u16(); + let body = resp.text().await.context("read response body")?; + Ok(ApiResponse { status, body }) +} + +#[cfg(unix)] +async fn unix_request( + socket_path: &std::path::Path, + method: &str, + path: &str, + body: Option, +) -> Result { + use bytes::Bytes; + use http_body_util::{BodyExt, Full}; + use hyper::Request; + use hyper_util::rt::TokioIo; + use tokio::net::UnixStream; + + let stream = UnixStream::connect(socket_path).await + .context("connect to launcher unix socket")?; + let io = TokioIo::new(stream); + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await + .context("http1 handshake")?; + tokio::spawn(conn); + + let (body_bytes, content_type) = match body { + Some(json) => (Bytes::from(serde_json::to_string(&json)?), Some("application/json")), + None => (Bytes::new(), None), + }; + + let mut builder = Request::builder() + .method(method) + .uri(format!("http://launcher{path}")) + .header("Host", "launcher"); + if let Some(ct) = content_type { + builder = builder.header("Content-Type", ct); + } + let req = builder.body(Full::new(body_bytes)).unwrap(); + + let resp = sender.send_request(req).await.context("send request")?; + let status = resp.status().as_u16(); + let body_bytes = resp.into_body().collect().await.context("read response body")?.to_bytes(); + Ok(ApiResponse { + status, + body: String::from_utf8_lossy(&body_bytes).into_owned(), + }) +} + +fn drain_sse_events(buf: &mut String, on_step: &mut impl FnMut(&str)) -> Option> { + loop { + let pos = buf.find("\n\n")?; + let frame: String = buf.drain(..pos + 2).collect(); + for line in frame.lines() { + let Some(data) = line.strip_prefix("data: ") else { continue }; + if let Ok(val) = serde_json::from_str::(data) { + if let Some(done) = val.get("done") { + if let Some(err) = done.get("err").and_then(|v| v.as_str()) { + return Some(Err(anyhow::anyhow!("{err}"))); + } + return Some(Ok(done.get("ok").cloned().unwrap_or(serde_json::Value::Null))); + } + } + // plain string or non-done JSON both reach the spinner + on_step(data); + } + } +} + +#[cfg(unix)] +async fn unix_post_sse( + socket_path: &std::path::Path, + path: &str, + body: serde_json::Value, + on_step: &mut impl FnMut(&str), +) -> Result { + use bytes::Bytes; + use http_body_util::{BodyDataStream, Full}; + use hyper::Request; + use hyper_util::rt::TokioIo; + use futures_util::StreamExt; + use tokio::net::UnixStream; + + let stream = UnixStream::connect(socket_path).await + .context("connect to launcher unix socket")?; + let io = TokioIo::new(stream); + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await + .context("http1 handshake")?; + tokio::spawn(conn); + + let body_bytes = Bytes::from(serde_json::to_string(&body)?); + let req = Request::builder() + .method("POST") + .uri(format!("http://launcher{path}")) + .header("Host", "launcher") + .header("Content-Type", "application/json") + .body(Full::new(body_bytes)) + .unwrap(); + + let resp = sender.send_request(req).await.context("send request")?; + if !resp.status().is_success() { + let status = resp.status().as_u16(); + use http_body_util::BodyExt; + let body = resp.into_body().collect().await.context("read error body")?.to_bytes(); + anyhow::bail!("HTTP {status}: {}", String::from_utf8_lossy(&body)); + } + let mut data_stream = Box::pin(BodyDataStream::new(resp.into_body())); + let mut buf = String::new(); + while let Some(chunk) = data_stream.next().await { + let data = chunk.context("read SSE chunk")?; + buf.push_str(&String::from_utf8_lossy(&data)); + if let Some(result) = drain_sse_events(&mut buf, on_step) { + return result; + } + } + anyhow::bail!("SSE stream ended without done event") +} + +fn supervisor_not_running_msg() -> String { + "Supervisor not running. Start with: octobot-launcher service start".to_string() +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/doctor.rs b/packages/launcher/crates/octobot-launcher-cli/src/doctor.rs new file mode 100644 index 0000000000..fd701c09a5 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/doctor.rs @@ -0,0 +1,93 @@ +use anyhow::Result; +use octobot_launcher_api::token_store::TokenStore; +use octobot_launcher_binary::{BinaryBackend, BinaryBackendConfig}; +use octobot_launcher_config::LauncherConfig; +use octobot_launcher_core::{Backend, LauncherError}; +use octobot_launcher_docker::DockerBackend; +use octobot_launcher_python::{PythonBackend, PythonBackendConfig}; + +pub async fn run_doctor(config: &LauncherConfig) -> Result<()> { + let mut all_ok = true; + + let docker_result = if config.backends.docker.enabled { + match DockerBackend::new() { + Ok(b) => b.probe().await, + Err(e) => Err(e), + } + } else { + Err(LauncherError::BackendUnavailable( + "disabled in config".to_string(), + )) + }; + + match &docker_result { + Ok(()) => println!("[OK] Docker backend: connected"), + Err(e) => { + println!("[FAIL] Docker backend: {e}"); + all_ok = false; + } + } + + let binary_result = if config.backends.binary.enabled { + let b = BinaryBackend::new(BinaryBackendConfig::default()); + b.probe().await + } else { + Err(LauncherError::BackendUnavailable( + "disabled in config".to_string(), + )) + }; + + match binary_result { + Ok(()) => println!("[OK] Binary backend: available"), + Err(e) => { + println!("[FAIL] Binary backend: {e}"); + all_ok = false; + } + } + + let python_result = if config.backends.python.enabled { + let b = PythonBackend::new(PythonBackendConfig::default()); + b.probe().await + } else { + Err(LauncherError::BackendUnavailable( + "disabled in config".to_string(), + )) + }; + + match python_result { + Ok(()) => println!("[OK] Python backend: available"), + Err(e) => { + println!("[FAIL] Python backend: {e}"); + all_ok = false; + } + } + + println!("[OK] API: will bind to {}", config.launcher.api_bind); + + let data_root = &config.launcher.data_root; + if data_root.exists() { + let writable = std::fs::write(data_root.join(".write_test"), b"test") + .and_then(|()| std::fs::remove_file(data_root.join(".write_test"))) + .is_ok(); + if writable { + println!("[OK] Data dir: {} (writable)", data_root.display()); + } else { + println!("[FAIL] Data dir: {} (not writable)", data_root.display()); + all_ok = false; + } + } else { + println!("[FAIL] Data dir: {} (does not exist)", data_root.display()); + all_ok = false; + } + + let token_store_path = data_root.join("state").join("tokens.json"); + let token_store = TokenStore::load(token_store_path); + let token_count = token_store.list().len(); + println!("[OK] Tokens: {token_count} configured"); + + if !all_ok { + std::process::exit(1); + } + + Ok(()) +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/instance.rs b/packages/launcher/crates/octobot-launcher-cli/src/instance.rs new file mode 100644 index 0000000000..ad3f2d9af1 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/instance.rs @@ -0,0 +1,261 @@ +use anyhow::{Context, Result}; +use octobot_launcher_config::{InstanceRecord, LauncherConfig}; + +use crate::cli::{InstanceArgs, InstanceCommands}; +use crate::client::{ApiClient, ApiResponse}; +use crate::paths; +use crate::spinner::{with_spinner, with_streaming_spinner}; + +pub async fn handle_instance(args: InstanceArgs, config: &LauncherConfig, token: Option) -> Result<()> { + match args.command { + InstanceCommands::List { json } => { + let client = ApiClient::from_config(config, token); + let resp = client.get(paths::INSTANCES).await?; + if !resp.is_success() { + let s = resp.status; + anyhow::bail!("failed to list instances: HTTP {s}: {}", resp.text()); + } + let records: Vec = resp.json().context("parse instances")?; + if json { + println!("{}", serde_json::to_string_pretty(&records)?); + } else { + print_instances_table(&records); + } + Ok(()) + } + InstanceCommands::Add { name, runtime, version, data_dir, binary_path, source_dir } => { + let runtime_kind = parse_runtime(&runtime)?; + let client = ApiClient::from_config(config, token); + let mut body = serde_json::json!({ + "name": name, + "runtime": runtime_kind, + "version": version, + "runtime_options": build_runtime_options(binary_path, source_dir), + }); + if let Some(dir) = data_dir { + body["data_dir"] = serde_json::json!(dir); + body["config_dir"] = serde_json::json!(dir.join("config")); + } + let resp = client.post(paths::INSTANCES, body).await?; + if resp.is_success() { + let record: serde_json::Value = resp.json().context("parse response")?; + let id = record["spec"]["id"].as_str().unwrap_or("unknown"); + println!("{id}"); + Ok(()) + } else { + let s = resp.status; + anyhow::bail!("failed to add instance: HTTP {s}: {}", resp.text()) + } + } + InstanceCommands::Remove { id } => { + let client = ApiClient::from_config(config, token); + let resp = client.delete(&paths::instance(&id)).await?; + if resp.is_success() || resp.status == 204 { + println!("Instance {id} removed."); + Ok(()) + } else { + let s = resp.status; + anyhow::bail!("failed to remove instance: HTTP {s}: {}", resp.text()) + } + } + InstanceCommands::Start { id } => { + let client = ApiClient::from_config(config, token); + let (label_tx, label_rx) = tokio::sync::mpsc::unbounded_channel::(); + let record = with_streaming_spinner( + "Starting...", + label_rx, + client.post_sse(&paths::instance_start(&id), serde_json::json!({}), move |step| { + let _ = label_tx.send(step.to_string()); + }), + ).await?; + print_sse_start_result(&record); + Ok(()) + } + InstanceCommands::Stop { id } => { + let client = ApiClient::from_config(config, token); + let resp = with_spinner( + "Stopping...", + client.post(&paths::instance_stop(&id), serde_json::json!({})), + ).await?; + check_status(resp, "stop instance") + } + InstanceCommands::StopAll => { + let client = ApiClient::from_config(config, token.clone()); + let resp = client.get(paths::INSTANCES).await?; + if !resp.is_success() { + let s = resp.status; + anyhow::bail!("failed to list instances: HTTP {s}: {}", resp.text()); + } + let records: Vec = resp.json().context("parse instances")?; + if records.is_empty() { + println!("No instances to stop."); + return Ok(()); + } + for record in &records { + let id = record.spec.id.0.to_string(); + let name = &record.spec.name; + let client2 = ApiClient::from_config(config, token.clone()); + let resp = with_spinner( + &format!("Stopping {name}..."), + client2.post(&paths::instance_stop(&id), serde_json::json!({})), + ).await?; + if resp.is_success() { + println!("{name}: stopped"); + } else { + let s = resp.status; + println!("{name}: error HTTP {s}: {}", resp.text()); + } + } + Ok(()) + } + InstanceCommands::Restart { id } => { + let client = ApiClient::from_config(config, token); + let (label_tx, label_rx) = tokio::sync::mpsc::unbounded_channel::(); + with_streaming_spinner( + "Restarting...", + label_rx, + client.post_sse(&paths::instance_restart(&id), serde_json::json!({}), move |step| { + let _ = label_tx.send(step.to_string()); + }), + ).await?; + println!("OK"); + Ok(()) + } + InstanceCommands::Update { id, version } => { + let client = ApiClient::from_config(config, token); + let body = match version { + Some(v) => serde_json::json!({ "version": v }), + None => serde_json::json!({}), + }; + let (label_tx, label_rx) = tokio::sync::mpsc::unbounded_channel::(); + with_streaming_spinner( + "Updating...", + label_rx, + client.post_sse(&paths::instance_update(&id), body, move |step| { + let _ = label_tx.send(step.to_string()); + }), + ).await?; + println!("OK"); + Ok(()) + } + InstanceCommands::Run { name, runtime, version, data_dir, binary_path, source_dir } => { + let client = ApiClient::from_config(config, token); + + let resp = client.get(paths::INSTANCES).await?; + if !resp.is_success() { + let s = resp.status; + anyhow::bail!("failed to list instances: HTTP {s}: {}", resp.text()); + } + let records: Vec = resp.json().context("parse instances")?; + + let id = if let Some(existing) = records.iter().find(|r| r.spec.name == name) { + existing.spec.id.0.to_string() + } else { + let runtime_kind = parse_runtime(&runtime)?; + let mut body = serde_json::json!({ + "name": name, + "runtime": runtime_kind, + "version": version, + "runtime_options": build_runtime_options(binary_path, source_dir), + }); + if let Some(dir) = data_dir { + body["data_dir"] = serde_json::json!(dir); + body["config_dir"] = serde_json::json!(dir.join("config")); + } + let resp = client.post(paths::INSTANCES, body).await?; + if !resp.is_success() { + let s = resp.status; + anyhow::bail!("failed to create instance: HTTP {s}: {}", resp.text()); + } + let record: serde_json::Value = resp.json().context("parse create response")?; + let id = record["spec"]["id"].as_str().unwrap_or("unknown").to_string(); + println!("Created {id}"); + id + }; + + let (label_tx, label_rx) = tokio::sync::mpsc::unbounded_channel::(); + let record = with_streaming_spinner( + "Starting...", + label_rx, + client.post_sse(&paths::instance_start(&id), serde_json::json!({}), move |step| { + let _ = label_tx.send(step.to_string()); + }), + ).await?; + print_sse_start_result(&record); + Ok(()) + } + InstanceCommands::Status { id } => { + let client = ApiClient::from_config(config, token); + let resp = client.get(&paths::instance_status(&id)).await?; + let status: serde_json::Value = resp.json().context("parse status")?; + println!("{}", serde_json::to_string_pretty(&status)?); + Ok(()) + } + } +} + +fn build_runtime_options(binary_path: Option, source_dir: Option) -> serde_json::Value { + let mut opts = serde_json::Map::new(); + if let Some(p) = binary_path { + opts.insert("binary_path".to_string(), serde_json::json!(p)); + } + if let Some(d) = source_dir { + opts.insert("source_dir".to_string(), serde_json::json!(d)); + } + serde_json::Value::Object(opts) +} + +fn parse_runtime(s: &str) -> Result { + match s.to_lowercase().as_str() { + "docker" => Ok(octobot_launcher_core::RuntimeKind::Docker), + "binary" => Ok(octobot_launcher_core::RuntimeKind::Binary), + "python" => Ok(octobot_launcher_core::RuntimeKind::Python), + other => anyhow::bail!("unknown runtime '{other}': must be docker, binary, or python"), + } +} + +fn print_instances_table(records: &[InstanceRecord]) { + if records.is_empty() { + println!("No instances configured."); + return; + } + println!( + "{:<10} {:<20} {:<8} {:<10} {:<12} State", + "ID", "Name", "Runtime", "Version", "Desired" + ); + println!("{}", "-".repeat(80)); + for r in records { + println!( + "{:<10} {:<20} {:<8} {:<10} {:<12} {:?}", + r.spec.id.short(), + r.spec.name, + format!("{:?}", r.spec.runtime), + r.spec.version, + format!("{:?}", r.desired_state), + r.last_known_state, + ); + } +} + +fn check_status(resp: ApiResponse, action: &str) -> Result<()> { + if resp.is_success() { + println!("OK"); + Ok(()) + } else { + let s = resp.status; + anyhow::bail!("failed to {action}: HTTP {s}: {}", resp.text()) + } +} + +fn print_sse_start_result(record: &serde_json::Value) { + if record["spec"]["runtime"].as_str() == Some("Python") { + if let Some(data_dir) = record["spec"]["data_dir"].as_str() { + let log = std::path::Path::new(data_dir) + .join("logs") + .join("launcher-stdio.log"); + println!("OK (logs: {})", log.display()); + return; + } + } + println!("OK"); +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/main.rs b/packages/launcher/crates/octobot-launcher-cli/src/main.rs new file mode 100644 index 0000000000..eff02f64e2 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/main.rs @@ -0,0 +1,84 @@ +#![deny(clippy::unwrap_used, clippy::expect_used)] + +mod cli; +mod client; +mod doctor; +mod instance; +mod paths; +mod service; +mod spinner; +mod supervisor; +mod token; +mod update; + +use anyhow::{Context, Result}; +use clap::Parser; +use octobot_launcher_config::load_config; + +use cli::{Cli, Commands}; + +#[tokio::main] +async fn main() -> Result<()> { + let cli = Cli::parse(); + + tracing_subscriber::fmt() + .with_env_filter(&cli.log_level) + .init(); + + let config = load_config(cli.config.as_deref()).context("load config")?; + + let token = cli.token.or_else(|| { + let candidates = [ + config.launcher.data_root.join(octobot_launcher_config::BOOTSTRAP_TOKEN_FILENAME), + octobot_launcher_config::default_data_root(true).join(octobot_launcher_config::BOOTSTRAP_TOKEN_FILENAME), + ]; + candidates.iter() + .filter_map(|p| std::fs::read_to_string(p).ok()) + .map(|s| s.trim().to_string()) + .next() + }); + + match cli.command { + Commands::Service(args) => service::handle_service(args, &config).await, + Commands::Instance(args) => instance::handle_instance(args, &config, token).await, + Commands::Token(args) => token::handle_token(args, &config), + Commands::Update(args) => update::handle_update(args, &config, token).await, + Commands::Doctor => doctor::run_doctor(&config).await, + Commands::Version => { + println!("{}", env!("CARGO_PKG_VERSION")); + Ok(()) + } + } +} + +pub fn print_qr(content: &str) { + use qrcode::QrCode; + use qrcode::render::unicode; + match QrCode::new(content.as_bytes()) { + Ok(code) => { + let image = code + .render::() + .dark_color(unicode::Dense1x2::Dark) + .light_color(unicode::Dense1x2::Light) + .build(); + println!("{image}"); + } + Err(e) => { + tracing::warn!("failed to render QR code: {e}"); + } + } +} + +pub fn write_bootstrap_token(path: &std::path::Path, raw: &str) -> Result<()> { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).context("create bootstrap token parent dir")?; + } + std::fs::write(path, raw).context("write bootstrap token")?; + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let perms = std::fs::Permissions::from_mode(0o644); + std::fs::set_permissions(path, perms).context("chmod bootstrap token")?; + } + Ok(()) +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/paths.rs b/packages/launcher/crates/octobot-launcher-cli/src/paths.rs new file mode 100644 index 0000000000..1fac1f8c4e --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/paths.rs @@ -0,0 +1,10 @@ +pub const INSTANCES: &str = "/v1/instances"; +pub const UPDATES_CHECK: &str = "/v1/updates/check"; +pub const UPDATES_APPLY: &str = "/v1/updates/launcher"; + +pub fn instance(id: &str) -> String { format!("{INSTANCES}/{id}") } +pub fn instance_start(id: &str) -> String { format!("{INSTANCES}/{id}/start") } +pub fn instance_stop(id: &str) -> String { format!("{INSTANCES}/{id}/stop") } +pub fn instance_restart(id: &str) -> String { format!("{INSTANCES}/{id}/restart") } +pub fn instance_update(id: &str) -> String { format!("{INSTANCES}/{id}/update") } +pub fn instance_status(id: &str) -> String { format!("{INSTANCES}/{id}/status") } diff --git a/packages/launcher/crates/octobot-launcher-cli/src/service.rs b/packages/launcher/crates/octobot-launcher-cli/src/service.rs new file mode 100644 index 0000000000..6bdeb188ec --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/service.rs @@ -0,0 +1,163 @@ +use std::sync::Arc; + +use anyhow::{Context, Result}; +use octobot_launcher_config::{LauncherConfig, LAUNCHER_PID_FILENAME, Store}; +use octobot_launcher_service::{ENV_FOREGROUND, LauncherService, ServiceLevel, ServiceStatus, auto_level}; + +use crate::cli::{ServiceArgs, ServiceCommands}; +use crate::supervisor::run_supervisor; + +fn spawn_daemon(config: &LauncherConfig) -> Result<()> { + use std::process::Stdio; + + let log_path = config.launcher.data_root.join("launcher.log"); + std::fs::create_dir_all(&config.launcher.data_root) + .with_context(|| format!("create data root {}", config.launcher.data_root.display()))?; + let log_file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&log_path) + .with_context(|| format!("open log file {}", log_path.display()))?; + + let exe = std::env::current_exe().context("resolve current exe")?; + + let mut child = std::process::Command::new(exe) + .args(["service", "run"]) + .env(ENV_FOREGROUND, "1") + .stdin(Stdio::null()) + .stdout(log_file.try_clone().context("clone log file handle")?) + .stderr(log_file) + .spawn() + .context("spawn daemon process")?; + + std::thread::sleep(std::time::Duration::from_millis(700)); + + match child.try_wait() { + Ok(Some(status)) => { + let tail = tail_log(&log_path, 30).unwrap_or_default(); + anyhow::bail!( + "Launcher daemon exited immediately (status: {status}).\nLast log lines from {}:\n{tail}", + log_path.display() + ); + } + Ok(None) => { + println!("Launcher daemon started (pid {}).", child.id()); + println!("Logs: {}", log_path.display()); + } + Err(e) => return Err(e).context("check daemon status"), + } + Ok(()) +} + +pub async fn handle_service(args: ServiceArgs, config: &LauncherConfig) -> Result<()> { + match args.command { + ServiceCommands::Install { user, system } => { + let level = resolve_level(user, system); + let svc = LauncherService::new(level).context("create service manager")?; + svc.install().context("install service")?; + println!("Service installed ({level:?})."); + Ok(()) + } + ServiceCommands::Uninstall => { + let level = auto_level(); + let svc = LauncherService::new(level).context("create service manager")?; + svc.uninstall().context("uninstall service")?; + println!("Service uninstalled."); + Ok(()) + } + ServiceCommands::Start => { + let level = auto_level(); + let svc = LauncherService::new(level).context("create service manager")?; + svc.start().context("start service")?; + println!("Service started."); + Ok(()) + } + ServiceCommands::Stop => { + let level = auto_level(); + let svc = LauncherService::new(level).context("create service manager")?; + let svc_running = matches!(svc.status(), Ok(ServiceStatus::Running)); + if svc_running { + svc.stop().context("stop service")?; + } + stop_pid_file_daemon(&config.launcher.data_root.join(LAUNCHER_PID_FILENAME)); + println!("Service stopped."); + Ok(()) + } + ServiceCommands::Restart => { + let level = auto_level(); + let svc = LauncherService::new(level).context("create service manager")?; + let svc_running = matches!(svc.status(), Ok(ServiceStatus::Running)); + if svc_running { + svc.stop().context("stop service")?; + } + stop_pid_file_daemon(&config.launcher.data_root.join(LAUNCHER_PID_FILENAME)); + spawn_daemon(config)?; + println!("Service restarted."); + Ok(()) + } + ServiceCommands::Status => { + let level = auto_level(); + let svc = LauncherService::new(level).context("create service manager")?; + let status = svc.status().context("query service status")?; + match status { + ServiceStatus::Running => println!("Service is running."), + ServiceStatus::Stopped => println!("Service is stopped."), + ServiceStatus::NotInstalled => println!("Service is not installed."), + ServiceStatus::Failed(reason) => println!("Service failed: {reason}"), + } + Ok(()) + } + ServiceCommands::Run => { + if std::env::var(ENV_FOREGROUND).is_err() { + return spawn_daemon(config); + } + let store = Arc::new( + Store::new(config.launcher.data_root.clone()).context("open store")?, + ); + run_supervisor(config.clone(), store).await + } + } +} + +fn resolve_level(user: bool, system: bool) -> ServiceLevel { + if system { + return ServiceLevel::System; + } + if user { + return ServiceLevel::User; + } + auto_level() +} + +fn tail_log(path: &std::path::Path, n: usize) -> Option { + let content = std::fs::read_to_string(path).ok()?; + let lines: Vec<&str> = content.lines().collect(); + let start = lines.len().saturating_sub(n); + Some(lines[start..].join("\n")) +} + +fn stop_pid_file_daemon(pid_path: &std::path::Path) { + let Ok(pid_str) = std::fs::read_to_string(pid_path) else { return }; + let Ok(pid) = pid_str.trim().parse::() else { return }; + + #[cfg(unix)] + { + let pid_s = pid.to_string(); + let _ = std::process::Command::new("kill").args(["-TERM", &pid_s]).status(); + for _ in 0..30 { + let alive = std::process::Command::new("kill") + .args(["-0", &pid_s]) + .status() + .map(|s| s.success()) + .unwrap_or(false); + if !alive { break; } + std::thread::sleep(std::time::Duration::from_millis(100)); + } + } + #[cfg(windows)] + { + let _ = std::process::Command::new("taskkill") + .args(["/PID", &pid.to_string(), "/T", "/F"]) + .status(); + } +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/spinner.rs b/packages/launcher/crates/octobot-launcher-cli/src/spinner.rs new file mode 100644 index 0000000000..3eb8d0dbe5 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/spinner.rs @@ -0,0 +1,81 @@ +use std::future::Future; +use std::io::{IsTerminal, Write}; +use tokio::sync::mpsc::UnboundedReceiver; + +const FRAMES: &[&str] = &["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]; + +pub async fn with_spinner(label: &str, fut: F) -> T +where + F: Future, +{ + if !std::io::stderr().is_terminal() { + return fut.await; + } + + let label = label.to_string(); + let (tx, rx) = tokio::sync::oneshot::channel::<()>(); + + let handle = tokio::spawn(async move { + let mut rx = rx; + let mut i = 0usize; + loop { + eprint!("\r{} {}", FRAMES[i % FRAMES.len()], label); + std::io::stderr().flush().ok(); + i += 1; + tokio::select! { + biased; + _ = &mut rx => break, + _ = tokio::time::sleep(tokio::time::Duration::from_millis(80)) => {} + } + } + eprint!("\r\x1b[2K"); + std::io::stderr().flush().ok(); + }); + + let result = fut.await; + let _ = tx.send(()); + let _ = handle.await; + result +} + +pub async fn with_streaming_spinner( + initial: &str, + mut label_rx: UnboundedReceiver, + fut: F, +) -> T +where + F: Future, +{ + if !std::io::stderr().is_terminal() { + return fut.await; + } + + let initial = initial.to_string(); + let (done_tx, done_rx) = tokio::sync::oneshot::channel::<()>(); + + let handle = tokio::spawn(async move { + let mut done_rx = done_rx; + let mut i = 0usize; + let mut label = initial; + loop { + eprint!("\r{} {}", FRAMES[i % FRAMES.len()], label); + std::io::stderr().flush().ok(); + i += 1; + tokio::select! { + biased; + _ = &mut done_rx => break, + Some(new_label) = label_rx.recv() => { + label = new_label; + } + _ = tokio::time::sleep(tokio::time::Duration::from_millis(80)) => {} + } + } + eprint!("\r\x1b[2K"); + std::io::stderr().flush().ok(); + }); + + let result = fut.await; + let _ = done_tx.send(()); + let _ = handle.await; + result +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/supervisor.rs b/packages/launcher/crates/octobot-launcher-cli/src/supervisor.rs new file mode 100644 index 0000000000..93b25552e9 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/supervisor.rs @@ -0,0 +1,169 @@ +use std::fs::File; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use fs2::FileExt; +use octobot_launcher_api::{ApiServer, ApiState, ListenerConfig}; +use octobot_launcher_api::token_store::TokenStore; +use octobot_launcher_binary::{BinaryBackend, BinaryBackendConfig}; +use octobot_launcher_config::{ + DesiredState, LauncherConfig, BOOTSTRAP_TOKEN_FILENAME, LAUNCHER_PID_FILENAME, LOCK_FILENAME, + SOCKET_FILENAME, Store, +}; +use octobot_launcher_core::Backend; +use octobot_launcher_docker::DockerBackend; +use octobot_launcher_python::{PythonBackend, PythonBackendConfig, PythonDistMode}; +use octobot_launcher_update::{BAKED_PUBKEY_HEX, Updater}; + +use crate::write_bootstrap_token; + +pub async fn run_supervisor(config: LauncherConfig, store: Arc) -> Result<()> { + let lock_path = config.launcher.data_root.join(LOCK_FILENAME); + std::fs::create_dir_all(&config.launcher.data_root) + .with_context(|| format!("create data root {}", config.launcher.data_root.display()))?; + let lock_file = File::create(&lock_path) + .with_context(|| format!("open lock file {}", lock_path.display()))?; + lock_file.try_lock_exclusive().map_err(|_| { + anyhow::anyhow!("Another launcher instance is running") + })?; + + let pid_path = config.launcher.data_root.join(LAUNCHER_PID_FILENAME); + std::fs::write(&pid_path, std::process::id().to_string()) + .with_context(|| format!("write pid file {}", pid_path.display()))?; + + let token_store = Arc::new(TokenStore::load(store.tokens_path())); + + if let Some(raw) = token_store.bootstrap_if_empty().context("bootstrap token")? { + let bootstrap_path = config.launcher.data_root.join(BOOTSTRAP_TOKEN_FILENAME); + write_bootstrap_token(&bootstrap_path, &raw)?; + println!("Bootstrap admin token (saved to {}):", bootstrap_path.display()); + println!(" {raw}"); + println!(); + } + + let mut backends: Vec> = Vec::new(); + + if config.backends.docker.enabled { + match DockerBackend::new() { + Ok(b) => backends.push(Box::new(b)), + Err(e) => tracing::warn!("docker backend disabled: {e}"), + } + } + + if config.backends.binary.enabled { + let mut binary_backend = BinaryBackend::new(BinaryBackendConfig::default()); + if let Some(updater) = build_updater(&config, &store) { + binary_backend = binary_backend.with_updater(Arc::new(updater)); + } + backends.push(Box::new(binary_backend)); + } + + if config.backends.python.enabled { + let dist_mode = match config.backends.python.default_python_dist.as_str() { + "always" => PythonDistMode::Always, + "never" => PythonDistMode::Never, + _ => PythonDistMode::Auto, + }; + backends.push(Box::new(PythonBackend::new(PythonBackendConfig { + python_dist: dist_mode, + }))); + } + + let tcp_bind: std::net::SocketAddr = config + .launcher + .api_bind + .parse() + .with_context(|| format!("parse api_bind '{}'", config.launcher.api_bind))?; + + let unix_socket_path = { + #[cfg(unix)] + { + Some(config.launcher.data_root.join(SOCKET_FILENAME)) + } + #[cfg(not(unix))] + { + None + } + }; + + let backends = Arc::new(backends); + + let lockout = Arc::new(octobot_launcher_api::lockout::Lockout::new()); + let api_state = ApiState { + store: Arc::clone(&store), + token_store: Arc::clone(&token_store), + lockout, + backends: Arc::clone(&backends), + }; + let listener_config = ListenerConfig { + tcp_bind, + unix_socket_path, + }; + let api_server = ApiServer::new(api_state, listener_config); + + let records = store.list_instance_records()?; + for record in records { + if record.desired_state == DesiredState::Running { + let spec = record.spec.clone(); + let runtime = spec.runtime; + let backends_ref = Arc::clone(&backends); + tokio::spawn(async move { + if let Some(backend) = backends_ref.iter().find(|b| b.kind() == runtime) { + if let Err(e) = backend.start(&spec, None).await { + tracing::warn!("failed to start instance {}: {e}", spec.id); + } + } + }); + } + } + + let pending_restart = config.launcher.data_root.join("pending_restart"); + if pending_restart.exists() { + let _ = std::fs::remove_file(&pending_restart); + } + + #[cfg(unix)] + { + use tokio::signal::unix::{SignalKind, signal}; + let mut sigterm = signal(SignalKind::terminate())?; + let mut sigint = signal(SignalKind::interrupt())?; + + tokio::select! { + result = api_server.serve() => { + result.with_context(|| "API server error")?; + } + _ = sigterm.recv() => { + tracing::info!("received SIGTERM, shutting down"); + } + _ = sigint.recv() => { + tracing::info!("received SIGINT, shutting down"); + } + } + } + + #[cfg(not(unix))] + { + tokio::select! { + result = api_server.serve() => { + result.with_context(|| "API server error")?; + } + result = tokio::signal::ctrl_c() => { + result.with_context(|| "signal error")?; + tracing::info!("received Ctrl-C, shutting down"); + } + } + } + + let _ = std::fs::remove_file(&pid_path); + Ok(()) +} + +fn build_updater(config: &LauncherConfig, store: &Store) -> Option { + Updater::from_baked_config( + config.update.manifest_url.clone(), + config.update.channel.clone(), + BAKED_PUBKEY_HEX, + store.data_root().join("update-state"), + store.data_root().join("update-cache"), + ) +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/token.rs b/packages/launcher/crates/octobot-launcher-cli/src/token.rs new file mode 100644 index 0000000000..d870dd061e --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/token.rs @@ -0,0 +1,134 @@ +use anyhow::{Context, Result}; +use octobot_launcher_api::token_store::TokenStore; +use octobot_launcher_config::{LauncherConfig, STATE_DIR, TOKENS_FILENAME}; + +use crate::cli::{TokenArgs, TokenCommands}; +use crate::print_qr; + +pub fn handle_token(args: TokenArgs, config: &LauncherConfig) -> Result<()> { + let store_path = config.launcher.data_root.join(STATE_DIR).join(TOKENS_FILENAME); + let token_store = TokenStore::load(store_path); + + match args.command { + TokenCommands::List => { + let records = token_store.list(); + if records.is_empty() { + println!("No tokens configured."); + return Ok(()); + } + println!( + "{:<14} {:<20} {:<30} {:<24} {:<24} Revoked", + "ID", "Label", "Scopes", "Created", "Expires" + ); + println!("{}", "-".repeat(120)); + for r in &records { + let scopes = r.scopes.join(","); + let expires = r + .expires_at + .map_or_else(|| "never".to_string(), |t| t.to_rfc3339()); + println!( + "{:<14} {:<20} {:<30} {:<24} {:<24} {}", + r.id, + r.label, + scopes, + r.created_at.to_rfc3339(), + expires, + r.revoked, + ); + } + Ok(()) + } + TokenCommands::Create { + label, + scope, + expires_in, + } => { + let scopes = if scope.is_empty() { + octobot_launcher_api::default_scopes() + } else { + scope + }; + let expires_at = expires_in + .as_deref() + .map(parse_expires_in) + .transpose() + .context("parse --expires-in")?; + let (_record, raw) = token_store.create(label, scopes, expires_at).context("create token")?; + println!("Token created. Save this now — it will not be shown again:\n"); + println!(" {raw}\n"); + let pairing_url = format!( + "octobot://pair?host={}&token={}", + config.launcher.api_bind, raw + ); + println!("QR code for mobile pairing:"); + print_qr(&pairing_url); + println!("URL: {pairing_url}"); + Ok(()) + } + TokenCommands::Revoke { id } => { + if token_store.revoke(&id).context("revoke token")? { + println!("Token {id} revoked."); + } else { + anyhow::bail!("token '{id}' not found"); + } + Ok(()) + } + TokenCommands::Rotate { id } => { + let records = token_store.list(); + let existing = records + .iter() + .find(|r| r.id == id) + .ok_or_else(|| anyhow::anyhow!("token '{id}' not found"))? + .clone(); + token_store.revoke(&id).context("revoke token")?; + let (_new_record, new_raw) = token_store.create( + existing.label.clone(), + existing.scopes.clone(), + existing.expires_at, + ).context("create token")?; + println!("Token rotated. New token (save now — will not be shown again):\n"); + println!(" {new_raw}\n"); + Ok(()) + } + TokenCommands::ShowPairing { id } => { + let records = token_store.list(); + let found = records.iter().any(|r| r.id == id && !r.revoked); + if !found { + anyhow::bail!("token '{id}' not found or revoked"); + } + let pairing_url = format!( + "octobot://pair?host={}&token={}", + config.launcher.api_bind, id + ); + println!("Pairing URL (uses token ID — raw token not recoverable after creation):"); + println!("URL: {pairing_url}"); + print_qr(&pairing_url); + Ok(()) + } + } +} + +fn parse_expires_in(s: &str) -> Result> { + let (num_str, unit) = if let Some(n) = s.strip_suffix('d') { + (n, "d") + } else if let Some(n) = s.strip_suffix('h') { + (n, "h") + } else if let Some(n) = s.strip_suffix('m') { + (n, "m") + } else { + anyhow::bail!("unknown duration format '{s}': use 30d, 7d, 24h, etc.") + }; + + let n: i64 = num_str + .parse() + .with_context(|| format!("parse number in '{s}'"))?; + + let duration = match unit { + "d" => chrono::Duration::days(n), + "h" => chrono::Duration::hours(n), + "m" => chrono::Duration::minutes(n), + _ => unreachable!(), + }; + + Ok(chrono::Utc::now() + duration) +} diff --git a/packages/launcher/crates/octobot-launcher-cli/src/update.rs b/packages/launcher/crates/octobot-launcher-cli/src/update.rs new file mode 100644 index 0000000000..d70f1571e0 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/src/update.rs @@ -0,0 +1,36 @@ +use anyhow::{Context, Result}; +use octobot_launcher_config::LauncherConfig; + +use crate::cli::{UpdateArgs, UpdateCommands}; +use crate::client::ApiClient; +use crate::paths; + +pub async fn handle_update(args: UpdateArgs, config: &LauncherConfig, token: Option) -> Result<()> { + match args.command { + UpdateCommands::Check => { + let client = ApiClient::from_config(config, token); + let resp = client.get(paths::UPDATES_CHECK).await?; + let body: serde_json::Value = resp.json().context("parse update check")?; + println!("{}", serde_json::to_string_pretty(&body)?); + Ok(()) + } + UpdateCommands::Apply => { + let client = ApiClient::from_config(config, token); + let resp = client + .post(paths::UPDATES_APPLY, serde_json::json!({})) + .await?; + if resp.is_success() { + println!("Update applied. Restart the service to complete."); + Ok(()) + } else { + let s = resp.status; + anyhow::bail!("update failed: HTTP {s}: {}", resp.text()) + } + } + UpdateCommands::SetChannel { channel } => { + println!("Channel set to '{channel}' (edit config to persist)."); + let _ = config; + Ok(()) + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-cli/tests/cli_tests.rs b/packages/launcher/crates/octobot-launcher-cli/tests/cli_tests.rs new file mode 100644 index 0000000000..751c7a9797 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-cli/tests/cli_tests.rs @@ -0,0 +1,358 @@ +#![allow(clippy::unwrap_used, clippy::expect_used)] + +use assert_cmd::Command; +use predicates::prelude::*; + +fn cmd() -> Command { + Command::cargo_bin("octobot-launcher").unwrap() +} + +mod clap { + use super::*; + + #[test] + fn help_renders() { + cmd().arg("--help").assert().success(); + } + + #[test] + fn version_subcommand() { + let output = cmd().arg("version").assert().success(); + let stdout = String::from_utf8(output.get_output().stdout.clone()).unwrap(); + assert!(!stdout.trim().is_empty(), "version should print something"); + } + + #[test] + fn unknown_subcommand_exits_2() { + cmd() + .arg("totally-unknown-subcommand") + .assert() + .failure() + .code(2); + } + + #[test] + fn missing_required_arg_instance_add() { + cmd() + .args(["instance", "add", "--runtime", "docker"]) + .assert() + .failure() + .code(2) + .stderr(predicate::str::contains("--name")); + } +} + +mod token_tests { + use super::*; + use tempfile::TempDir; + + fn config_toml(dir: &TempDir) -> std::path::PathBuf { + let data_root = dir.path().to_str().unwrap().replace('\\', "/"); + let config = format!( + "[launcher]\napi_bind = \"127.0.0.1:7531\"\nlog_level = \"info\"\ndata_root = \"{data_root}\"\n\ + [update]\nchannel = \"stable\"\nauto_update_launcher = true\nauto_update_instances = false\n\ + check_interval_hours = 6\nmanifest_url = \"https://updates.drakkar.software/octobot-launcher/v1/manifest.json\"\n\ + [backends.docker]\nenabled = false\nsocket = \"/var/run/docker.sock\"\n\ + [backends.binary]\nenabled = false\n\ + [backends.python]\nenabled = false\ndefault_python_dist = \"auto\"\n" + ); + let path = dir.path().join("config.toml"); + std::fs::write(&path, config).unwrap(); + path + } + + #[test] + fn create_prints_raw_once() { + let dir = TempDir::new().unwrap(); + std::fs::create_dir_all(dir.path().join("state")).unwrap(); + let config_path = config_toml(&dir); + let output = cmd() + .args([ + "--config", + config_path.to_str().unwrap(), + "token", + "create", + "--label", + "test-token", + ]) + .assert() + .success(); + let stdout = String::from_utf8(output.get_output().stdout.clone()).unwrap(); + assert!( + stdout.contains("oblch_"), + "expected oblch_ prefix in output, got: {stdout}" + ); + } +} + +mod instance_tests { + use super::*; + use std::sync::Arc; + use std::thread; + use tempfile::TempDir; + + use octobot_launcher_api::{ApiServer, ApiState, ListenerConfig}; + use octobot_launcher_api::lockout::Lockout; + use octobot_launcher_api::token_store::TokenStore; + use octobot_launcher_config::Store; + use octobot_launcher_core::backend::Backend; + use octobot_launcher_core::backend::mock::MockBackend; + + struct TestServer { + port: u16, + token: String, + _dir: TempDir, + } + + fn start_test_server() -> TestServer { + let dir = TempDir::new().unwrap(); + let store = Arc::new(Store::new(dir.path().to_path_buf()).unwrap()); + let token_store = Arc::new(TokenStore::load(dir.path().join("tokens.json"))); + let (_, raw) = token_store.create("admin".into(), vec!["*".to_string()], None).unwrap(); + let lockout = Arc::new(Lockout::new()); + + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let port = listener.local_addr().unwrap().port(); + drop(listener); + + let backends: Arc>> = Arc::new(vec![ + Box::new(MockBackend::default()), + ]); + let state = ApiState { store, token_store, lockout, backends }; + let server_config = ListenerConfig { + tcp_bind: format!("127.0.0.1:{port}").parse().unwrap(), + unix_socket_path: None, + }; + let api_server = ApiServer::new(state, server_config); + + thread::spawn(move || { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + .block_on(async { let _ = api_server.serve().await; }); + }); + + thread::sleep(std::time::Duration::from_millis(100)); + TestServer { port, token: raw, _dir: dir } + } + + fn config_toml(dir: &TempDir, port: u16) -> std::path::PathBuf { + let data_root = dir.path().to_str().unwrap().replace('\\', "/"); + let config = format!( + "[launcher]\napi_bind = \"127.0.0.1:{port}\"\nlog_level = \"info\"\ndata_root = \"{data_root}\"\n\ + [update]\nchannel = \"stable\"\nauto_update_launcher = true\nauto_update_instances = false\n\ + check_interval_hours = 6\nmanifest_url = \"https://updates.drakkar.software/octobot-launcher/v1/manifest.json\"\n\ + [backends.docker]\nenabled = false\nsocket = \"/var/run/docker.sock\"\n\ + [backends.binary]\nenabled = false\n\ + [backends.python]\nenabled = false\ndefault_python_dist = \"auto\"\n" + ); + let path = dir.path().join("config.toml"); + std::fs::write(&path, config).unwrap(); + path + } + + #[test] + fn add_then_list_json_format() { + let srv = start_test_server(); + let cfg_dir = TempDir::new().unwrap(); + let config_path = config_toml(&cfg_dir, srv.port); + + let add_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "add", "--name", "test-bot", "--runtime", "docker"]) + .assert().success(); + let id = String::from_utf8(add_out.get_output().stdout.clone()).unwrap(); + assert!(!id.trim().is_empty(), "add should print instance id"); + + let list_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "list", "--json"]) + .assert().success(); + let stdout = String::from_utf8(list_out.get_output().stdout.clone()).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&stdout) + .unwrap_or_else(|e| panic!("not valid JSON: {e}\noutput: {stdout}")); + let arr = parsed.as_array().expect("expected JSON array"); + assert_eq!(arr.len(), 1); + assert_eq!(arr[0]["spec"]["name"], "test-bot"); + } + + #[test] + fn add_then_start_returns_ok() { + let srv = start_test_server(); + let cfg_dir = TempDir::new().unwrap(); + let config_path = config_toml(&cfg_dir, srv.port); + + let add_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "add", "--name", "bot", "--runtime", "binary"]) + .assert().success(); + let id = String::from_utf8(add_out.get_output().stdout.clone()).unwrap(); + let id = id.trim(); + + cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "start", id]) + .assert().success(); + } + + #[test] + fn add_then_remove_then_list_empty() { + let srv = start_test_server(); + let cfg_dir = TempDir::new().unwrap(); + let config_path = config_toml(&cfg_dir, srv.port); + + let add_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "add", "--name", "bot", "--runtime", "binary"]) + .assert().success(); + let id = String::from_utf8(add_out.get_output().stdout.clone()).unwrap(); + let id = id.trim(); + + cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "remove", id]) + .assert().success(); + + let list_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "list", "--json"]) + .assert().success(); + let stdout = String::from_utf8(list_out.get_output().stdout.clone()).unwrap(); + let arr: Vec = serde_json::from_str(&stdout).unwrap(); + assert!(arr.is_empty(), "list should be empty after remove"); + } + + #[test] + fn missing_token_returns_error() { + let srv = start_test_server(); + let cfg_dir = TempDir::new().unwrap(); + let config_path = config_toml(&cfg_dir, srv.port); + + cmd() + .args(["--config", config_path.to_str().unwrap(), + "instance", "list"]) + .assert() + .failure() + .stderr(predicate::str::contains("401")); + } + + #[test] + fn stop_all_no_instances_prints_message() { + let srv = start_test_server(); + let cfg_dir = TempDir::new().unwrap(); + let config_path = config_toml(&cfg_dir, srv.port); + + let out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "stop-all"]) + .assert().success(); + let stdout = String::from_utf8(out.get_output().stdout.clone()).unwrap(); + assert!(stdout.contains("No instances"), "expected no-instances message, got: {stdout}"); + } + + #[test] + fn stop_all_stops_all_running_instances() { + let srv = start_test_server(); + let cfg_dir = TempDir::new().unwrap(); + let config_path = config_toml(&cfg_dir, srv.port); + + for name in &["bot-a", "bot-b"] { + let add_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "add", "--name", name, "--runtime", "binary"]) + .assert().success(); + let id = String::from_utf8(add_out.get_output().stdout.clone()).unwrap(); + let id = id.trim(); + cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "start", id]) + .assert().success(); + } + + let stop_out = cmd() + .args(["--config", config_path.to_str().unwrap(), + "--token", &srv.token, + "instance", "stop-all"]) + .assert().success(); + let stdout = String::from_utf8(stop_out.get_output().stdout.clone()).unwrap(); + assert!(stdout.contains("bot-a"), "expected bot-a in output: {stdout}"); + assert!(stdout.contains("bot-b"), "expected bot-b in output: {stdout}"); + assert!(stdout.contains("stopped"), "expected 'stopped' in output: {stdout}"); + } +} + +mod doctor_tests { + use super::*; + use tempfile::TempDir; + + fn config_toml(dir: &TempDir) -> std::path::PathBuf { + let data_root = dir.path().to_str().unwrap().replace('\\', "/"); + let config = format!( + "[launcher]\napi_bind = \"127.0.0.1:7531\"\nlog_level = \"info\"\ndata_root = \"{data_root}\"\n\ + [update]\nchannel = \"stable\"\nauto_update_launcher = true\nauto_update_instances = false\n\ + check_interval_hours = 6\nmanifest_url = \"https://updates.drakkar.software/octobot-launcher/v1/manifest.json\"\n\ + [backends.docker]\nenabled = true\nsocket = \"/var/run/docker.sock\"\n\ + [backends.binary]\nenabled = true\n\ + [backends.python]\nenabled = true\ndefault_python_dist = \"auto\"\n" + ); + let path = dir.path().join("config.toml"); + std::fs::write(&path, config).unwrap(); + path + } + + #[test] + fn reports_each_backend() { + let dir = TempDir::new().unwrap(); + let config_path = config_toml(&dir); + + let output = cmd() + .args(["--config", config_path.to_str().unwrap(), "doctor"]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout.clone()).unwrap(); + assert!( + stdout.contains("Docker"), + "expected Docker in doctor output\n{stdout}" + ); + assert!( + stdout.contains("Binary"), + "expected Binary in doctor output\n{stdout}" + ); + assert!( + stdout.contains("Python"), + "expected Python in doctor output\n{stdout}" + ); + } + + #[test] + fn nonzero_exit_on_failure() { + let dir = TempDir::new().unwrap(); + let config_path = config_toml(&dir); + + let output = cmd() + .args(["--config", config_path.to_str().unwrap(), "doctor"]) + .output() + .unwrap(); + + if !output.status.success() { + return; + } + + let stdout = String::from_utf8(output.stdout.clone()).unwrap(); + if stdout.contains("[FAIL]") { + panic!("doctor reported failures but exited 0\n{stdout}"); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-config/Cargo.toml b/packages/launcher/crates/octobot-launcher-config/Cargo.toml new file mode 100644 index 0000000000..0f82569221 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-config/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "octobot-launcher-config" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +toml = "0.8" +figment = { version = "0.10", features = ["toml", "env"] } +directories = "5" +tempfile = "3" +parking_lot = "0.12" +thiserror = "2" +tracing = "0.1" +chrono = { version = "0.4", features = ["serde"] } + +[dev-dependencies] +tempfile = "3" +tokio = { version = "1", features = ["full", "test-util"] } diff --git a/packages/launcher/crates/octobot-launcher-config/src/config.rs b/packages/launcher/crates/octobot-launcher-config/src/config.rs new file mode 100644 index 0000000000..a6827fcad2 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-config/src/config.rs @@ -0,0 +1,345 @@ +use std::path::PathBuf; + +#[cfg(not(windows))] +const DOCKER_SOCKET_UNIX: &str = "/var/run/docker.sock"; +#[cfg(windows)] +const DOCKER_SOCKET_WINDOWS: &str = "npipe:////./pipe/docker_engine"; + +use figment::{ + providers::{Env, Format, Toml}, + Figment, +}; +use serde::{Deserialize, Serialize}; + +use crate::error::{ConfigError, Result}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LauncherSection { + pub api_bind: String, + pub log_level: String, + pub data_root: PathBuf, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UpdateSection { + pub channel: String, + pub auto_update_launcher: bool, + pub auto_update_instances: bool, + pub check_interval_hours: u32, + pub manifest_url: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DockerBackendConfig { + pub enabled: bool, + pub socket: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BinaryBackendConfig { + pub enabled: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PythonBackendConfig { + pub enabled: bool, + pub default_python_dist: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BackendsSection { + pub docker: DockerBackendConfig, + pub binary: BinaryBackendConfig, + pub python: PythonBackendConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LauncherConfig { + pub launcher: LauncherSection, + pub update: UpdateSection, + pub backends: BackendsSection, +} + +fn default_launcher_section() -> LauncherSection { + LauncherSection { + api_bind: "127.0.0.1:7531".to_string(), + log_level: "info".to_string(), + data_root: default_data_root(false), + } +} + +fn default_update_section() -> UpdateSection { + UpdateSection { + channel: "stable".to_string(), + auto_update_launcher: true, + auto_update_instances: false, + check_interval_hours: 6, + manifest_url: "https://updates.drakkar.software/octobot-launcher/v1/manifest.json" + .to_string(), + } +} + +fn default_backends_section() -> BackendsSection { + BackendsSection { + docker: DockerBackendConfig { + enabled: true, + #[cfg(unix)] + socket: DOCKER_SOCKET_UNIX.to_string(), + #[cfg(windows)] + socket: DOCKER_SOCKET_WINDOWS.to_string(), + #[cfg(not(any(unix, windows)))] + socket: DOCKER_SOCKET_UNIX.to_string(), + }, + binary: BinaryBackendConfig { enabled: true }, + python: PythonBackendConfig { + enabled: true, + default_python_dist: "auto".to_string(), + }, + } +} + +impl Default for LauncherConfig { + fn default() -> Self { + Self { + launcher: default_launcher_section(), + update: default_update_section(), + backends: default_backends_section(), + } + } +} + +fn default_toml_string() -> String { + let defaults = LauncherConfig::default(); + let launcher = &defaults.launcher; + let update = &defaults.update; + let backends = &defaults.backends; + + let data_root = launcher.data_root.display().to_string().replace('\\', "/"); + let docker_socket = backends.docker.socket.replace('\\', "/"); + + format!( + r#"[launcher] +api_bind = "{api_bind}" +log_level = "{log_level}" +data_root = "{data_root}" + +[update] +channel = "{channel}" +auto_update_launcher = {auto_update_launcher} +auto_update_instances = {auto_update_instances} +check_interval_hours = {check_interval_hours} +manifest_url = "{manifest_url}" + +[backends.docker] +enabled = {docker_enabled} +socket = "{docker_socket}" + +[backends.binary] +enabled = {binary_enabled} + +[backends.python] +enabled = {python_enabled} +default_python_dist = "{default_python_dist}" +"#, + api_bind = launcher.api_bind, + log_level = launcher.log_level, + data_root = data_root, + channel = update.channel, + auto_update_launcher = update.auto_update_launcher, + auto_update_instances = update.auto_update_instances, + check_interval_hours = update.check_interval_hours, + manifest_url = update.manifest_url, + docker_enabled = backends.docker.enabled, + docker_socket = docker_socket, + binary_enabled = backends.binary.enabled, + python_enabled = backends.python.enabled, + default_python_dist = backends.python.default_python_dist, + ) +} + +pub fn load_config(config_path: Option<&std::path::Path>) -> Result { + let defaults_toml = default_toml_string(); + + let mut figment = Figment::new().merge(Toml::string(&defaults_toml)); + + if let Some(path) = config_path { + if path.exists() { + figment = figment.merge(Toml::file(path)); + } + } else { + let default_path = default_config_path(false); + if default_path.exists() { + figment = figment.merge(Toml::file(&default_path)); + } + } + + figment = figment.merge(Env::prefixed("OCTOBOT_LAUNCHER__").split("__")); + + let config: LauncherConfig = figment.extract().map_err(|e| ConfigError::Load(e.to_string()))?; + + validate_config(&config)?; + + Ok(config) +} + +fn validate_config(config: &LauncherConfig) -> Result<()> { + let valid_levels = ["trace", "debug", "info", "warn", "error"]; + if !valid_levels.contains(&config.launcher.log_level.as_str()) { + return Err(ConfigError::Validation(format!( + "invalid log_level '{}': must be one of trace, debug, info, warn, error", + config.launcher.log_level + ))); + } + Ok(()) +} + +pub fn default_config_path(system: bool) -> PathBuf { + if system { + #[cfg(target_os = "linux")] + return PathBuf::from("/etc/octobot-launcher/config.toml"); + #[cfg(target_os = "macos")] + return PathBuf::from("/Library/Application Support/OctoBotLauncher/config.toml"); + #[cfg(target_os = "windows")] + { + let programdata = + std::env::var("PROGRAMDATA").unwrap_or_else(|_| "C:\\ProgramData".to_string()); + return PathBuf::from(programdata).join("OctoBotLauncher").join("config.toml"); + } + #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] + return PathBuf::from("/etc/octobot-launcher/config.toml"); + } + + if let Some(proj_dirs) = + directories::ProjectDirs::from("software", "Drakkar", "OctoBotLauncher") + { + proj_dirs.config_dir().join("config.toml") + } else { + PathBuf::from("config.toml") + } +} + +pub fn default_data_root(system: bool) -> PathBuf { + if system || is_root_user() { + #[cfg(target_os = "linux")] + return PathBuf::from("/var/lib/octobot-launcher"); + #[cfg(target_os = "macos")] + return PathBuf::from("/Library/Application Support/OctoBotLauncher"); + #[cfg(target_os = "windows")] + { + let programdata = + std::env::var("PROGRAMDATA").unwrap_or_else(|_| "C:\\ProgramData".to_string()); + return PathBuf::from(programdata).join("OctoBotLauncher"); + } + #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] + return PathBuf::from("/var/lib/octobot-launcher"); + } + + if let Some(proj_dirs) = + directories::ProjectDirs::from("software", "Drakkar", "OctoBotLauncher") + { + proj_dirs.data_dir().to_path_buf() + } else { + PathBuf::from("data") + } +} + +#[cfg(unix)] +fn is_root_user() -> bool { + std::process::Command::new("id") + .arg("-u") + .output() + .ok() + .and_then(|o| String::from_utf8(o.stdout).ok()) + .and_then(|s| s.trim().parse::().ok()) + == Some(0) +} + +#[cfg(not(unix))] +fn is_root_user() -> bool { + false +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write as IoWrite; + + fn env_lock() -> std::sync::MutexGuard<'static, ()> { + crate::TEST_ENV_LOCK.get_or_init(|| std::sync::Mutex::new(())).lock().unwrap() + } + + fn write_toml(dir: &tempfile::TempDir, content: &str) -> PathBuf { + let path = dir.path().join("config.toml"); + let mut f = std::fs::File::create(&path).expect("create config file"); + f.write_all(content.as_bytes()).expect("write config file"); + path + } + + #[test] + fn default_values_loaded() { + let _g = env_lock(); + std::env::remove_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND"); + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, ""); + let config = load_config(Some(&path)).expect("load config"); + assert_eq!(config.launcher.api_bind, "127.0.0.1:7531"); + assert_eq!(config.launcher.log_level, "info"); + assert_eq!(config.update.channel, "stable"); + assert!(config.update.auto_update_launcher); + assert!(!config.update.auto_update_instances); + assert_eq!(config.update.check_interval_hours, 6); + assert_eq!( + config.update.manifest_url, + "https://updates.drakkar.software/octobot-launcher/v1/manifest.json" + ); + assert!(config.backends.binary.enabled); + assert!(config.backends.python.enabled); + assert_eq!(config.backends.python.default_python_dist, "auto"); + } + + #[test] + fn env_var_override() { + let _g = env_lock(); + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, ""); + std::env::set_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND", "0.0.0.0:9000"); + let config = load_config(Some(&path)).expect("load config"); + std::env::remove_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND"); + assert_eq!(config.launcher.api_bind, "0.0.0.0:9000"); + } + + #[test] + fn missing_file_uses_defaults() { + let _g = env_lock(); + std::env::remove_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND"); + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = dir.path().join("nonexistent.toml"); + let config = load_config(Some(&path)).expect("load config"); + assert_eq!(config.launcher.api_bind, "127.0.0.1:7531"); + assert_eq!(config.update.channel, "stable"); + } + + #[test] + fn malformed_toml_returns_error() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, "[[[ broken toml"); + let result = load_config(Some(&path)); + assert!(matches!(result, Err(ConfigError::Load(_)))); + } + + #[test] + fn invalid_log_level_rejected() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, "[launcher]\nlog_level = \"blarg\""); + let result = load_config(Some(&path)); + assert!(matches!(result, Err(ConfigError::Validation(_)))); + } + + #[test] + fn path_locations_per_platform() { + let system_path = default_config_path(true); + let user_path = default_config_path(false); + assert!(!system_path.as_os_str().is_empty()); + assert!(!user_path.as_os_str().is_empty()); + } +} diff --git a/packages/launcher/crates/octobot-launcher-config/src/error.rs b/packages/launcher/crates/octobot-launcher-config/src/error.rs new file mode 100644 index 0000000000..3ed2c499af --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-config/src/error.rs @@ -0,0 +1,17 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ConfigError { + #[error("config load error: {0}")] + Load(String), + #[error("config validation error: {0}")] + Validation(String), + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("serialize error: {0}")] + Serialize(String), + #[error("deserialize error: {0}")] + Deserialize(String), +} + +pub type Result = std::result::Result; diff --git a/packages/launcher/crates/octobot-launcher-config/src/lib.rs b/packages/launcher/crates/octobot-launcher-config/src/lib.rs new file mode 100644 index 0000000000..401b286e92 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-config/src/lib.rs @@ -0,0 +1,162 @@ +pub mod config; +pub mod error; +pub mod record; +pub mod store; + +#[cfg(test)] +pub(crate) static TEST_ENV_LOCK: std::sync::OnceLock> = + std::sync::OnceLock::new(); + +pub use config::{ + BackendsSection, BinaryBackendConfig, DockerBackendConfig, LauncherConfig, LauncherSection, + PythonBackendConfig, UpdateSection, default_config_path, default_data_root, load_config, +}; +pub use error::{ConfigError, Result}; +pub use record::{DesiredState, InstanceRecord}; +pub use store::{ + Store, BOOTSTRAP_TOKEN_FILENAME, INSTANCES_SUBDIR, LAUNCHER_PID_FILENAME, LOCK_FILENAME, + SOCKET_FILENAME, STATE_DIR, TOKENS_FILENAME, +}; + +#[cfg(test)] +mod tests { + use super::*; + use crate::record::InstanceRecord; + use octobot_launcher_core::{InstanceId, InstanceSpec, InstanceState, RuntimeKind}; + use std::collections::BTreeMap; + use std::io::Write as IoWrite; + use std::path::PathBuf; + fn env_lock() -> std::sync::MutexGuard<'static, ()> { + crate::TEST_ENV_LOCK.get_or_init(|| std::sync::Mutex::new(())).lock().unwrap() + } + + fn write_toml(dir: &tempfile::TempDir, content: &str) -> PathBuf { + let path = dir.path().join("config.toml"); + let mut f = std::fs::File::create(&path).expect("create"); + f.write_all(content.as_bytes()).expect("write"); + path + } + + fn sample_record() -> InstanceRecord { + let id = InstanceId::new(); + InstanceRecord { + spec: InstanceSpec { + id, + name: "bot".to_string(), + runtime: RuntimeKind::Docker, + version: "1.0.0".to_string(), + data_dir: PathBuf::from("/data"), + config_dir: PathBuf::from("/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + }, + desired_state: DesiredState::Running, + last_known_state: InstanceState::Stopped, + created_at: chrono::Utc::now(), + updated_at: chrono::Utc::now(), + } + } + + #[test] + fn instance_records_list_skips_invalid_files() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let instances_dir = store.instances_dir(); + + for i in 0..3 { + let record = sample_record(); + let path = instances_dir.join(format!("valid_{i}.json")); + store.write_atomic(&path, &record).expect("write"); + } + + std::fs::write(instances_dir.join("corrupt.json"), b"{bad json").expect("write corrupt"); + + let records = store.list_instance_records().expect("list"); + assert_eq!(records.len(), 3); + } + + #[test] + fn instance_records_create_then_remove() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let record = sample_record(); + let path = store + .instances_dir() + .join(format!("{}.json", record.spec.id.0)); + + store.write_atomic(&path, &record).expect("write"); + let records = store.list_instance_records().expect("list after write"); + assert_eq!(records.len(), 1); + + store.delete(&path).expect("delete"); + let records = store.list_instance_records().expect("list after delete"); + assert!(records.is_empty()); + } + + #[test] + fn config_default_values_loaded() { + let _g = env_lock(); + std::env::remove_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND"); + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, ""); + let config = load_config(Some(&path)).expect("load"); + assert_eq!(config.launcher.api_bind, "127.0.0.1:7531"); + assert_eq!(config.launcher.log_level, "info"); + assert_eq!(config.update.channel, "stable"); + assert!(config.update.auto_update_launcher); + assert!(!config.update.auto_update_instances); + assert_eq!(config.update.check_interval_hours, 6); + assert_eq!( + config.update.manifest_url, + "https://updates.drakkar.software/octobot-launcher/v1/manifest.json" + ); + } + + #[test] + fn config_env_var_override() { + let _g = env_lock(); + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, ""); + std::env::set_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND", "0.0.0.0:9000"); + let config = load_config(Some(&path)).expect("load"); + std::env::remove_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND"); + assert_eq!(config.launcher.api_bind, "0.0.0.0:9000"); + } + + #[test] + fn config_missing_file_uses_defaults() { + let _g = env_lock(); + std::env::remove_var("OCTOBOT_LAUNCHER__LAUNCHER__API_BIND"); + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = dir.path().join("nonexistent.toml"); + let config = load_config(Some(&path)).expect("load"); + assert_eq!(config.launcher.api_bind, "127.0.0.1:7531"); + } + + #[test] + fn config_malformed_toml_returns_error() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, "[[[ broken"); + let result = load_config(Some(&path)); + assert!(matches!(result, Err(ConfigError::Load(_)))); + } + + #[test] + fn config_invalid_log_level_rejected() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let path = write_toml(&dir, "[launcher]\nlog_level = \"blarg\""); + let result = load_config(Some(&path)); + assert!(matches!(result, Err(ConfigError::Validation(_)))); + } + + #[test] + fn config_path_locations_per_platform() { + let system = default_config_path(true); + let user = default_config_path(false); + assert!(!system.as_os_str().is_empty()); + assert!(!user.as_os_str().is_empty()); + } +} diff --git a/packages/launcher/crates/octobot-launcher-config/src/record.rs b/packages/launcher/crates/octobot-launcher-config/src/record.rs new file mode 100644 index 0000000000..c63add7b6e --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-config/src/record.rs @@ -0,0 +1,17 @@ +use octobot_launcher_core::{InstanceSpec, InstanceState}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum DesiredState { + Running, + Stopped, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InstanceRecord { + pub spec: InstanceSpec, + pub desired_state: DesiredState, + pub last_known_state: InstanceState, + pub created_at: chrono::DateTime, + pub updated_at: chrono::DateTime, +} diff --git a/packages/launcher/crates/octobot-launcher-config/src/store.rs b/packages/launcher/crates/octobot-launcher-config/src/store.rs new file mode 100644 index 0000000000..b1141e3db0 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-config/src/store.rs @@ -0,0 +1,268 @@ +use std::path::{Path, PathBuf}; + +use parking_lot::Mutex; + +pub const STATE_DIR: &str = "state"; +pub const INSTANCES_SUBDIR: &str = "instances"; +pub const TOKENS_FILENAME: &str = "tokens.json"; +pub const BOOTSTRAP_TOKEN_FILENAME: &str = "bootstrap_token.txt"; +pub const SOCKET_FILENAME: &str = "launcher.sock"; +pub const LOCK_FILENAME: &str = "launcher.lock"; +pub const LAUNCHER_PID_FILENAME: &str = "launcher.pid"; +use serde::{de::DeserializeOwned, Serialize}; +use tracing::warn; + +use crate::{ + error::{ConfigError, Result}, + record::InstanceRecord, +}; + +#[derive(Debug)] +pub struct Store { + data_root: PathBuf, + write_lock: Mutex<()>, +} + +impl Store { + pub fn new(data_root: PathBuf) -> Result { + let dirs = [ + data_root.join(STATE_DIR).join(INSTANCES_SUBDIR), + data_root.clone(), + ]; + for dir in &dirs { + std::fs::create_dir_all(dir)?; + } + Ok(Self { + data_root, + write_lock: Mutex::new(()), + }) + } + + pub fn write_atomic(&self, path: &Path, value: &T) -> Result<()> { + let _guard = self.write_lock.lock(); + + let content = + serde_json::to_string_pretty(value).map_err(|e| ConfigError::Serialize(e.to_string()))?; + + let parent = path.parent().ok_or_else(|| { + ConfigError::Io(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "path has no parent", + )) + })?; + std::fs::create_dir_all(parent)?; + + let mut tmp = tempfile::Builder::new() + .prefix(".tmp") + .tempfile_in(parent)?; + + { + use std::io::Write as IoWrite; + let file = tmp.as_file_mut(); + file.write_all(content.as_bytes())?; + file.flush()?; + #[cfg(unix)] + { + sync_file(file)?; + sync_dir(parent)?; + } + } + + tmp.persist(path).map_err(|e| ConfigError::Io(e.error))?; + + Ok(()) + } + + pub fn read(&self, path: &Path) -> Result> { + match std::fs::read_to_string(path) { + Ok(content) => { + let value: T = serde_json::from_str(&content) + .map_err(|e| ConfigError::Deserialize(e.to_string()))?; + Ok(Some(value)) + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None), + Err(e) => Err(ConfigError::Io(e)), + } + } + + pub fn delete(&self, path: &Path) -> Result<()> { + match std::fs::remove_file(path) { + Ok(()) => Ok(()), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()), + Err(e) => Err(ConfigError::Io(e)), + } + } + + pub fn data_root(&self) -> &std::path::Path { + &self.data_root + } + + pub fn instances_dir(&self) -> PathBuf { + self.data_root.join(STATE_DIR).join(INSTANCES_SUBDIR) + } + + pub fn tokens_path(&self) -> PathBuf { + self.data_root.join(STATE_DIR).join(TOKENS_FILENAME) + } + + pub fn list_instance_records(&self) -> Result> { + let dir = self.instances_dir(); + let mut records = Vec::new(); + + let read_dir = match std::fs::read_dir(&dir) { + Ok(rd) => rd, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(records), + Err(e) => return Err(ConfigError::Io(e)), + }; + + for entry in read_dir { + let entry = entry?; + let path = entry.path(); + if path.extension().and_then(|e| e.to_str()) != Some("json") { + continue; + } + match self.read::(&path) { + Ok(Some(record)) => records.push(record), + Ok(None) => {} + Err(e) => { + warn!("skipping corrupt instance record {:?}: {}", path, e); + } + } + } + + Ok(records) + } +} + +#[cfg(unix)] +fn sync_file(file: &std::fs::File) -> Result<()> { + file.sync_all()?; + Ok(()) +} + +#[cfg(unix)] +fn sync_dir(path: &Path) -> Result<()> { + let dir = std::fs::File::open(path)?; + let _ = dir.sync_all(); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::record::{DesiredState, InstanceRecord}; + use octobot_launcher_core::{InstanceId, InstanceSpec, InstanceState, RuntimeKind}; + use std::collections::BTreeMap; + use std::sync::Arc; + + fn sample_record() -> InstanceRecord { + let id = InstanceId::new(); + InstanceRecord { + spec: InstanceSpec { + id, + name: "test-bot".to_string(), + runtime: RuntimeKind::Binary, + version: "1.0.0".to_string(), + data_dir: PathBuf::from("/data"), + config_dir: PathBuf::from("/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + }, + desired_state: DesiredState::Running, + last_known_state: InstanceState::Stopped, + created_at: chrono::Utc::now(), + updated_at: chrono::Utc::now(), + } + } + + #[test] + fn write_then_read_roundtrip() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let record = sample_record(); + let path = store.instances_dir().join(format!("{}.json", record.spec.id.0)); + store.write_atomic(&path, &record).expect("write"); + let back: Option = store.read(&path).expect("read"); + let back = back.expect("should have value"); + assert_eq!(back.spec.name, record.spec.name); + assert_eq!(back.spec.id, record.spec.id); + assert_eq!(back.desired_state, record.desired_state); + } + + #[test] + fn partial_write_visibility() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let record = sample_record(); + let path = store.instances_dir().join("partial.json"); + store.write_atomic(&path, &record).expect("write"); + let result: Option = store.read(&path).expect("read"); + assert!(result.is_some()); + } + + #[test] + fn tempfile_cleanup_on_drop() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let record = sample_record(); + let path = store.instances_dir().join("cleanup_test.json"); + store.write_atomic(&path, &record).expect("write"); + let tmp_count = std::fs::read_dir(store.instances_dir()) + .expect("read dir") + .filter_map(|e| e.ok()) + .filter(|e| { + e.file_name() + .to_string_lossy() + .starts_with(".tmp") + }) + .count(); + assert_eq!(tmp_count, 0); + } + + #[test] + fn missing_file_returns_none() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let path = store.instances_dir().join("nonexistent.json"); + let result: Option = store.read(&path).expect("read should succeed"); + assert!(result.is_none()); + } + + #[test] + fn corrupt_file_returns_error_not_panic() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Store::new(dir.path().to_path_buf()).expect("store"); + let path = store.instances_dir().join("corrupt.json"); + std::fs::write(&path, b"{\"truncated\":").expect("write corrupt file"); + let result: crate::error::Result> = store.read(&path); + assert!(result.is_err()); + } + + #[tokio::test] + async fn concurrent_writes_serialized() { + let dir = tempfile::TempDir::new().expect("tempdir"); + let store = Arc::new(Store::new(dir.path().to_path_buf()).expect("store")); + let path = Arc::new(store.instances_dir().join("concurrent.json")); + + let handles: Vec<_> = (0..10) + .map(|_| { + let store = Arc::clone(&store); + let path = Arc::clone(&path); + let record = sample_record(); + tokio::spawn(async move { + store.write_atomic(&path, &record).expect("write"); + }) + }) + .collect(); + + for h in handles { + h.await.expect("task"); + } + + let result: Option = store.read(&path).expect("final read"); + assert!(result.is_some()); + } +} diff --git a/packages/launcher/crates/octobot-launcher-core/Cargo.toml b/packages/launcher/crates/octobot-launcher-core/Cargo.toml new file mode 100644 index 0000000000..5cb985d7ec --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-core/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "octobot-launcher-core" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[features] +testing = [] + +[dependencies] +anyhow = "1" +async-trait = "0.1" +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +thiserror = "2" +tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros", "sync", "time", "fs", "process", "signal", "io-util"] } +tracing = "0.1" +uuid = { version = "1", features = ["v4", "serde"] } + +[dev-dependencies] +tokio = { version = "1", features = ["full", "test-util"] } diff --git a/packages/launcher/crates/octobot-launcher-core/src/backend.rs b/packages/launcher/crates/octobot-launcher-core/src/backend.rs new file mode 100644 index 0000000000..89f72c006c --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-core/src/backend.rs @@ -0,0 +1,219 @@ +use crate::error::Result; +use crate::model::{HealthStatus, InstanceId, InstanceSpec, RuntimeKind}; + +pub type ProgressSender = tokio::sync::mpsc::UnboundedSender; + +pub fn send_progress(tx: Option<&ProgressSender>, msg: impl Into) { + if let Some(tx) = tx { + let _ = tx.send(msg.into()); + } +} + +#[async_trait::async_trait] +pub trait Backend: Send + Sync + 'static { + fn kind(&self) -> RuntimeKind; + async fn probe(&self) -> Result<()>; + async fn prepare(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()>; + async fn start(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()>; + async fn stop(&self, spec: &InstanceSpec, timeout: std::time::Duration) -> Result<()>; + async fn restart( + &self, + spec: &InstanceSpec, + timeout: std::time::Duration, + progress: Option<&ProgressSender>, + ) -> Result<()>; + async fn status(&self, id: InstanceId) -> Result; + async fn update( + &self, + spec: &InstanceSpec, + target_version: &str, + progress: Option<&ProgressSender>, + ) -> Result<()>; + async fn remove(&self, spec: &InstanceSpec, purge: bool) -> Result<()>; +} + +#[cfg(feature = "testing")] +pub mod mock { + use super::*; + use crate::model::{InstanceState, RuntimeKind}; + use std::collections::HashMap; + use std::sync::Arc; + use tokio::sync::Mutex; + + #[derive(Debug, Default)] + struct Inner { + states: HashMap, + probe_result: Option, + } + + #[derive(Debug, Clone, Default)] + pub struct MockBackend { + inner: Arc>, + } + + impl MockBackend { + pub async fn set_probe_fail(&self, reason: &str) { + self.inner.lock().await.probe_result = Some(reason.to_string()); + } + + pub async fn get_state(&self, id: InstanceId) -> Option { + self.inner.lock().await.states.get(&id).cloned() + } + } + + #[async_trait::async_trait] + impl Backend for MockBackend { + fn kind(&self) -> RuntimeKind { + RuntimeKind::Binary + } + + async fn probe(&self) -> Result<()> { + let lock = self.inner.lock().await; + if let Some(reason) = &lock.probe_result { + return Err(crate::error::LauncherError::BackendUnavailable(reason.clone())); + } + Ok(()) + } + + async fn prepare(&self, spec: &InstanceSpec, _progress: Option<&ProgressSender>) -> Result<()> { + self.inner + .lock() + .await + .states + .insert(spec.id, InstanceState::Stopped); + Ok(()) + } + + async fn start(&self, spec: &InstanceSpec, _progress: Option<&ProgressSender>) -> Result<()> { + let mut lock = self.inner.lock().await; + if matches!(lock.states.get(&spec.id), Some(InstanceState::Running { .. })) { + return Err(crate::error::LauncherError::AlreadyRunning); + } + lock.states.insert( + spec.id, + InstanceState::Running { + pid_or_container: "mock-pid".into(), + started_at: chrono::Utc::now(), + }, + ); + Ok(()) + } + + async fn stop(&self, spec: &InstanceSpec, _timeout: std::time::Duration) -> Result<()> { + let id = spec.id; + let mut lock = self.inner.lock().await; + if !matches!(lock.states.get(&id), Some(InstanceState::Running { .. })) { + return Err(crate::error::LauncherError::NotRunning); + } + lock.states.insert(id, InstanceState::Stopped); + Ok(()) + } + + async fn restart( + &self, + spec: &InstanceSpec, + timeout: std::time::Duration, + progress: Option<&ProgressSender>, + ) -> Result<()> { + self.stop(spec, timeout).await?; + self.start(spec, progress).await + } + + async fn status(&self, id: InstanceId) -> Result { + let lock = self.inner.lock().await; + let state = lock + .states + .get(&id) + .cloned() + .unwrap_or(InstanceState::Stopped); + Ok(HealthStatus { + state, + uptime_seconds: None, + last_http_check: None, + }) + } + + async fn update( + &self, + spec: &InstanceSpec, + _target_version: &str, + _progress: Option<&ProgressSender>, + ) -> Result<()> { + self.inner + .lock() + .await + .states + .insert(spec.id, InstanceState::Stopped); + Ok(()) + } + + async fn remove(&self, spec: &InstanceSpec, _purge: bool) -> Result<()> { + self.inner.lock().await.states.remove(&spec.id); + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn trait_object_is_object_safe() { + fn accepts_backend(_: &dyn Backend) {} + fn accepts_boxed(_: Box) {} + let _ = accepts_backend; + let _ = accepts_boxed; + } + + #[cfg(feature = "testing")] + #[tokio::test] + async fn mock_backend_lifecycle() { + use crate::model::InstanceState; + use mock::MockBackend; + use std::collections::BTreeMap; + use std::path::PathBuf; + + let backend = MockBackend::default(); + let spec = InstanceSpec { + id: InstanceId::new(), + name: "test".into(), + runtime: RuntimeKind::Binary, + version: "1.0.0".into(), + data_dir: PathBuf::from("/tmp/data"), + config_dir: PathBuf::from("/tmp/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + }; + + backend.prepare(&spec, None).await.unwrap(); + assert!(matches!( + backend.get_state(spec.id).await.unwrap(), + InstanceState::Stopped + )); + + backend.start(&spec, None).await.unwrap(); + assert!(matches!( + backend.get_state(spec.id).await.unwrap(), + InstanceState::Running { .. } + )); + + let health = backend.status(spec.id).await.unwrap(); + assert!(matches!(health.state, InstanceState::Running { .. })); + + backend + .stop(&spec, std::time::Duration::from_secs(5)) + .await + .unwrap(); + assert!(matches!( + backend.get_state(spec.id).await.unwrap(), + InstanceState::Stopped + )); + + backend.remove(&spec, false).await.unwrap(); + assert!(backend.get_state(spec.id).await.is_none()); + } +} diff --git a/packages/launcher/crates/octobot-launcher-core/src/error.rs b/packages/launcher/crates/octobot-launcher-core/src/error.rs new file mode 100644 index 0000000000..8bfebeea85 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-core/src/error.rs @@ -0,0 +1,43 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum LauncherError { + #[error("backend unavailable: {0}")] + BackendUnavailable(String), + #[error("instance not found: {0}")] + InstanceNotFound(String), + #[error("instance already running")] + AlreadyRunning, + #[error("instance not running")] + NotRunning, + #[error("operation timed out")] + Timeout, + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("backend error: {0}")] + Backend(String), + #[error("config error: {0}")] + Config(String), + #[error("other error: {0}")] + Other(#[from] anyhow::Error), +} + +pub type Result = std::result::Result; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn variants_implement_send_sync() { + fn assert_send_sync() {} + assert_send_sync::(); + } + + #[test] + fn io_error_conversion() { + let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "file missing"); + let launcher_err: LauncherError = io_err.into(); + assert!(matches!(launcher_err, LauncherError::Io(_))); + } +} diff --git a/packages/launcher/crates/octobot-launcher-core/src/lib.rs b/packages/launcher/crates/octobot-launcher-core/src/lib.rs new file mode 100644 index 0000000000..2b88803daf --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-core/src/lib.rs @@ -0,0 +1,8 @@ +pub mod backend; +pub mod error; +pub mod model; +pub mod prelude; + +pub use error::{LauncherError, Result}; +pub use model::{HealthStatus, InstanceId, InstanceSpec, InstanceState, RuntimeKind}; +pub use backend::Backend; diff --git a/packages/launcher/crates/octobot-launcher-core/src/model.rs b/packages/launcher/crates/octobot-launcher-core/src/model.rs new file mode 100644 index 0000000000..dc238d8ca9 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-core/src/model.rs @@ -0,0 +1,176 @@ +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::net::IpAddr; +use std::path::PathBuf; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct InstanceId(pub uuid::Uuid); + +impl InstanceId { + pub fn new() -> Self { + Self(uuid::Uuid::new_v4()) + } + + pub fn short(&self) -> String { + self.0.to_string()[..8].to_string() + } +} + +impl Default for InstanceId { + fn default() -> Self { + Self::new() + } +} + +impl std::fmt::Display for InstanceId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", &self.0.to_string()[..8]) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum RuntimeKind { + Docker, + Binary, + Python, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InstanceSpec { + pub id: InstanceId, + pub name: String, + pub runtime: RuntimeKind, + pub version: String, + pub data_dir: PathBuf, + pub config_dir: PathBuf, + pub env: BTreeMap, + pub ports: Vec, + pub auto_restart: bool, + pub auto_update: bool, + pub runtime_options: serde_json::Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PortMapping { + pub host: u16, + pub container_or_internal: u16, + #[serde(default = "default_bind_addr")] + pub bind_addr: IpAddr, +} + +fn default_bind_addr() -> IpAddr { + "127.0.0.1".parse().expect("loopback is valid") +} + +impl Default for PortMapping { + fn default() -> Self { + Self { + host: 0, + container_or_internal: 0, + bind_addr: default_bind_addr(), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum InstanceState { + Stopped, + Starting, + Running { + pid_or_container: String, + started_at: chrono::DateTime, + }, + Stopping, + Failed { + reason: String, + at: chrono::DateTime, + }, + Updating, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct HealthStatus { + pub state: InstanceState, + pub uptime_seconds: Option, + pub last_http_check: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct HttpProbe { + pub url: String, + pub status_code: Option, + pub latency_ms: Option, + pub at: chrono::DateTime, +} + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_spec() -> InstanceSpec { + InstanceSpec { + id: InstanceId::new(), + name: "test-bot".into(), + runtime: RuntimeKind::Docker, + version: "2.4.42".into(), + data_dir: PathBuf::from("/data"), + config_dir: PathBuf::from("/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + } + } + + #[test] + fn serde_roundtrip_instance_spec() { + for runtime in [RuntimeKind::Docker, RuntimeKind::Binary, RuntimeKind::Python] { + let mut spec = sample_spec(); + spec.runtime = runtime; + let json = serde_json::to_string(&spec).unwrap(); + let back: InstanceSpec = serde_json::from_str(&json).unwrap(); + assert_eq!(back.name, spec.name); + assert_eq!(back.runtime, spec.runtime); + assert_eq!(back.version, spec.version); + } + } + + #[test] + fn serde_roundtrip_instance_state() { + let states = vec![ + InstanceState::Stopped, + InstanceState::Starting, + InstanceState::Running { + pid_or_container: "abc123".into(), + started_at: chrono::Utc::now(), + }, + InstanceState::Stopping, + InstanceState::Failed { + reason: "oom".into(), + at: chrono::Utc::now(), + }, + InstanceState::Updating, + ]; + for state in states { + let json = serde_json::to_string(&state).unwrap(); + let back: InstanceState = serde_json::from_str(&json).unwrap(); + assert_eq!(back, state); + } + } + + #[test] + fn port_mapping_default_bind_addr() { + let pm = PortMapping::default(); + assert_eq!(pm.bind_addr, "127.0.0.1".parse::().unwrap()); + } + + #[test] + fn instance_id_display() { + let id = InstanceId::new(); + let display = format!("{id}"); + assert_eq!(display.len(), 8); + let debug = format!("{id:?}"); + assert!(debug.contains("InstanceId")); + } +} diff --git a/packages/launcher/crates/octobot-launcher-core/src/prelude.rs b/packages/launcher/crates/octobot-launcher-core/src/prelude.rs new file mode 100644 index 0000000000..6d5ef4b61b --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-core/src/prelude.rs @@ -0,0 +1,5 @@ +pub use crate::backend::Backend; +pub use crate::error::{LauncherError, Result}; +pub use crate::model::{ + HealthStatus, HttpProbe, InstanceId, InstanceSpec, InstanceState, PortMapping, RuntimeKind, +}; diff --git a/packages/launcher/crates/octobot-launcher-docker/Cargo.toml b/packages/launcher/crates/octobot-launcher-docker/Cargo.toml new file mode 100644 index 0000000000..1f5e7476c3 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-docker/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "octobot-launcher-docker" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +bollard = { version = "0.19", features = ["chrono"] } +async-trait = "0.1" +futures-util = "0.3" +tokio = { version = "1", features = ["full"] } +tracing = "0.1" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +chrono = { version = "0.4", features = ["serde"] } diff --git a/packages/launcher/crates/octobot-launcher-docker/src/constants.rs b/packages/launcher/crates/octobot-launcher-docker/src/constants.rs new file mode 100644 index 0000000000..4b5ae1a112 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-docker/src/constants.rs @@ -0,0 +1,11 @@ +pub(crate) const LABEL_LAUNCHER: &str = "org.drakkar.octobot.launcher"; +pub(crate) const LABEL_INSTANCE_ID: &str = "org.drakkar.octobot.instance_id"; +pub(crate) const LABEL_VERSION: &str = "org.drakkar.octobot.version"; + +pub(crate) const DEFAULT_IMAGE: &str = "drakkarsoftware/octobot"; +pub(crate) const CONTAINER_PREFIX: &str = "octobot-"; +pub(crate) const MOUNT_USER_DATA: &str = "/octobot/user_data"; +pub(crate) const MOUNT_CONFIG: &str = "/octobot/tentacles_config"; + +pub(crate) const RUNTIME_OPT_IMAGE: &str = "image"; +pub(crate) const RUNTIME_OPT_IMAGE_DIGEST: &str = "image_digest"; diff --git a/packages/launcher/crates/octobot-launcher-docker/src/docker.rs b/packages/launcher/crates/octobot-launcher-docker/src/docker.rs new file mode 100644 index 0000000000..314c01a5d0 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-docker/src/docker.rs @@ -0,0 +1,347 @@ +// bollard 0.19 marks its own struct types as deprecated in favour of a +// query_parameters module that does not yet ship in 0.19.x. +#![allow(deprecated)] + +use std::time::Duration; + +use async_trait::async_trait; +use bollard::container::{ + Config, CreateContainerOptions, RemoveContainerOptions, StartContainerOptions, + StopContainerOptions, +}; +use bollard::image::CreateImageOptions; +use chrono::DateTime; +use futures_util::StreamExt; +use octobot_launcher_core::backend::{send_progress, ProgressSender}; +use octobot_launcher_core::error::{LauncherError, Result}; +use octobot_launcher_core::model::{ + HealthStatus, InstanceId, InstanceSpec, InstanceState, RuntimeKind, +}; +use octobot_launcher_core::Backend; +use tracing::info; + +use crate::helpers::{ + build_host_config, build_labels, container_name, resolve_image, +}; + +#[derive(Debug)] +pub struct DockerBackend { + docker: bollard::Docker, +} + +impl DockerBackend { + pub fn new() -> Result { + let docker = bollard::Docker::connect_with_defaults() + .map_err(|e: bollard::errors::Error| LauncherError::BackendUnavailable(e.to_string()))?; + Ok(Self { docker }) + } + + pub fn with_docker(docker: bollard::Docker) -> Self { + Self { docker } + } +} + +#[async_trait] +impl Backend for DockerBackend { + fn kind(&self) -> RuntimeKind { + RuntimeKind::Docker + } + + async fn probe(&self) -> Result<()> { + tokio::time::timeout(Duration::from_secs(2), self.docker.version()) + .await + .map_err(|_| LauncherError::BackendUnavailable("docker daemon not reachable".into()))? + .map_err(|_| LauncherError::BackendUnavailable("docker daemon not reachable".into()))?; + Ok(()) + } + + async fn prepare(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()> { + let image = resolve_image(spec); + let options = CreateImageOptions { + from_image: image.clone(), + ..Default::default() + }; + + send_progress(progress, format!("Pulling {image}")); + + let mut stream = self.docker.create_image(Some(options), None, None); + + while let Some(result) = stream.next().await { + match result { + Ok(info) => { + if let Some(status) = &info.status { + let msg = if let Some(prog) = &info.progress { + format!("{status} {prog}") + } else { + status.clone() + }; + send_progress(progress, &msg); + info!(status = ?info.status, progress = ?info.progress, "pulling image {image}"); + } + } + Err(e) => return Err(LauncherError::Backend(e.to_string())), + } + } + + Ok(()) + } + + async fn start(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()> { + let name = container_name(spec.id); + + if let Ok(inspect) = self.docker.inspect_container(&name, None::).await { + if let Some(state) = inspect.state { + if state.running == Some(true) { + return Err(LauncherError::AlreadyRunning); + } + } + } + + self.prepare(spec, progress).await?; + + let image = resolve_image(spec); + let env: Vec = spec + .env + .iter() + .map(|(k, v)| format!("{k}={v}")) + .collect(); + + let labels = build_labels(spec); + let host_config = build_host_config(spec); + + let config = Config { + image: Some(image), + env: Some(env), + labels: Some(labels), + host_config: Some(host_config), + ..Default::default() + }; + + let options = CreateContainerOptions { + name: name.clone(), + ..Default::default() + }; + + send_progress(progress, "Creating container"); + self.docker + .create_container(Some(options), config) + .await + .map_err(|e| LauncherError::Backend(e.to_string()))?; + + send_progress(progress, "Starting container"); + self.docker + .start_container(&name, None::>) + .await + .map_err(|e| LauncherError::Backend(e.to_string()))?; + + Ok(()) + } + + async fn stop(&self, spec: &InstanceSpec, timeout: Duration) -> Result<()> { + let name = container_name(spec.id); + let options = StopContainerOptions { + t: i64::try_from(timeout.as_secs()).unwrap_or(i64::MAX), + }; + self.docker + .stop_container(&name, Some(options)) + .await + .map_err(|e| match e { + bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + } => LauncherError::InstanceNotFound(name.clone()), + bollard::errors::Error::DockerResponseServerError { + status_code: 304, .. + } => LauncherError::NotRunning, + other => LauncherError::Backend(other.to_string()), + }) + } + + async fn restart( + &self, + spec: &InstanceSpec, + timeout: Duration, + progress: Option<&ProgressSender>, + ) -> Result<()> { + self.stop(spec, timeout).await?; + self.start(spec, progress).await + } + + async fn status(&self, id: InstanceId) -> Result { + let name = container_name(id); + let inspect = self + .docker + .inspect_container(&name, None::) + .await + .map_err(|e| match e { + bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + } => LauncherError::InstanceNotFound(name.clone()), + other => LauncherError::Backend(other.to_string()), + })?; + + let docker_state = inspect.state.unwrap_or_default(); + let is_running = docker_state.running.unwrap_or(false); + + let (state, uptime_seconds) = if is_running { + let started_at = docker_state + .started_at + .as_deref() + .and_then(|s| DateTime::parse_from_rfc3339(s).ok()) + .map(|dt| dt.with_timezone(&chrono::Utc)); + + let uptime = started_at.map(|sa| { + let diff = chrono::Utc::now().signed_duration_since(sa); + u64::try_from(diff.num_seconds().max(0)).unwrap_or(0) + }); + + let instance_state = InstanceState::Running { + pid_or_container: name, + started_at: started_at.unwrap_or_else(chrono::Utc::now), + }; + + (instance_state, uptime) + } else { + (InstanceState::Stopped, None) + }; + + Ok(HealthStatus { + state, + uptime_seconds, + last_http_check: None, + }) + } + + async fn update( + &self, + spec: &InstanceSpec, + target_version: &str, + progress: Option<&ProgressSender>, + ) -> Result<()> { + let updated_spec = InstanceSpec { + version: target_version.to_string(), + ..spec.clone() + }; + + self.prepare(&updated_spec, progress).await?; + match self.stop(spec, Duration::from_secs(10)).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + + let name = container_name(spec.id); + self.docker + .remove_container( + &name, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await + .map_err(|e| LauncherError::Backend(e.to_string()))?; + + self.start(&updated_spec, progress).await + } + + async fn remove(&self, spec: &InstanceSpec, purge: bool) -> Result<()> { + let name = container_name(spec.id); + + let _ = self + .docker + .stop_container(&name, Some(StopContainerOptions { t: 10 })) + .await; + + self.docker + .remove_container( + &name, + Some(RemoveContainerOptions { + v: purge, + force: true, + ..Default::default() + }), + ) + .await + .map_err(|e| match e { + bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + } => LauncherError::InstanceNotFound(name.clone()), + other => LauncherError::Backend(other.to_string()), + }) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Mutex; + + use octobot_launcher_core::model::{InstanceId, InstanceSpec, RuntimeKind}; + use std::collections::BTreeMap; + use std::path::PathBuf; + use std::sync::Arc; + + fn sample_spec_with_version(version: &str) -> InstanceSpec { + InstanceSpec { + id: InstanceId::new(), + name: "test-bot".into(), + runtime: RuntimeKind::Docker, + version: version.into(), + data_dir: PathBuf::from("/data"), + config_dir: PathBuf::from("/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + } + } + + #[derive(Debug, Default)] + struct MockDockerOps { + calls: Arc>>, + } + + impl MockDockerOps { + fn record(&self, op: &'static str) { + self.calls.lock().expect("mutex poisoned").push(op); + } + + fn pull_image(&self, _spec: &InstanceSpec) { + self.record("pull"); + } + + fn stop_container(&self, _spec: &InstanceSpec) { + self.record("stop"); + } + + fn remove_container(&self, _spec: &InstanceSpec) { + self.record("remove"); + } + + fn create_and_start_container(&self, _spec: &InstanceSpec) { + self.record("start"); + } + + fn blue_green_update(&self, spec: &InstanceSpec, target_version: &str) { + let updated = InstanceSpec { + version: target_version.to_string(), + ..spec.clone() + }; + self.pull_image(&updated); + self.stop_container(spec); + self.remove_container(spec); + self.create_and_start_container(&updated); + } + + fn get_calls(&self) -> Vec<&'static str> { + self.calls.lock().expect("mutex poisoned").clone() + } + } + + #[test] + fn update_blue_green_order() { + let ops = MockDockerOps::default(); + let spec = sample_spec_with_version("1.0.0"); + ops.blue_green_update(&spec, "2.0.0"); + assert_eq!(ops.get_calls(), vec!["pull", "stop", "remove", "start"]); + } +} diff --git a/packages/launcher/crates/octobot-launcher-docker/src/helpers.rs b/packages/launcher/crates/octobot-launcher-docker/src/helpers.rs new file mode 100644 index 0000000000..37d9a0867b --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-docker/src/helpers.rs @@ -0,0 +1,188 @@ +use std::collections::HashMap; + +use bollard::models::{HostConfig, PortBinding, RestartPolicy, RestartPolicyNameEnum}; +use octobot_launcher_core::model::{InstanceId, InstanceSpec}; + +use crate::constants::{ + CONTAINER_PREFIX, DEFAULT_IMAGE, LABEL_INSTANCE_ID, LABEL_LAUNCHER, LABEL_VERSION, + MOUNT_CONFIG, MOUNT_USER_DATA, RUNTIME_OPT_IMAGE, RUNTIME_OPT_IMAGE_DIGEST, +}; + +pub(crate) fn container_name(id: InstanceId) -> String { + format!("{CONTAINER_PREFIX}{}", id.short()) +} + +pub(crate) fn build_labels(spec: &InstanceSpec) -> HashMap { + let mut labels = HashMap::new(); + labels.insert(LABEL_LAUNCHER.to_string(), "1".to_string()); + labels.insert(LABEL_INSTANCE_ID.to_string(), spec.id.0.to_string()); + labels.insert(LABEL_VERSION.to_string(), spec.version.clone()); + labels +} + +pub(crate) fn build_binds(spec: &InstanceSpec) -> Vec { + vec![ + format!("{}:{MOUNT_USER_DATA}", spec.data_dir.display()), + format!("{}:{MOUNT_CONFIG}", spec.config_dir.display()), + ] +} + +pub(crate) fn resolve_image(spec: &InstanceSpec) -> String { + let base_image = if let Some(serde_json::Value::String(img)) = spec.runtime_options.get(RUNTIME_OPT_IMAGE) { + img.clone() + } else { + format!("{DEFAULT_IMAGE}:{}", spec.version) + }; + + if let Some(serde_json::Value::String(digest)) = spec.runtime_options.get(RUNTIME_OPT_IMAGE_DIGEST) { + format!("{base_image}@sha256:{digest}") + } else { + base_image + } +} + +pub(crate) fn build_restart_policy(auto_restart: bool) -> RestartPolicy { + if auto_restart { + RestartPolicy { + name: Some(RestartPolicyNameEnum::UNLESS_STOPPED), + maximum_retry_count: None, + } + } else { + RestartPolicy { + name: Some(RestartPolicyNameEnum::EMPTY), + maximum_retry_count: None, + } + } +} + +pub(crate) fn build_port_bindings( + spec: &InstanceSpec, +) -> HashMap>> { + let mut port_bindings: HashMap>> = HashMap::new(); + for pm in &spec.ports { + let key = format!("{}/tcp", pm.container_or_internal); + let binding = PortBinding { + host_ip: Some(pm.bind_addr.to_string()), + host_port: Some(pm.host.to_string()), + }; + port_bindings.insert(key, Some(vec![binding])); + } + port_bindings +} + +pub(crate) fn build_host_config(spec: &InstanceSpec) -> HostConfig { + HostConfig { + binds: Some(build_binds(spec)), + port_bindings: Some(build_port_bindings(spec)), + restart_policy: Some(build_restart_policy(spec.auto_restart)), + ..Default::default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use octobot_launcher_core::model::{PortMapping, RuntimeKind}; + use std::collections::BTreeMap; + use std::net::IpAddr; + use std::path::PathBuf; + + fn sample_spec() -> InstanceSpec { + InstanceSpec { + id: InstanceId::new(), + name: "test-bot".into(), + runtime: RuntimeKind::Docker, + version: "2.4.42".into(), + data_dir: PathBuf::from("/data"), + config_dir: PathBuf::from("/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + } + } + + #[test] + fn naming_container_name_from_instance_id() { + let id = InstanceId::new(); + let name = container_name(id); + assert!(name.starts_with(CONTAINER_PREFIX)); + assert_eq!(name.len(), CONTAINER_PREFIX.len() + 8); + assert_eq!(&name[CONTAINER_PREFIX.len()..], id.short()); + } + + #[test] + fn naming_label_set_includes_required_keys() { + let spec = sample_spec(); + let labels = build_labels(&spec); + assert!(labels.contains_key(LABEL_LAUNCHER)); + assert!(labels.contains_key(LABEL_INSTANCE_ID)); + assert!(labels.contains_key(LABEL_VERSION)); + assert_eq!(labels[LABEL_LAUNCHER], "1"); + assert_eq!(labels[LABEL_INSTANCE_ID], spec.id.0.to_string()); + assert_eq!(labels[LABEL_VERSION], spec.version); + } + + #[test] + fn mounts_volume_paths_are_canonicalized() { + let spec = sample_spec(); + let binds = build_binds(&spec); + assert_eq!(binds[0], format!("/data:{MOUNT_USER_DATA}")); + assert_eq!(binds[1], format!("/config:{MOUNT_CONFIG}")); + } + + #[test] + fn mounts_container_paths_are_constants() { + let spec = sample_spec(); + let binds = build_binds(&spec); + assert!(binds[0].ends_with(MOUNT_USER_DATA)); + assert!(binds[1].ends_with(MOUNT_CONFIG)); + } + + #[test] + fn ports_bind_addr_default_is_loopback() { + let pm = PortMapping::default(); + assert_eq!(pm.bind_addr, "127.0.0.1".parse::().unwrap()); + } + + #[test] + fn restart_auto_restart_maps_to_unless_stopped() { + let policy = build_restart_policy(true); + assert_eq!(policy.name, Some(RestartPolicyNameEnum::UNLESS_STOPPED)); + } + + #[test] + fn restart_no_auto_restart_maps_to_empty() { + let policy = build_restart_policy(false); + assert_eq!(policy.name, Some(RestartPolicyNameEnum::EMPTY)); + } + + #[test] + fn image_digest_pin_uses_at_form() { + let mut spec = sample_spec(); + spec.version = "2.4.42".into(); + spec.runtime_options = serde_json::json!({ + RUNTIME_OPT_IMAGE_DIGEST: "abc123" + }); + let image = resolve_image(&spec); + assert_eq!(image, format!("{DEFAULT_IMAGE}:2.4.42@sha256:abc123")); + } + + #[test] + fn image_custom_image_override() { + let mut spec = sample_spec(); + spec.runtime_options = serde_json::json!({ + RUNTIME_OPT_IMAGE: "custom/octobot:latest" + }); + let image = resolve_image(&spec); + assert_eq!(image, "custom/octobot:latest"); + } + + #[test] + fn image_default_uses_version() { + let spec = sample_spec(); + let image = resolve_image(&spec); + assert_eq!(image, format!("{DEFAULT_IMAGE}:2.4.42")); + } +} diff --git a/packages/launcher/crates/octobot-launcher-docker/src/lib.rs b/packages/launcher/crates/octobot-launcher-docker/src/lib.rs new file mode 100644 index 0000000000..70dcf1b898 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-docker/src/lib.rs @@ -0,0 +1,5 @@ +mod constants; +mod docker; +mod helpers; + +pub use docker::DockerBackend; diff --git a/packages/launcher/crates/octobot-launcher-python/Cargo.toml b/packages/launcher/crates/octobot-launcher-python/Cargo.toml new file mode 100644 index 0000000000..ac4b9d676f --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-python/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "octobot-launcher-python" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +octobot-launcher-update = { path = "../octobot-launcher-update" } +async-trait = "0.1" +tokio = { version = "1", features = ["full"] } +sysinfo = "0.32" +which = "7" +tracing = "0.1" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +chrono = { version = "0.4", features = ["serde"] } +sha2 = "0.10" + +[target.'cfg(unix)'.dependencies] +nix = { version = "0.29", features = ["signal", "process"] } + +[target.'cfg(windows)'.dependencies] +windows = { version = "0.58", features = ["Win32_System_Console", "Win32_System_Threading", "Win32_Foundation"] } + +[dev-dependencies] +tempfile = "3" +tokio = { version = "1", features = ["full"] } diff --git a/packages/launcher/crates/octobot-launcher-python/src/discovery.rs b/packages/launcher/crates/octobot-launcher-python/src/discovery.rs new file mode 100644 index 0000000000..c7c6767450 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-python/src/discovery.rs @@ -0,0 +1,266 @@ +use std::path::{Path, PathBuf}; + +pub(crate) async fn check_python_version(path: &Path) -> Option<(u32, u32)> { + let output = tokio::process::Command::new(path) + .arg("--version") + .output() + .await + .ok()?; + + let raw = if output.stdout.is_empty() { + output.stderr + } else { + output.stdout + }; + + let text = String::from_utf8_lossy(&raw); + let version_str = text.trim().strip_prefix("Python ")?; + let mut parts = version_str.splitn(3, '.'); + let major: u32 = parts.next()?.parse().ok()?; + let minor: u32 = parts.next()?.parse().ok()?; + Some((major, minor)) +} + +pub(crate) async fn discover_python( + data_dir: &Path, + runtime_options: &serde_json::Value, +) -> Option { + if let Some(explicit) = runtime_options + .get("python_path") + .and_then(|v| v.as_str()) + { + let p = PathBuf::from(explicit); + if p.exists() { + return Some(p); + } + return None; + } + + let embedded_names = [ + "python3.12", + "python3.11", + "python3.10", + "python3", + "python", + ]; + let embedded_base = data_dir.join("python").join("bin"); + for name in &embedded_names { + let candidate = embedded_base.join(name); + if candidate.exists() { + return Some(candidate); + } + } + + let search_dirs = python_search_dirs(); + let system_names = ["python3.12", "python3.11", "python3.10", "python3"]; + for name in &system_names { + if let Ok(path) = which::which_in(name, Some(&search_dirs), ".") { + if let Some((major, minor)) = check_python_version(&path).await { + if (major, minor) >= (3, 10) { + return Some(path); + } + } + } + } + + None +} + +/// Build an extended PATH for Python discovery that works even when the +/// launcher runs as a daemon with a minimal system PATH. +/// +/// Combines the current PATH with well-known package manager and version +/// manager locations, deduplicating entries while preserving order. +pub(crate) fn python_search_dirs() -> std::ffi::OsString { + let mut dirs: Vec = Vec::new(); + + // Existing PATH first so user overrides remain authoritative. + if let Ok(existing) = std::env::var("PATH") { + dirs.extend(std::env::split_paths(&existing)); + } + + // pyenv shims — prepend so pyenv-selected version wins over system Python. + if let Ok(home) = std::env::var("HOME") { + let shims = PathBuf::from(&home).join(".pyenv").join("shims"); + if shims.is_dir() { + dirs.insert(0, shims); + } + // asdf Python shims + let asdf_shims = PathBuf::from(&home).join(".asdf").join("shims"); + if asdf_shims.is_dir() { + dirs.insert(0, asdf_shims); + } + } + + // Well-known package manager directories not always in a daemon's PATH. + for extra in &[ + "/opt/homebrew/bin", // Homebrew on Apple Silicon + "/usr/local/bin", // Homebrew on Intel / manual installs + "/opt/local/bin", // MacPorts + ] { + let p = PathBuf::from(extra); + if p.is_dir() && !dirs.contains(&p) { + dirs.push(p); + } + } + + std::env::join_paths(dirs).unwrap_or_default() +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + async fn require_python() -> Option { + which::which("python3") + .ok() + .or_else(|| which::which("python").ok()) + } + + #[tokio::test] + async fn explicit_path_override_wins() { + let python = match require_python().await { + Some(p) => p, + None => return, + }; + let opts = serde_json::json!({ "python_path": python.to_str().unwrap() }); + let result = discover_python(Path::new("/nonexistent"), &opts).await; + assert_eq!(result, Some(python)); + } + + #[tokio::test] + async fn embedded_path_preferred_over_system() { + use std::os::unix::fs::PermissionsExt; + let tmp = tempfile::tempdir().unwrap(); + let bin_dir = tmp.path().join("python").join("bin"); + std::fs::create_dir_all(&bin_dir).unwrap(); + let fake_python = bin_dir.join("python3"); + std::fs::write(&fake_python, b"#!/bin/sh\necho 'Python 3.10.0'\n").unwrap(); + let mut perms = std::fs::metadata(&fake_python).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&fake_python, perms).unwrap(); + + let result = discover_python(tmp.path(), &serde_json::Value::Null).await; + assert_eq!(result, Some(fake_python)); + } + + #[tokio::test] + async fn version_floor_enforced() { + use std::os::unix::fs::PermissionsExt; + let tmp = tempfile::tempdir().unwrap(); + let fake_python = tmp.path().join("python_old"); + std::fs::write(&fake_python, b"#!/bin/sh\necho 'Python 3.9.0'\n").unwrap(); + let mut perms = std::fs::metadata(&fake_python).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&fake_python, perms).unwrap(); + + let version = check_python_version(&fake_python).await; + assert_eq!(version, Some((3, 9))); + assert!(version.unwrap() < (3, 10)); + } + + #[tokio::test] + async fn no_python_available_returns_unavailable() { + let opts = + serde_json::json!({ "python_path": "/nonexistent/path/to/python_does_not_exist" }); + let result = discover_python(Path::new("/nonexistent"), &opts).await; + assert!(result.is_none()); + } + + #[tokio::test] + async fn finds_python_via_extended_path_when_not_in_minimal_path() { + use std::os::unix::fs::PermissionsExt; + + // Simulate a daemon's minimal PATH containing only /nonexistent. + let tmp = tempfile::tempdir().unwrap(); + let bin_dir = tmp.path().join("bin"); + std::fs::create_dir_all(&bin_dir).unwrap(); + let fake_python = bin_dir.join("python3.10"); + std::fs::write(&fake_python, b"#!/bin/sh\necho 'Python 3.10.0'\n").unwrap(); + let mut perms = std::fs::metadata(&fake_python).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&fake_python, perms).unwrap(); + + // Inject our fake bin dir as if it were /opt/homebrew/bin by placing + // it in PATH only for this search (simulate via which_in directly). + let search = std::env::join_paths([&bin_dir]).unwrap(); + let result = which::which_in("python3.10", Some(&search), ".").unwrap(); + assert_eq!(result, fake_python); + + // Verify the version check logic accepts it. + let version = check_python_version(&result).await; + assert_eq!(version, Some((3, 10))); + } + + #[test] + fn search_dirs_includes_path_entries() { + let original = std::env::var("PATH").ok(); + std::env::set_var("PATH", "/usr/bin:/bin"); + let dirs = python_search_dirs(); + let dirs_str = dirs.to_string_lossy().to_string(); + assert!(dirs_str.contains("/usr/bin"), "should include PATH entries: {dirs_str}"); + if let Some(orig) = original { + std::env::set_var("PATH", orig); + } + } + + #[test] + fn search_dirs_deduplicates_entries() { + let original = std::env::var("PATH").ok(); + // Put /opt/homebrew/bin in PATH; search_dirs should not duplicate it. + std::env::set_var("PATH", "/opt/homebrew/bin:/usr/bin"); + let dirs = python_search_dirs(); + let dirs_str = dirs.to_string_lossy().to_string(); + let count = dirs_str.split(':').filter(|s| *s == "/opt/homebrew/bin").count(); + assert_eq!(count, 1, "/opt/homebrew/bin should appear exactly once: {dirs_str}"); + if let Some(orig) = original { + std::env::set_var("PATH", orig); + } + } + + #[test] + fn search_dirs_puts_pyenv_shims_first() { + use std::os::unix::fs::PermissionsExt; + let tmp = tempfile::tempdir().unwrap(); + let shims = tmp.path().join(".pyenv").join("shims"); + std::fs::create_dir_all(&shims).unwrap(); + + let original_home = std::env::var("HOME").ok(); + let original_path = std::env::var("PATH").ok(); + std::env::set_var("HOME", tmp.path()); + std::env::set_var("PATH", "/usr/bin"); + + let dirs = python_search_dirs(); + let dirs_str = dirs.to_string_lossy().to_string(); + let first = dirs_str.split(':').next().unwrap_or(""); + assert_eq!(first, shims.to_str().unwrap(), "pyenv shims must be first: {dirs_str}"); + + if let Some(h) = original_home { std::env::set_var("HOME", h); } + if let Some(p) = original_path { std::env::set_var("PATH", p); } + } + + #[tokio::test] + async fn discovers_python_in_extra_dir_not_in_path() { + use std::os::unix::fs::PermissionsExt; + let tmp = tempfile::tempdir().unwrap(); + let extra_bin = tmp.path().join("extra").join("bin"); + std::fs::create_dir_all(&extra_bin).unwrap(); + let fake_python = extra_bin.join("python3.11"); + std::fs::write(&fake_python, b"#!/bin/sh\necho 'Python 3.11.0'\n").unwrap(); + let mut perms = std::fs::metadata(&fake_python).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&fake_python, perms).unwrap(); + + // With only /nonexistent in PATH the normal which() would miss it. + let original = std::env::var("PATH").ok(); + std::env::set_var("PATH", "/nonexistent"); + + // Searching via the extra dir directly should find it. + let search = std::env::join_paths([&extra_bin]).unwrap(); + let found = which::which_in("python3.11", Some(&search), ".").ok(); + assert_eq!(found.as_deref(), Some(fake_python.as_path())); + + if let Some(orig) = original { std::env::set_var("PATH", orig); } + } +} diff --git a/packages/launcher/crates/octobot-launcher-python/src/lib.rs b/packages/launcher/crates/octobot-launcher-python/src/lib.rs new file mode 100644 index 0000000000..8d77169b36 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-python/src/lib.rs @@ -0,0 +1,5 @@ +pub mod discovery; +pub mod python; +pub mod venv; + +pub use python::{PythonBackend, PythonBackendConfig, PythonDistMode}; diff --git a/packages/launcher/crates/octobot-launcher-python/src/python.rs b/packages/launcher/crates/octobot-launcher-python/src/python.rs new file mode 100644 index 0000000000..e486812622 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-python/src/python.rs @@ -0,0 +1,547 @@ +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use std::sync::Arc; +use std::time::Duration; + +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use octobot_launcher_core::backend::{send_progress, ProgressSender}; +use octobot_launcher_core::error::{LauncherError, Result}; +use octobot_launcher_core::model::{HealthStatus, InstanceId, InstanceSpec, InstanceState, RuntimeKind}; +use octobot_launcher_core::Backend; +use tokio::process::Child; +use tokio::sync::Mutex; +use tracing::{info, warn}; + +use crate::discovery::discover_python; +use crate::venv::{create_venv, pip_install, pip_install_editable, venv_octobot, venv_pip, venv_python}; + +fn venv_dir(data_dir: &Path) -> PathBuf { + data_dir.join("venv") +} + +fn pid_file_path(data_dir: &Path) -> PathBuf { + data_dir.join("octobot.pid") +} + +fn stdio_log_path(data_dir: &Path) -> PathBuf { + data_dir.join("logs").join("launcher-stdio.log") +} + +fn write_pid_file(path: &Path, pid: u32) -> std::io::Result<()> { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(path, pid.to_string()) +} + +fn remove_pid_file(path: &Path) { + if path.exists() { + let _ = std::fs::remove_file(path); + } +} + +struct Inner { + child: Option, + started_at: Option>, + pid: Option, + data_dir: Option, + runtime_options: Option, +} + +impl std::fmt::Debug for Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Inner") + .field("pid", &self.pid) + .field("started_at", &self.started_at) + .field("data_dir", &self.data_dir) + .finish_non_exhaustive() + } +} + +impl Inner { + fn new() -> Self { + Self { + child: None, + started_at: None, + pid: None, + data_dir: None, + runtime_options: None, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PythonDistMode { + Auto, + Always, + Never, +} + +#[derive(Debug)] +pub struct PythonBackendConfig { + pub python_dist: PythonDistMode, +} + +impl Default for PythonBackendConfig { + fn default() -> Self { + Self { + python_dist: PythonDistMode::Auto, + } + } +} + +#[derive(Debug)] +pub struct PythonBackend { + #[allow(dead_code)] + config: PythonBackendConfig, + inner: Arc>, + start_lock: Arc>, +} + +impl PythonBackend { + pub fn new(config: PythonBackendConfig) -> Self { + Self { + config, + inner: Arc::new(Mutex::new(Inner::new())), + start_lock: Arc::new(Mutex::new(())), + } + } +} + +impl Default for PythonBackend { + fn default() -> Self { + Self::new(PythonBackendConfig::default()) + } +} + +#[async_trait] +impl Backend for PythonBackend { + fn kind(&self) -> RuntimeKind { + RuntimeKind::Python + } + + async fn probe(&self) -> Result<()> { + let python = discover_python(Path::new("."), &serde_json::Value::Null).await; + match python { + Some(_) => Ok(()), + None => Err(LauncherError::BackendUnavailable( + "no Python ≥ 3.10 found".into(), + )), + } + } + + async fn prepare(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()> { + let venv = venv_dir(&spec.data_dir); + let logs_dir = spec.data_dir.join("logs"); + std::fs::create_dir_all(&venv)?; + std::fs::create_dir_all(&logs_dir)?; + + send_progress(progress, "Discovering Python interpreter"); + let python = discover_python(&spec.data_dir, &spec.runtime_options) + .await + .ok_or_else(|| LauncherError::BackendUnavailable("no Python ≥ 3.10 found".into()))?; + + let venv_py = venv_python(&venv); + if !venv_py.exists() { + send_progress(progress, "Creating virtual environment"); + create_venv(&python, &venv) + .await + .map_err(|e| LauncherError::Backend(format!("create_venv failed: {e}")))?; + } + + let pip = venv_pip(&venv); + send_progress(progress, "Upgrading pip"); + pip_install(&pip, "pip").await?; + + let source_dir = spec + .runtime_options + .get("source_dir") + .and_then(|v| v.as_str()) + .map(PathBuf::from); + + if let Some(src) = source_dir { + send_progress(progress, format!("Installing OctoBot from {}", src.display())); + pip_install_editable(&pip, &src).await?; + } else { + let pkg = if spec.version == "latest" { + "OctoBot".to_string() + } else { + format!("OctoBot=={}", spec.version) + }; + send_progress(progress, format!("Installing {pkg}")); + pip_install(&pip, &pkg).await?; + } + + Ok(()) + } + + async fn start(&self, spec: &InstanceSpec, progress: Option<&ProgressSender>) -> Result<()> { + let _start_guard = self + .start_lock + .try_lock() + .map_err(|_| LauncherError::AlreadyRunning)?; + { + let mut inner = self.inner.lock().await; + if let Some(ref mut child) = inner.child { + if let Ok(Some(_)) = child.try_wait() { + inner.child = None; + inner.started_at = None; + inner.pid = None; + } else { + return Err(LauncherError::AlreadyRunning); + } + } + } + + self.prepare(spec, progress).await?; + + let venv = venv_dir(&spec.data_dir); + let bin = venv_octobot(&venv); + + let mut cmd = tokio::process::Command::new(&bin); + cmd.current_dir(&spec.data_dir); + + for (k, v) in &spec.env { + cmd.env(k, v); + } + + #[cfg(windows)] + { + use std::os::windows::process::CommandExt; + const CREATE_NEW_PROCESS_GROUP: u32 = 0x0000_0200; + cmd.creation_flags(CREATE_NEW_PROCESS_GROUP); + } + + let log_path = stdio_log_path(&spec.data_dir); + if let Some(parent) = log_path.parent() { + std::fs::create_dir_all(parent)?; + } + let log_file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&log_path)?; + let log_file2 = log_file.try_clone()?; + cmd.stdout(Stdio::from(log_file)); + cmd.stderr(Stdio::from(log_file2)); + + cmd.kill_on_drop(false); + + send_progress(progress, "Starting OctoBot"); + let mut child = cmd.spawn()?; + let pid = child.id().ok_or_else(|| { + LauncherError::Backend("failed to get child PID after spawn".to_string()) + })?; + + write_pid_file(&pid_file_path(&spec.data_dir), pid)?; + + tokio::time::sleep(Duration::from_millis(300)).await; + if let Ok(Some(exit_status)) = child.try_wait() { + return Err(LauncherError::Backend(format!( + "OctoBot exited immediately ({exit_status}), check: {}", + log_path.display() + ))); + } + + let mut inner = self.inner.lock().await; + inner.pid = Some(pid); + inner.started_at = Some(Utc::now()); + inner.data_dir = Some(spec.data_dir.clone()); + inner.runtime_options = Some(spec.runtime_options.clone()); + inner.child = Some(child); + + info!(pid, bin = %bin.display(), "octobot python started"); + Ok(()) + } + + async fn stop(&self, spec: &InstanceSpec, timeout: Duration) -> Result<()> { + let pid = { + let inner = self.inner.lock().await; + if inner.child.is_none() { + let pid_path = pid_file_path(&spec.data_dir); + return if pid_path.exists() { + if let Ok(pid_str) = std::fs::read_to_string(&pid_path) { + if let Ok(pid) = pid_str.trim().parse::() { + let _ = send_graceful_signal(pid); + tokio::time::sleep(Duration::from_millis(300)).await; + } + } + remove_pid_file(&pid_path); + Ok(()) + } else { + Err(LauncherError::NotRunning) + }; + } + inner.pid.unwrap_or(0) + }; + + send_graceful_signal(pid)?; + + let (mut child, data_dir) = { + let mut inner = self.inner.lock().await; + let child = match inner.child.take() { + Some(c) => c, + None => return Ok(()), + }; + let data_dir = inner.data_dir.take(); + inner.started_at = None; + inner.pid = None; + inner.runtime_options = None; + (child, data_dir) + }; + + if let Some(ref dir) = data_dir { + remove_pid_file(&pid_file_path(dir)); + } + + let wait_result = tokio::time::timeout(timeout, child.wait()).await; + + match wait_result { + Ok(Ok(status)) => { + info!(pid, ?status, "octobot python stopped gracefully"); + } + Ok(Err(e)) => { + warn!(pid, error = %e, "error waiting for child process"); + } + Err(_) => { + warn!(pid, "graceful stop timed out, killing"); + let _ = child.kill().await; + let _ = child.wait().await; + } + } + + Ok(()) + } + + async fn restart( + &self, + spec: &InstanceSpec, + timeout: Duration, + progress: Option<&ProgressSender>, + ) -> Result<()> { + match self.stop(spec, timeout).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + remove_pid_file(&pid_file_path(&spec.data_dir)); + self.start(spec, progress).await + } + + async fn status(&self, _id: InstanceId) -> Result { + let mut inner = self.inner.lock().await; + + if let Some(ref mut child) = inner.child { + if let Ok(Some(_)) = child.try_wait() { + inner.child = None; + inner.started_at = None; + inner.pid = None; + } + } + + match (inner.child.as_ref(), inner.started_at) { + (Some(_), Some(started_at)) => { + let pid = inner.pid.unwrap_or(0); + let uptime = Utc::now() + .signed_duration_since(started_at) + .num_seconds() + .max(0); + let uptime = u64::try_from(uptime).unwrap_or(0); + Ok(HealthStatus { + state: InstanceState::Running { + pid_or_container: pid.to_string(), + started_at, + }, + uptime_seconds: Some(uptime), + last_http_check: None, + }) + } + _ => Ok(HealthStatus { + state: InstanceState::Stopped, + uptime_seconds: None, + last_http_check: None, + }), + } + } + + async fn update( + &self, + spec: &InstanceSpec, + target_version: &str, + progress: Option<&ProgressSender>, + ) -> Result<()> { + match self.stop(spec, Duration::from_secs(10)).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + + let venv = venv_dir(&spec.data_dir); + let pip = venv_pip(&venv); + + send_progress(progress, format!("Installing OctoBot=={target_version}")); + pip_install(&pip, &format!("OctoBot=={target_version}")).await?; + + self.start(spec, progress).await + } + + async fn remove(&self, spec: &InstanceSpec, purge: bool) -> Result<()> { + let data_dir = { + let inner = self.inner.lock().await; + inner.data_dir.clone().or_else(|| Some(spec.data_dir.clone())) + }; + + match self.stop(spec, Duration::from_secs(10)).await { + Ok(()) | Err(LauncherError::NotRunning) => {} + Err(e) => return Err(e), + } + + if let Some(dir) = data_dir { + remove_pid_file(&pid_file_path(&dir)); + + if purge { + let venv = venv_dir(&dir); + let log_dir = dir.join("logs"); + if venv.exists() { + std::fs::remove_dir_all(&venv)?; + } + if log_dir.exists() { + std::fs::remove_dir_all(&log_dir)?; + } + } + } + + Ok(()) + } +} + +fn send_graceful_signal(pid: u32) -> Result<()> { + #[cfg(unix)] + { + match nix::sys::signal::kill( + nix::unistd::Pid::from_raw(i32::try_from(pid).unwrap_or(i32::MAX)), + nix::sys::signal::Signal::SIGTERM, + ) { + Ok(()) | Err(nix::errno::Errno::ESRCH) => {} + Err(e) => return Err(LauncherError::Backend(format!("SIGTERM failed: {e}"))), + } + } + #[cfg(windows)] + { + use windows::Win32::System::Console::GenerateConsoleCtrlEvent; + unsafe { + GenerateConsoleCtrlEvent( + windows::Win32::System::Console::CTRL_BREAK_EVENT, + pid, + ) + .map_err(|e| LauncherError::Backend(format!("GenerateConsoleCtrlEvent failed: {e}")))?; + } + } + #[cfg(not(any(unix, windows)))] + let _ = pid; + Ok(()) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use std::collections::BTreeMap; + + fn make_spec(data_dir: &Path) -> InstanceSpec { + InstanceSpec { + id: InstanceId::new(), + name: "test-bot".into(), + runtime: RuntimeKind::Python, + version: "2.4.0".into(), + data_dir: data_dir.to_path_buf(), + config_dir: data_dir.to_path_buf(), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + } + } + + #[tokio::test] + async fn stop_when_not_running_returns_error() { + let tmp = tempfile::tempdir().unwrap(); + let backend = PythonBackend::default(); + let spec = make_spec(tmp.path()); + let result = backend.stop(&spec, Duration::from_secs(5)).await; + assert!(matches!(result, Err(LauncherError::NotRunning))); + } + + #[test] + fn default_config_is_auto_dist() { + let backend = PythonBackend::default(); + assert_eq!(backend.config.python_dist, PythonDistMode::Auto); + } + + #[tokio::test] + async fn prepare_emits_progress_messages() { + let python = match which::which("python3") + .ok() + .or_else(|| which::which("python").ok()) + { + Some(p) => p, + None => return, + }; + + let tmp = tempfile::tempdir().unwrap(); + let venv_dir = tmp.path().join("venv"); + crate::venv::create_venv(&python, &venv_dir).await.unwrap(); + + let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::(); + let backend = PythonBackend::default(); + let mut spec = make_spec(tmp.path()); + spec.version = "latest".into(); + + let _ = backend.prepare(&spec, Some(&tx)).await; + drop(tx); + + let mut messages = vec![]; + while let Ok(msg) = rx.try_recv() { + messages.push(msg); + } + assert!( + messages.iter().any(|m| m.contains("Discovering")), + "expected 'Discovering' in progress messages: {messages:?}" + ); + } + + mod update { + use super::*; + + #[cfg(unix)] + #[tokio::test] + async fn version_change_reflected() { + use crate::venv::{create_venv, pip_install}; + + let python = match which::which("python3") + .ok() + .or_else(|| which::which("python").ok()) + { + Some(p) => p, + None => return, + }; + + let tmp = tempfile::tempdir().unwrap(); + let venv = tmp.path().join("venv"); + create_venv(&python, &venv).await.unwrap(); + let pip = crate::venv::venv_pip(&venv); + + let result = pip_install(&pip, "OctoBot==2.4.0").await; + if result.is_err() { + return; + } + + let output = tokio::process::Command::new(&pip) + .args(["show", "OctoBot"]) + .output() + .await + .unwrap(); + let text = String::from_utf8_lossy(&output.stdout); + assert!(text.contains("2.4.0"), "expected version 2.4.0 in: {text}"); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-python/src/venv.rs b/packages/launcher/crates/octobot-launcher-python/src/venv.rs new file mode 100644 index 0000000000..c079a5ce07 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-python/src/venv.rs @@ -0,0 +1,149 @@ +use std::path::{Path, PathBuf}; + +use octobot_launcher_core::error::{LauncherError, Result}; + +pub(crate) async fn create_venv(python: &Path, venv_dir: &Path) -> std::io::Result<()> { + let status = tokio::process::Command::new(python) + .args(["-m", "venv"]) + .arg(venv_dir) + .status() + .await?; + + if !status.success() { + return Err(std::io::Error::other(format!( + "python -m venv exited with status {}", + status.code().unwrap_or(-1) + ))); + } + Ok(()) +} + +pub(crate) fn venv_python(venv_dir: &Path) -> PathBuf { + #[cfg(windows)] + return venv_dir.join("Scripts").join("python.exe"); + #[cfg(not(windows))] + return venv_dir.join("bin").join("python"); +} + +pub(crate) fn venv_pip(venv_dir: &Path) -> PathBuf { + #[cfg(windows)] + return venv_dir.join("Scripts").join("pip.exe"); + #[cfg(not(windows))] + return venv_dir.join("bin").join("pip"); +} + +pub(crate) fn venv_octobot(venv_dir: &Path) -> PathBuf { + #[cfg(windows)] + return venv_dir.join("Scripts").join("OctoBot.exe"); + #[cfg(not(windows))] + return venv_dir.join("bin").join("OctoBot"); +} + +pub(crate) async fn pip_install(pip: &Path, package_spec: &str) -> Result<()> { + let status = tokio::process::Command::new(pip) + .args(["install", "--upgrade", package_spec]) + .status() + .await + .map_err(|e| LauncherError::Backend(format!("pip install failed to spawn: {e}")))?; + + if !status.success() { + return Err(LauncherError::Backend(format!( + "pip install failed: exit code {}", + status.code().unwrap_or(-1) + ))); + } + Ok(()) +} + +pub(crate) async fn pip_install_editable(pip: &Path, source_dir: &Path) -> Result<()> { + let status = tokio::process::Command::new(pip) + .args(["install", "-e"]) + .arg(source_dir) + .status() + .await + .map_err(|e| LauncherError::Backend(format!("pip install -e failed to spawn: {e}")))?; + + if !status.success() { + return Err(LauncherError::Backend(format!( + "pip install -e failed: exit code {}", + status.code().unwrap_or(-1) + ))); + } + Ok(()) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + async fn require_python() -> Option { + which::which("python3") + .ok() + .or_else(|| which::which("python").ok()) + } + + #[tokio::test] + async fn create_venv_creates_pip() { + let python = match require_python().await { + Some(p) => p, + None => return, + }; + let tmp = tempfile::tempdir().unwrap(); + create_venv(&python, tmp.path()).await.unwrap(); + let pip = venv_pip(tmp.path()); + assert!(pip.exists(), "pip not found at {}", pip.display()); + } + + #[tokio::test] + async fn idempotent_prepare() { + let python = match require_python().await { + Some(p) => p, + None => return, + }; + let tmp = tempfile::tempdir().unwrap(); + create_venv(&python, tmp.path()).await.unwrap(); + create_venv(&python, tmp.path()).await.unwrap(); + let pip = venv_pip(tmp.path()); + assert!(pip.exists()); + } + + #[tokio::test] + async fn pip_install_failure_preserves_old_state() { + let python = match require_python().await { + Some(p) => p, + None => return, + }; + let tmp = tempfile::tempdir().unwrap(); + create_venv(&python, tmp.path()).await.unwrap(); + let pip = venv_pip(tmp.path()); + let result = pip_install(&pip, "OctoBot==999.0.0").await; + assert!(result.is_err()); + assert!(pip.exists(), "venv pip still intact after failed install"); + } + + #[cfg(unix)] + #[tokio::test] + async fn editable_install() { + let python = match require_python().await { + Some(p) => p, + None => return, + }; + let tmp = tempfile::tempdir().unwrap(); + let venv_dir = tmp.path().join("venv"); + create_venv(&python, &venv_dir).await.unwrap(); + let pip = venv_pip(&venv_dir); + + let src_dir = tmp.path().join("mypackage"); + std::fs::create_dir_all(&src_dir).unwrap(); + std::fs::write( + src_dir.join("setup.py"), + b"from setuptools import setup\nsetup(name='mypackage', version='0.1.0')\n", + ) + .unwrap(); + + let result = pip_install_editable(&pip, &src_dir).await; + assert!(result.is_ok(), "editable install failed: {result:?}"); + assert!(venv_python(&venv_dir).exists()); + } +} diff --git a/packages/launcher/crates/octobot-launcher-service/Cargo.toml b/packages/launcher/crates/octobot-launcher-service/Cargo.toml new file mode 100644 index 0000000000..cf47f55276 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-service/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "octobot-launcher-service" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +service-manager = "0.10" +thiserror = "2" +tracing = "0.1" diff --git a/packages/launcher/crates/octobot-launcher-service/src/error.rs b/packages/launcher/crates/octobot-launcher-service/src/error.rs new file mode 100644 index 0000000000..4b8ba9cc1e --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-service/src/error.rs @@ -0,0 +1,13 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ServiceError { + #[error("service manager error: {0}")] + Manager(String), + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("not supported on this platform")] + Unsupported, +} + +pub type Result = std::result::Result; diff --git a/packages/launcher/crates/octobot-launcher-service/src/lib.rs b/packages/launcher/crates/octobot-launcher-service/src/lib.rs new file mode 100644 index 0000000000..52c8ca5a8a --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-service/src/lib.rs @@ -0,0 +1,7 @@ +pub mod error; +mod service; + +pub use error::{Result, ServiceError}; +pub use service::{ + auto_level, service_label, ENV_FOREGROUND, LauncherService, ServiceLevel, ServiceStatus, +}; diff --git a/packages/launcher/crates/octobot-launcher-service/src/service.rs b/packages/launcher/crates/octobot-launcher-service/src/service.rs new file mode 100644 index 0000000000..39fdd123c4 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-service/src/service.rs @@ -0,0 +1,373 @@ +use std::ffi::OsString; +use std::path::PathBuf; + +use service_manager::{ + ServiceInstallCtx, ServiceLabel, ServiceManager, ServiceStartCtx, ServiceStopCtx, + ServiceUninstallCtx, +}; +#[cfg(target_os = "macos")] +use service_manager::LaunchdServiceManager; +use tracing::debug; + +use crate::error::{Result, ServiceError}; + +const REVERSE_DNS_LABEL: &str = "org.drakkar.octobot-launcher"; +pub const ENV_FOREGROUND: &str = "OCTOBOT_LAUNCHER_FOREGROUND"; + +#[cfg(target_os = "linux")] +const SYSTEMD_SERVICE_NAME: &str = "octobot-launcher.service"; + +pub fn service_label() -> &'static str { + #[cfg(target_os = "linux")] + { + SYSTEMD_SERVICE_NAME + } + #[cfg(not(target_os = "linux"))] + { + REVERSE_DNS_LABEL + } +} + +fn parse_label() -> Result { + REVERSE_DNS_LABEL + .parse::() + .map_err(|e| ServiceError::Manager(e.to_string())) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ServiceLevel { + System, + User, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ServiceStatus { + NotInstalled, + Stopped, + Running, + Failed(String), +} + +pub fn auto_level() -> ServiceLevel { + #[cfg(target_os = "macos")] + { + return ServiceLevel::User; + } + #[cfg(not(target_os = "macos"))] + { + if std::env::var("DISPLAY").is_ok() { + return ServiceLevel::User; + } + if std::env::var("WAYLAND_DISPLAY").is_ok() { + return ServiceLevel::User; + } + if let Ok(val) = std::env::var("XDG_SESSION_TYPE") { + if val == "x11" || val == "wayland" { + return ServiceLevel::User; + } + } + ServiceLevel::System + } +} + +pub struct LauncherService { + manager: Box, + level: ServiceLevel, +} + +impl std::fmt::Debug for LauncherService { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LauncherService") + .field("level", &self.level) + .finish_non_exhaustive() + } +} + +impl LauncherService { + pub fn new(level: ServiceLevel) -> Result { + let manager: Box = { + #[cfg(target_os = "macos")] + { + match level { + ServiceLevel::User => Box::new(LaunchdServiceManager::user()), + ServiceLevel::System => Box::new(LaunchdServiceManager::system()), + } + } + #[cfg(not(target_os = "macos"))] + { + ::native() + .map_err(|e| ServiceError::Manager(e.to_string()))? + } + }; + Ok(Self { manager, level }) + } + + pub fn install(&self) -> Result<()> { + let label = parse_label()?; + let program = std::env::current_exe()?.canonicalize()?; + let args: Vec = vec![ + OsString::from("service"), + OsString::from("run"), + ]; + debug!("installing service {:?} with program {:?}", label, program); + self.manager + .install(ServiceInstallCtx { + label, + program, + args, + working_directory: None, + environment: Some(vec![(ENV_FOREGROUND.into(), "1".into())]), + autostart: true, + username: None, + contents: None, + restart_policy: service_manager::RestartPolicy::default(), + }) + .map_err(|e| ServiceError::Manager(e.to_string())) + } + + pub fn uninstall(&self) -> Result<()> { + let label = parse_label()?; + debug!("uninstalling service {:?}", label); + self.manager + .uninstall(ServiceUninstallCtx { label }) + .map_err(|e| ServiceError::Manager(e.to_string())) + } + + pub fn start(&self) -> Result<()> { + let label = parse_label()?; + debug!("starting service {:?}", label); + self.manager + .start(ServiceStartCtx { label }) + .map_err(|e| ServiceError::Manager(e.to_string())) + } + + pub fn stop(&self) -> Result<()> { + let label = parse_label()?; + debug!("stopping service {:?}", label); + self.manager + .stop(ServiceStopCtx { label }) + .map_err(|e| ServiceError::Manager(e.to_string())) + } + + pub fn status(&self) -> Result { + #[cfg(target_os = "linux")] + { + query_status_linux() + } + #[cfg(target_os = "macos")] + { + query_status_macos() + } + #[cfg(target_os = "windows")] + { + query_status_windows() + } + #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] + { + Err(ServiceError::Unsupported) + } + } + + pub fn is_installed(&self) -> bool { + #[cfg(target_os = "linux")] + { + is_installed_linux(self.level) + } + #[cfg(target_os = "macos")] + { + is_installed_macos(self.level) + } + #[cfg(target_os = "windows")] + { + is_installed_windows() + } + #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] + { + false + } + } +} + +#[cfg(target_os = "linux")] +fn query_status_linux() -> Result { + let output = std::process::Command::new("systemctl") + .arg("is-active") + .arg("octobot-launcher") + .output()?; + let stdout = String::from_utf8_lossy(&output.stdout); + let state = stdout.trim(); + match state { + "active" => Ok(ServiceStatus::Running), + "inactive" => Ok(ServiceStatus::Stopped), + "failed" => Ok(ServiceStatus::Failed("failed".to_string())), + _ if output.status.code() == Some(4) => Ok(ServiceStatus::NotInstalled), + other => Ok(ServiceStatus::Failed(other.to_string())), + } +} + +#[cfg(target_os = "macos")] +fn query_status_macos() -> Result { + let output = std::process::Command::new("launchctl") + .arg("list") + .arg(REVERSE_DNS_LABEL) + .output()?; + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("Could not find service") || output.status.code() == Some(113) { + return Ok(ServiceStatus::NotInstalled); + } + return Ok(ServiceStatus::Stopped); + } + let stdout = String::from_utf8_lossy(&output.stdout); + if stdout.contains("\"PID\"") || stdout.contains("PID =") { + Ok(ServiceStatus::Running) + } else { + Ok(ServiceStatus::Stopped) + } +} + +#[cfg(target_os = "windows")] +fn query_status_windows() -> Result { + let output = std::process::Command::new("sc.exe") + .arg("query") + .arg(REVERSE_DNS_LABEL) + .output()?; + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("does not exist") || output.status.code() == Some(1060) { + return Ok(ServiceStatus::NotInstalled); + } + return Ok(ServiceStatus::NotInstalled); + } + let stdout = String::from_utf8_lossy(&output.stdout); + if stdout.contains("RUNNING") { + Ok(ServiceStatus::Running) + } else if stdout.contains("STOPPED") { + Ok(ServiceStatus::Stopped) + } else if stdout.contains("FAILED") { + Ok(ServiceStatus::Failed("FAILED".to_string())) + } else { + Ok(ServiceStatus::Stopped) + } +} + +#[cfg(target_os = "linux")] +fn is_installed_linux(level: ServiceLevel) -> bool { + match level { + ServiceLevel::System => { + PathBuf::from("/etc/systemd/system/octobot-launcher.service").exists() + } + ServiceLevel::User => { + if let Some(home) = dirs_home() { + home.join(".config/systemd/user/octobot-launcher.service").exists() + } else { + false + } + } + } +} + +#[cfg(target_os = "macos")] +fn is_installed_macos(level: ServiceLevel) -> bool { + match level { + ServiceLevel::System => { + PathBuf::from("/Library/LaunchDaemons/org.drakkar.octobot-launcher.plist").exists() + } + ServiceLevel::User => { + if let Some(home) = dirs_home() { + home.join("Library/LaunchAgents/org.drakkar.octobot-launcher.plist").exists() + } else { + false + } + } + } +} + +#[cfg(target_os = "windows")] +fn is_installed_windows() -> bool { + std::process::Command::new("sc.exe") + .arg("query") + .arg(REVERSE_DNS_LABEL) + .output() + .map(|o| o.status.success()) + .unwrap_or(false) +} + +fn dirs_home() -> Option { + std::env::var("HOME").ok().map(PathBuf::from) +} + +#[cfg(test)] +mod tests { + use super::*; + + mod level { + use super::*; + + #[test] + fn desktop_detection_picks_user() { + std::env::remove_var("WAYLAND_DISPLAY"); + std::env::remove_var("XDG_SESSION_TYPE"); + std::env::set_var("DISPLAY", ":0"); + let result = auto_level(); + std::env::remove_var("DISPLAY"); + assert_eq!(result, ServiceLevel::User); + } + + #[test] + fn headless_picks_system() { + std::env::remove_var("DISPLAY"); + std::env::remove_var("WAYLAND_DISPLAY"); + std::env::remove_var("XDG_SESSION_TYPE"); + #[cfg(target_os = "macos")] + assert_eq!(auto_level(), ServiceLevel::User); + #[cfg(not(target_os = "macos"))] + assert_eq!(auto_level(), ServiceLevel::System); + } + + #[test] + fn cli_override_wins() { + let level = ServiceLevel::System; + assert_eq!(level, ServiceLevel::System); + } + } + + mod naming { + use super::*; + + #[test] + fn reverse_dns_for_launchd() { + #[cfg(not(target_os = "linux"))] + assert_eq!(service_label(), "org.drakkar.octobot-launcher"); + #[cfg(target_os = "linux")] + assert_eq!(service_label(), "octobot-launcher.service"); + } + + #[test] + fn dotted_name_for_systemd() { + let label = service_label(); + assert!(label.contains("octobot")); + } + } + + mod args { + use super::*; + + #[test] + fn run_subcommand_is_invoked() { + let args: Vec = vec![ + OsString::from("service"), + OsString::from("run"), + ]; + assert!(args.iter().any(|a| a == "service")); + assert!(args.iter().any(|a| a == "run")); + } + + #[test] + fn current_exe_is_canonicalized() { + let exe = std::env::current_exe().expect("current_exe must succeed"); + let canonical = exe.canonicalize(); + assert!(canonical.is_ok()); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-tests/Cargo.toml b/packages/launcher/crates/octobot-launcher-tests/Cargo.toml new file mode 100644 index 0000000000..fda447fe7c --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-tests/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "octobot-launcher-tests" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true +publish = false + +[lints] +workspace = true + +[dev-dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +octobot-launcher-config = { path = "../octobot-launcher-config" } +octobot-launcher-api = { path = "../octobot-launcher-api" } +octobot-launcher-update = { path = "../octobot-launcher-update" } +assert_cmd = "2" +predicates = "3" +tempfile = "3" +wiremock = "0.6" +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1", features = ["derive"] } +ed25519-dalek = { version = "2", features = ["rand_core"] } +rand = "0.8" +hex = "0.4" +sha2 = "0.10" +semver = "1" +axum = { version = "0.8", features = ["macros"] } +axum-test = "20" diff --git a/packages/launcher/crates/octobot-launcher-tests/tests/auth_flow.rs b/packages/launcher/crates/octobot-launcher-tests/tests/auth_flow.rs new file mode 100644 index 0000000000..7c90638ff3 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-tests/tests/auth_flow.rs @@ -0,0 +1,117 @@ +#![allow(clippy::unwrap_used, clippy::expect_used)] + +use std::sync::Arc; + +use axum_test::TestServer; +use octobot_launcher_api::{ + ApiState, + auth::LocalSocketMarker, + lockout::Lockout, + router, + token_store::TokenStore, +}; +use octobot_launcher_config::Store; +use tempfile::TempDir; + +fn make_state() -> (ApiState, TempDir) { + let dir = TempDir::new().expect("tempdir"); + let store = Arc::new(Store::new(dir.path().to_path_buf()).expect("store")); + let token_store = Arc::new(TokenStore::load(dir.path().join("tokens.json"))); + let lockout = Arc::new(Lockout::new()); + ( + ApiState { + store, + token_store, + lockout, + backends: Arc::new(vec![]), + }, + dir, + ) +} + +fn make_server(state: ApiState) -> TestServer { + TestServer::new(router(state)) +} + +fn auth_header(raw: &str) -> (axum::http::HeaderName, axum::http::HeaderValue) { + ( + "authorization".parse().unwrap(), + format!("Bearer {raw}").parse().unwrap(), + ) +} + +#[tokio::test] +async fn end_to_end_token_flow() { + let (state, _dir) = make_state(); + + let bootstrap_raw = state.token_store.bootstrap_if_empty().expect("bootstrap token").expect("should have token"); + + let server = make_server(state.clone()); + + let (name, value) = auth_header(&bootstrap_raw); + let resp = server.get("/v1/instances").add_header(name, value).await; + assert_eq!(resp.status_code(), 200); + + let (name, value) = auth_header(&bootstrap_raw); + let create_resp = server + .post("/v1/tokens") + .add_header(name, value) + .json(&serde_json::json!({ "label": "new-token" })) + .await; + assert_eq!(create_resp.status_code(), 201); + let body: serde_json::Value = create_resp.json(); + let new_raw = body["raw_token"].as_str().expect("raw_token field").to_string(); + let new_id = body["token"]["id"].as_str().expect("token.id field").to_string(); + + let (name, value) = auth_header(&new_raw); + let resp2 = server.get("/v1/instances").add_header(name, value).await; + assert_eq!(resp2.status_code(), 200); + + let (name, value) = auth_header(&bootstrap_raw); + let revoke_resp = server + .delete(&format!("/v1/tokens/{new_id}")) + .add_header(name, value) + .await; + assert_eq!(revoke_resp.status_code(), 204); + + let (name, value) = auth_header(&new_raw); + let resp3 = server.get("/v1/instances").add_header(name, value).await; + assert_eq!(resp3.status_code(), 401); +} + +#[tokio::test] +async fn brute_force_locks_ip() { + let (state, _dir) = make_state(); + let server = make_server(state); + + let bad_token = "oblch_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; + + for _ in 0..10 { + let (name, value) = auth_header(bad_token); + server.get("/v1/instances").add_header(name, value).await; + } + + let (name, value) = auth_header(bad_token); + let resp = server.get("/v1/instances").add_header(name, value).await; + assert_eq!(resp.status_code(), 429); +} + +#[tokio::test] +async fn unix_socket_immune_to_lockout() { + use axum::{body::Body, extract::Request, middleware, response::Response}; + + let (state, _dir) = make_state(); + + async fn inject_local(mut req: Request, next: middleware::Next) -> Response { + req.extensions_mut().insert(LocalSocketMarker); + next.run(req).await + } + + let app = router(state).layer(middleware::from_fn(inject_local)); + let server = TestServer::new(app); + + for _ in 0..11 { + let resp = server.get("/v1/instances").await; + assert_eq!(resp.status_code(), 200); + } +} diff --git a/packages/launcher/crates/octobot-launcher-tests/tests/cli_basic.rs b/packages/launcher/crates/octobot-launcher-tests/tests/cli_basic.rs new file mode 100644 index 0000000000..eeac9a1d3f --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-tests/tests/cli_basic.rs @@ -0,0 +1,72 @@ +#![allow(clippy::unwrap_used, clippy::expect_used)] + +use assert_cmd::Command; +use predicates::prelude::*; +use tempfile::TempDir; + +#[test] +fn help_renders() { + let mut cmd = Command::cargo_bin("octobot-launcher").unwrap(); + cmd.arg("--help") + .assert() + .success() + .stdout(predicate::str::contains("instance")) + .stdout(predicate::str::contains("service")) + .stdout(predicate::str::contains("token")); +} + +#[test] +fn version_subcommand() { + let mut cmd = Command::cargo_bin("octobot-launcher").unwrap(); + let output = cmd.arg("version").output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(!stdout.trim().is_empty()); +} + +#[test] +fn unknown_subcommand_exits_nonzero() { + let mut cmd = Command::cargo_bin("octobot-launcher").unwrap(); + cmd.arg("foobarxyz").assert().failure(); +} + +#[test] +fn doctor_exits_nonzero_without_docker() { + let dir = TempDir::new().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write(&config_path, "").unwrap(); + let output = Command::cargo_bin("octobot-launcher") + .unwrap() + .arg("--config") + .arg(&config_path) + .arg("doctor") + .output() + .unwrap(); + let combined = format!( + "{}{}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); + assert!(!combined.is_empty()); +} + +#[test] +fn instance_list_requires_daemon() { + let dir = TempDir::new().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write( + &config_path, + format!("[launcher]\ndata_root = {:?}\napi_bind = \"127.0.0.1:19999\"", dir.path()), + ) + .unwrap(); + let output = Command::cargo_bin("octobot-launcher") + .unwrap() + .arg("--config") + .arg(&config_path) + .arg("instance") + .arg("list") + .arg("--json") + .output() + .unwrap(); + assert!(!output.status.success(), "instance list should fail when daemon is not running"); +} diff --git a/packages/launcher/crates/octobot-launcher-tests/tests/state_flow.rs b/packages/launcher/crates/octobot-launcher-tests/tests/state_flow.rs new file mode 100644 index 0000000000..16416721a7 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-tests/tests/state_flow.rs @@ -0,0 +1,118 @@ +#![allow(clippy::unwrap_used, clippy::expect_used)] + +use std::collections::BTreeMap; +use std::path::PathBuf; +use std::sync::Arc; + +use octobot_launcher_config::{DesiredState, InstanceRecord, Store, load_config}; +use octobot_launcher_core::{InstanceId, InstanceSpec, InstanceState, RuntimeKind}; +use tempfile::TempDir; + +fn sample_record() -> InstanceRecord { + let id = InstanceId::new(); + InstanceRecord { + spec: InstanceSpec { + id, + name: "test-bot".to_string(), + runtime: RuntimeKind::Docker, + version: "1.0.0".to_string(), + data_dir: PathBuf::from("/data"), + config_dir: PathBuf::from("/config"), + env: BTreeMap::new(), + ports: vec![], + auto_restart: false, + auto_update: false, + runtime_options: serde_json::Value::Null, + }, + desired_state: DesiredState::Running, + last_known_state: InstanceState::Stopped, + created_at: chrono::Utc::now(), + updated_at: chrono::Utc::now(), + } +} + +#[test] +fn store_write_read_delete_round_trip() { + let dir = TempDir::new().unwrap(); + let store = Store::new(dir.path().to_path_buf()).unwrap(); + let record = sample_record(); + let id = record.spec.id.clone(); + + let path = store.instances_dir().join(format!("{}.json", id.0)); + store.write_atomic(&path, &record).unwrap(); + + let records = store.list_instance_records().unwrap(); + assert_eq!(records.len(), 1); + assert_eq!(records[0].spec.id.0, id.0); + assert_eq!(records[0].spec.name, "test-bot"); + assert_eq!(records[0].desired_state, DesiredState::Running); + + store.delete(&path).unwrap(); + + let records_after = store.list_instance_records().unwrap(); + assert!(records_after.is_empty()); +} + +#[test] +fn corrupt_instance_file_skipped_not_panicked() { + let dir = TempDir::new().unwrap(); + let store = Store::new(dir.path().to_path_buf()).unwrap(); + let instances_dir = store.instances_dir(); + + for _ in 0..3 { + let record = sample_record(); + let path = instances_dir.join(format!("{}.json", record.spec.id.0)); + store.write_atomic(&path, &record).unwrap(); + } + + std::fs::write(instances_dir.join("corrupt.json"), b"{bad json").unwrap(); + + let records = store.list_instance_records().unwrap(); + assert_eq!(records.len(), 3); +} + +#[test] +fn config_loads_from_file() { + let dir = TempDir::new().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write( + &config_path, + format!( + "[launcher]\ndata_root = {:?}\napi_bind = \"127.0.0.1:9999\"\n", + dir.path() + ), + ) + .unwrap(); + + let config = load_config(Some(&config_path)).unwrap(); + assert_eq!(config.launcher.api_bind, "127.0.0.1:9999"); + assert_eq!(config.launcher.data_root, dir.path()); +} + +#[tokio::test] +async fn store_concurrent_writes_no_corruption() { + let dir = TempDir::new().unwrap(); + let store = Arc::new(Store::new(dir.path().to_path_buf()).unwrap()); + let record = sample_record(); + let path = store + .instances_dir() + .join(format!("{}.json", record.spec.id.0)); + + let mut handles = vec![]; + for _ in 0..10 { + let store_clone = Arc::clone(&store); + let record_clone = record.clone(); + let path_clone = path.clone(); + handles.push(tokio::spawn(async move { + store_clone.write_atomic(&path_clone, &record_clone).unwrap(); + })); + } + + for handle in handles { + handle.await.unwrap(); + } + + let content = std::fs::read_to_string(&path).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&content).expect("valid JSON after concurrent writes"); + assert!(parsed.is_object()); +} diff --git a/packages/launcher/crates/octobot-launcher-tests/tests/update_flow.rs b/packages/launcher/crates/octobot-launcher-tests/tests/update_flow.rs new file mode 100644 index 0000000000..f51256b2b8 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-tests/tests/update_flow.rs @@ -0,0 +1,176 @@ +#![allow(clippy::unwrap_used, clippy::expect_used)] + +use std::collections::BTreeMap; + +use ed25519_dalek::{Signer, SigningKey}; +use octobot_launcher_update::{ + UpdateAvailability, UpdateError, Updater, UpdaterConfig, + manifest::{Artifact, ArtifactSet, Channel, Manifest}, +}; +use rand::rngs::OsRng; +use sha2::{Digest, Sha256}; +use wiremock::matchers::{method, path}; +use wiremock::{Mock, MockServer, ResponseTemplate}; + +const TRIPLE: &str = "x86_64-unknown-linux-gnu"; + +fn make_manifest(version: &str) -> Manifest { + let mut artifacts = BTreeMap::new(); + artifacts.insert( + TRIPLE.to_string(), + Artifact { + url: "https://example.com/launcher".to_string(), + sha256: "deadbeef".to_string(), + size: 1_000_000, + }, + ); + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: version.to_string(), + artifacts, + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + } +} + +fn sign_manifest(key: &SigningKey, manifest_bytes: &[u8]) -> String { + let hash = Sha256::digest(manifest_bytes); + let sig = key.sign(&hash); + hex::encode(sig.to_bytes()) +} + +fn make_config(server: &MockServer, key: &SigningKey, current_version: &str) -> UpdaterConfig { + UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: semver::Version::parse(current_version).unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir: std::env::temp_dir(), + cache_dir: std::env::temp_dir(), + } +} + +async fn mount(server: &MockServer, manifest: &Manifest, key: &SigningKey) { + let body = serde_json::to_vec(manifest).unwrap(); + let sig = sign_manifest(key, &body); + Mock::given(method("GET")) + .and(path("/manifest.json")) + .respond_with(ResponseTemplate::new(200).set_body_bytes(body)) + .mount(server) + .await; + Mock::given(method("GET")) + .and(path("/manifest.json.sig")) + .respond_with(ResponseTemplate::new(200).set_body_string(sig)) + .mount(server) + .await; +} + +#[tokio::test] +async fn happy_path_update_check() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut OsRng); + let manifest = make_manifest("2.0.0"); + mount(&server, &manifest, &key).await; + + let updater = Updater::new(make_config(&server, &key, "1.0.0")); + let result = updater.check_launcher().await.unwrap(); + assert_eq!(result, UpdateAvailability::Available { version: "2.0.0".to_string() }); +} + +#[tokio::test] +async fn bad_signature_rejected() { + let server = MockServer::start().await; + let signing_key = SigningKey::generate(&mut OsRng); + let wrong_key = SigningKey::generate(&mut OsRng); + + let manifest = make_manifest("2.0.0"); + let body = serde_json::to_vec(&manifest).unwrap(); + let bad_sig = sign_manifest(&wrong_key, &body); + + Mock::given(method("GET")) + .and(path("/manifest.json")) + .respond_with(ResponseTemplate::new(200).set_body_bytes(body)) + .mount(&server) + .await; + Mock::given(method("GET")) + .and(path("/manifest.json.sig")) + .respond_with(ResponseTemplate::new(200).set_body_string(bad_sig)) + .mount(&server) + .await; + + let updater = Updater::new(make_config(&server, &signing_key, "1.0.0")); + let result = updater.check_launcher().await; + assert!(matches!(result, Err(UpdateError::BadSignature))); +} + +#[tokio::test] +async fn missing_artifact_for_triple() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut OsRng); + + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: "2.0.0".to_string(), + artifacts: BTreeMap::new(), + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + let manifest = Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + }; + mount(&server, &manifest, &key).await; + + let updater = Updater::new(make_config(&server, &key, "1.0.0")); + let result = updater.check_launcher().await.unwrap(); + assert_eq!(result, UpdateAvailability::NoArtifactForPlatform); +} + +#[tokio::test] +async fn downgrade_blocked_by_default() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut OsRng); + let manifest = make_manifest("1.0.0"); + mount(&server, &manifest, &key).await; + + let updater = Updater::new(make_config(&server, &key, "2.0.0")); + let result = updater.check_launcher().await.unwrap(); + assert_eq!(result, UpdateAvailability::UpToDate); +} + +#[tokio::test] +async fn network_failure_mid_download() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut OsRng); + + Mock::given(method("GET")) + .and(path("/manifest.json")) + .respond_with(ResponseTemplate::new(500)) + .mount(&server) + .await; + + let updater = Updater::new(make_config(&server, &key, "1.0.0")); + let result = updater.check_launcher().await; + assert!(result.is_err()); +} diff --git a/packages/launcher/crates/octobot-launcher-update/Cargo.toml b/packages/launcher/crates/octobot-launcher-update/Cargo.toml new file mode 100644 index 0000000000..e94b4a4a87 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "octobot-launcher-update" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +octobot-launcher-core = { path = "../octobot-launcher-core" } +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json", "stream"] } +tokio = { version = "1", features = ["full"] } +ed25519-dalek = "2" +sha2 = { version = "0.10", features = [] } +hex = "0.4" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +semver = "1" +tempfile = "3" +self-replace = "1.5" +thiserror = "2" +tracing = "0.1" +chrono = { version = "0.4", features = ["serde"] } + +[build-dependencies] + +[dev-dependencies] +wiremock = "0.6" +tokio = { version = "1", features = ["full"] } +ed25519-dalek = { version = "2", features = ["rand_core"] } +rand = "0.8" diff --git a/packages/launcher/crates/octobot-launcher-update/build.rs b/packages/launcher/crates/octobot-launcher-update/build.rs new file mode 100644 index 0000000000..beb5d9ad34 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/build.rs @@ -0,0 +1,8 @@ +fn main() { + println!("cargo:rerun-if-env-changed=OCTOBOT_LAUNCHER_UPDATE_PUBKEY"); + let hex = std::env::var("OCTOBOT_LAUNCHER_UPDATE_PUBKEY") + .unwrap_or_else(|_| "0".repeat(64)); + println!("cargo:rustc-env=LAUNCHER_UPDATE_PUBKEY_HEX={hex}"); + let target = std::env::var("TARGET").unwrap_or_default(); + println!("cargo:rustc-env=LAUNCHER_TARGET_TRIPLE={target}"); +} diff --git a/packages/launcher/crates/octobot-launcher-update/src/error.rs b/packages/launcher/crates/octobot-launcher-update/src/error.rs new file mode 100644 index 0000000000..ed1ba63c3d --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/src/error.rs @@ -0,0 +1,27 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum UpdateError { + #[error("network error: {0}")] + Network(String), + #[error("bad signature")] + BadSignature, + #[error("missing signature")] + MissingSignature, + #[error("malformed signature")] + MalformedSignature, + #[error("sha256 mismatch")] + Sha256Mismatch, + #[error("no artifact for platform: {0}")] + NoArtifactForPlatform(String), + #[error("downgrade refused")] + DowngradeRefused, + #[error("update locked")] + Locked, + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("parse error: {0}")] + Parse(String), +} + +pub type Result = std::result::Result; diff --git a/packages/launcher/crates/octobot-launcher-update/src/github.rs b/packages/launcher/crates/octobot-launcher-update/src/github.rs new file mode 100644 index 0000000000..19db70f2c2 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/src/github.rs @@ -0,0 +1,242 @@ +use std::path::{Path, PathBuf}; + +use serde::Deserialize; + +use crate::error::{Result, UpdateError}; + +const GITHUB_API_LATEST: &str = + "https://api.github.com/repos/Drakkar-Software/OctoBot/releases/latest"; + +const ASSET_MACOS_ARM64: &str = "OctoBot_macos_arm64"; +const ASSET_MACOS_X64: &str = "OctoBot_macos_x64"; +const ASSET_LINUX_ARM64: &str = "OctoBot_linux_arm64"; +const ASSET_LINUX_X64: &str = "OctoBot_linux_x64"; +const ASSET_WINDOWS_X64: &str = "OctoBot_windows_x64.exe"; + +#[derive(Deserialize)] +struct GithubRelease { + tag_name: String, + assets: Vec, +} + +#[derive(Deserialize)] +struct GithubAsset { + name: String, + browser_download_url: String, +} + +fn platform_asset_name() -> Option<&'static str> { + match (std::env::consts::OS, std::env::consts::ARCH) { + ("macos", "aarch64") => Some(ASSET_MACOS_ARM64), + ("macos", "x86_64") => Some(ASSET_MACOS_X64), + ("linux", "aarch64") => Some(ASSET_LINUX_ARM64), + ("linux", "x86_64") => Some(ASSET_LINUX_X64), + ("windows", "x86_64") => Some(ASSET_WINDOWS_X64), + _ => None, + } +} + +pub async fn fetch_latest_octobot_binary(dest_dir: &Path) -> Result { + let client = reqwest::Client::builder() + .user_agent(concat!("octobot-launcher/", env!("CARGO_PKG_VERSION"))) + .build() + .map_err(|e| UpdateError::Network(e.to_string()))?; + + let asset_name = platform_asset_name().ok_or_else(|| { + UpdateError::NoArtifactForPlatform(format!( + "{}-{}", + std::env::consts::OS, + std::env::consts::ARCH + )) + })?; + + let release: GithubRelease = client + .get(GITHUB_API_LATEST) + .send() + .await + .map_err(|e| UpdateError::Network(e.to_string()))? + .json() + .await + .map_err(|e| UpdateError::Network(e.to_string()))?; + + let asset = release + .assets + .iter() + .find(|a| a.name == asset_name) + .ok_or_else(|| UpdateError::NoArtifactForPlatform(asset_name.to_string()))?; + + tracing::info!( + version = release.tag_name.as_str(), + asset = asset_name, + "downloading OctoBot from GitHub" + ); + + std::fs::create_dir_all(dest_dir)?; + + let dest_path = dest_dir.join(asset_name); + download_to_file(&client, &asset.browser_download_url, &dest_path).await?; + + Ok(dest_path) +} + +async fn download_to_file(client: &reqwest::Client, url: &str, dest: &Path) -> Result<()> { + use tokio::io::AsyncWriteExt; + + let resp = client + .get(url) + .send() + .await + .map_err(|e| UpdateError::Network(e.to_string()))?; + + if !resp.status().is_success() { + return Err(UpdateError::Network(format!( + "download failed: HTTP {}", + resp.status() + ))); + } + + let mut file = tokio::fs::File::create(dest).await?; + + let bytes = resp + .bytes() + .await + .map_err(|e| UpdateError::Network(e.to_string()))?; + + file.write_all(&bytes).await?; + file.flush().await?; + + Ok(()) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use wiremock::matchers::{method, path}; + use wiremock::{Mock, MockServer, ResponseTemplate}; + + #[test] + fn platform_asset_name_known_platform() { + // The test machine is a known platform — we should always get a name. + let name = platform_asset_name(); + assert!( + name.is_some(), + "no asset name for {}-{}", + std::env::consts::OS, + std::env::consts::ARCH + ); + } + + #[test] + fn platform_asset_name_macos_arm64() { + assert_eq!(ASSET_MACOS_ARM64, "OctoBot_macos_arm64"); + assert_eq!(ASSET_MACOS_X64, "OctoBot_macos_x64"); + assert_eq!(ASSET_LINUX_ARM64, "OctoBot_linux_arm64"); + assert_eq!(ASSET_LINUX_X64, "OctoBot_linux_x64"); + assert_eq!(ASSET_WINDOWS_X64, "OctoBot_windows_x64.exe"); + } + + fn make_release_json(asset_name: &str, download_url: &str) -> serde_json::Value { + serde_json::json!({ + "tag_name": "2.1.0", + "assets": [{ + "name": asset_name, + "browser_download_url": download_url + }] + }) + } + + #[tokio::test] + async fn download_file_writes_bytes() { + let server = MockServer::start().await; + let content = b"fake octobot binary"; + Mock::given(method("GET")) + .and(path("/download/octobot")) + .respond_with(ResponseTemplate::new(200).set_body_bytes(content.as_slice())) + .mount(&server) + .await; + + let tmp = tempfile::tempdir().unwrap(); + let dest = tmp.path().join("octobot"); + let client = reqwest::Client::new(); + let url = format!("{}/download/octobot", server.uri()); + download_to_file(&client, &url, &dest).await.unwrap(); + + assert_eq!(std::fs::read(&dest).unwrap(), content); + } + + #[tokio::test] + async fn download_file_fails_on_http_error() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/download/octobot")) + .respond_with(ResponseTemplate::new(404)) + .mount(&server) + .await; + + let tmp = tempfile::tempdir().unwrap(); + let dest = tmp.path().join("octobot"); + let client = reqwest::Client::new(); + let url = format!("{}/download/octobot", server.uri()); + let result = download_to_file(&client, &url, &dest).await; + assert!(result.is_err()); + } + + #[tokio::test] + async fn fetch_finds_asset_for_current_platform() { + let server = MockServer::start().await; + let asset_name = platform_asset_name().unwrap(); + let download_url = format!("{}/download/{asset_name}", server.uri()); + + Mock::given(method("GET")) + .and(path("/repos/Drakkar-Software/OctoBot/releases/latest")) + .respond_with( + ResponseTemplate::new(200) + .set_body_json(make_release_json(asset_name, &download_url)), + ) + .mount(&server) + .await; + Mock::given(method("GET")) + .and(path(format!("/download/{asset_name}"))) + .respond_with(ResponseTemplate::new(200).set_body_bytes(b"fake".as_slice())) + .mount(&server) + .await; + + // We can't easily override the GitHub URL, so test component-by-component. + // Here we verify the JSON parsing and asset selection logic works end-to-end + // using the download helper directly. + let tmp = tempfile::tempdir().unwrap(); + let release_url = format!("{}/repos/Drakkar-Software/OctoBot/releases/latest", server.uri()); + let client = reqwest::Client::new(); + let release: GithubRelease = client.get(&release_url).send().await.unwrap().json().await.unwrap(); + + assert_eq!(release.tag_name, "2.1.0"); + let asset = release.assets.iter().find(|a| a.name == asset_name).unwrap(); + let dest = tmp.path().join(asset_name); + download_to_file(&client, &asset.browser_download_url, &dest).await.unwrap(); + assert_eq!(std::fs::read(&dest).unwrap(), b"fake"); + } + + #[tokio::test] + async fn fetch_errors_when_asset_missing_for_platform() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/repos/Drakkar-Software/OctoBot/releases/latest")) + .respond_with( + ResponseTemplate::new(200) + .set_body_json(serde_json::json!({ + "tag_name": "2.1.0", + "assets": [] + })), + ) + .mount(&server) + .await; + + let client = reqwest::Client::new(); + let release_url = format!("{}/repos/Drakkar-Software/OctoBot/releases/latest", server.uri()); + let release: GithubRelease = client.get(&release_url).send().await.unwrap().json().await.unwrap(); + let asset_name = platform_asset_name().unwrap(); + let result = release.assets.iter().find(|a| a.name == asset_name); + assert!(result.is_none()); + } +} diff --git a/packages/launcher/crates/octobot-launcher-update/src/lib.rs b/packages/launcher/crates/octobot-launcher-update/src/lib.rs new file mode 100644 index 0000000000..68c61b675e --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/src/lib.rs @@ -0,0 +1,25 @@ +pub mod error; +pub mod github; +pub mod manifest; +pub mod restart; +pub mod updater; + +pub use error::{Result, UpdateError}; +pub use github::fetch_latest_octobot_binary; +pub use manifest::{Artifact, ArtifactSet, Channel, Manifest, PythonArtifact}; +pub use restart::RestartAttempts; +pub use updater::{AppliedUpdate, ArtifactKind, UpdateAvailability, Updater, UpdaterConfig}; + +pub const BAKED_PUBKEY_HEX: &str = env!("LAUNCHER_UPDATE_PUBKEY_HEX"); + +#[cfg(test)] +mod tests { + mod pubkey { + use crate::BAKED_PUBKEY_HEX; + + #[test] + fn baked_into_binary() { + assert!(!BAKED_PUBKEY_HEX.is_empty()); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-update/src/manifest.rs b/packages/launcher/crates/octobot-launcher-update/src/manifest.rs new file mode 100644 index 0000000000..6991af60ce --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/src/manifest.rs @@ -0,0 +1,207 @@ +use std::collections::BTreeMap; + +use chrono::{DateTime, Utc}; +use ed25519_dalek::Signature; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; + +use crate::error::{Result, UpdateError}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Manifest { + pub schema_version: u32, + pub generated_at: DateTime, + pub channels: BTreeMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Channel { + pub launcher: Option, + pub octobot_binary: Option, + pub octobot_python: Option, + pub python_dist: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ArtifactSet { + pub version: String, + pub artifacts: BTreeMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Artifact { + pub url: String, + pub sha256: String, + pub size: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PythonArtifact { + pub version: String, + pub pypi_index: String, + pub package: String, +} + +pub fn canonical_bytes(manifest: &Manifest) -> Result> { + serde_json::to_vec(manifest).map_err(|e| UpdateError::Parse(e.to_string())) +} + +pub fn verify_signature( + body: &[u8], + sig_hex: &str, + pubkey: &ed25519_dalek::VerifyingKey, +) -> Result<()> { + let sig_bytes = hex::decode(sig_hex.trim()).map_err(|_| UpdateError::MalformedSignature)?; + let sig_arr: [u8; 64] = sig_bytes + .try_into() + .map_err(|_| UpdateError::MalformedSignature)?; + let signature = Signature::from_bytes(&sig_arr); + let hash = Sha256::digest(body); + pubkey + .verify_strict(&hash, &signature) + .map_err(|_| UpdateError::BadSignature) +} + +pub fn parse_and_verify( + body: &[u8], + sig_hex: &str, + pubkey: &ed25519_dalek::VerifyingKey, +) -> Result { + verify_signature(body, sig_hex, pubkey)?; + serde_json::from_slice(body).map_err(|e| UpdateError::Parse(e.to_string())) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used, clippy::missing_panics_doc)] +mod tests { + use super::*; + use ed25519_dalek::{Signer, SigningKey}; + use sha2::{Digest, Sha256}; + use wiremock::matchers::{method, path}; + use wiremock::{Mock, MockServer, ResponseTemplate}; + + fn make_manifest() -> Manifest { + let mut channels = BTreeMap::new(); + let mut artifacts = BTreeMap::new(); + artifacts.insert( + "x86_64-unknown-linux-gnu".to_string(), + Artifact { + url: "https://example.com/launcher".to_string(), + sha256: "abc123".to_string(), + size: 1_000_000, + }, + ); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: "1.4.2".to_string(), + artifacts, + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + Manifest { + schema_version: 1, + generated_at: chrono::DateTime::parse_from_rfc3339("2026-04-27T10:30:00Z") + .unwrap() + .with_timezone(&Utc), + channels, + } + } + + fn sign_manifest(manifest: &Manifest, key: &SigningKey) -> String { + let body = serde_json::to_vec(manifest).unwrap(); + let hash = Sha256::digest(&body); + let sig = key.sign(&hash); + hex::encode(sig.to_bytes()) + } + + #[test] + fn canonical_json_is_deterministic() { + let manifest = make_manifest(); + let bytes1 = serde_json::to_vec(&manifest).unwrap(); + let bytes2 = serde_json::to_vec(&manifest).unwrap(); + assert_eq!(bytes1, bytes2); + } + + #[test] + fn signature_verifies_with_correct_key() { + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest(); + let sig_hex = sign_manifest(&manifest, &key); + let body = serde_json::to_vec(&manifest).unwrap(); + let result = verify_signature(&body, &sig_hex, &key.verifying_key()); + assert!(result.is_ok()); + } + + #[test] + fn signature_fails_with_wrong_key() { + let key_a = SigningKey::generate(&mut rand::rngs::OsRng); + let key_b = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest(); + let sig_hex = sign_manifest(&manifest, &key_a); + let body = serde_json::to_vec(&manifest).unwrap(); + let result = verify_signature(&body, &sig_hex, &key_b.verifying_key()); + assert!(matches!(result, Err(UpdateError::BadSignature))); + } + + #[test] + fn tampered_manifest_fails() { + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest(); + let sig_hex = sign_manifest(&manifest, &key); + let mut body = serde_json::to_vec(&manifest).unwrap(); + body[10] ^= 0xFF; + let result = verify_signature(&body, &sig_hex, &key.verifying_key()); + assert!(matches!(result, Err(UpdateError::BadSignature))); + } + + #[tokio::test] + async fn missing_sig_file_fails() { + use crate::updater::{Updater, UpdaterConfig}; + + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest(); + let body = serde_json::to_vec(&manifest).unwrap(); + + Mock::given(method("GET")) + .and(path("/manifest.json")) + .respond_with(ResponseTemplate::new(200).set_body_bytes(body)) + .mount(&server) + .await; + + Mock::given(method("GET")) + .and(path("/manifest.json.sig")) + .respond_with(ResponseTemplate::new(404)) + .mount(&server) + .await; + + let config = UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: semver::Version::new(1, 0, 0), + current_target_triple: "x86_64-unknown-linux-gnu", + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir: std::env::temp_dir(), + cache_dir: std::env::temp_dir(), + }; + let updater = Updater::new(config); + let result = updater.fetch_manifest().await; + assert!(matches!(result, Err(UpdateError::MissingSignature))); + } + + #[test] + fn malformed_sig_fails() { + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest(); + let body = serde_json::to_vec(&manifest).unwrap(); + let result = verify_signature(&body, "not-hex", &key.verifying_key()); + assert!(matches!(result, Err(UpdateError::MalformedSignature))); + } +} diff --git a/packages/launcher/crates/octobot-launcher-update/src/restart.rs b/packages/launcher/crates/octobot-launcher-update/src/restart.rs new file mode 100644 index 0000000000..ef12a776e1 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/src/restart.rs @@ -0,0 +1,66 @@ +use std::path::Path; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct RestartAttempts { + pub count: u32, + pub last_reset: Option>, +} + +impl RestartAttempts { + pub fn load(state_dir: &Path) -> Self { + let path = state_dir.join("restart_attempts.json"); + let Ok(data) = std::fs::read(&path) else { + return Self::default(); + }; + serde_json::from_slice(&data).unwrap_or_default() + } + + pub fn save(&self, state_dir: &Path) -> std::io::Result<()> { + let path = state_dir.join("restart_attempts.json"); + let data = serde_json::to_vec(self) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + std::fs::write(path, data) + } + + pub fn increment(&mut self) { + self.count += 1; + } + + pub fn reset(&mut self) { + self.count = 0; + self.last_reset = Some(Utc::now()); + } + + pub fn should_rollback(&self) -> bool { + self.count >= 3 + } +} + +#[cfg(test)] +mod tests { + mod rollback { + use crate::restart::RestartAttempts; + + #[test] + fn three_failures_triggers_rollback() { + let mut attempts = RestartAttempts::default(); + attempts.increment(); + attempts.increment(); + attempts.increment(); + assert!(attempts.should_rollback()); + } + + #[test] + fn counter_resets_on_clean_run() { + let mut attempts = RestartAttempts::default(); + attempts.increment(); + attempts.increment(); + attempts.reset(); + assert_eq!(attempts.count, 0); + assert!(!attempts.should_rollback()); + } + } +} diff --git a/packages/launcher/crates/octobot-launcher-update/src/updater.rs b/packages/launcher/crates/octobot-launcher-update/src/updater.rs new file mode 100644 index 0000000000..780504d706 --- /dev/null +++ b/packages/launcher/crates/octobot-launcher-update/src/updater.rs @@ -0,0 +1,815 @@ +use std::path::{Path, PathBuf}; + +use ed25519_dalek::VerifyingKey; +use semver::Version; +use sha2::{Digest, Sha256}; +use tracing::{debug, info}; + +use crate::error::{Result, UpdateError}; +use crate::manifest::{ArtifactSet, Manifest}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum UpdateAvailability { + UpToDate, + Available { version: String }, + NoArtifactForPlatform, +} + +#[derive(Debug, Clone)] +pub enum ArtifactKind { + OctoBotBinary, + PythonDist, +} + +#[derive(Debug)] +pub struct AppliedUpdate { + pub version: String, +} + +#[derive(Debug)] +pub struct UpdaterConfig { + pub manifest_url: String, + pub channel: String, + pub current_launcher_version: Version, + pub current_target_triple: &'static str, + pub pubkey: VerifyingKey, + pub allow_downgrade: bool, + pub user_agent: String, + pub state_dir: PathBuf, + pub cache_dir: PathBuf, +} + +type Replacer = Box std::io::Result<()> + Send + Sync>; + +pub struct Updater { + config: UpdaterConfig, + client: reqwest::Client, + replacer: Replacer, +} + +impl std::fmt::Debug for Updater { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Updater") + .field("config", &self.config) + .finish_non_exhaustive() + } +} + +impl Updater { + pub fn new(config: UpdaterConfig) -> Self { + let client = reqwest::Client::builder() + .user_agent(&config.user_agent) + .build() + .unwrap_or_else(|_| reqwest::Client::new()); + Self { + config, + client, + replacer: Box::new(|path| self_replace::self_replace(path)), + } + } + + /// Build an `Updater` from the baked-in public key hex and config strings. + /// Returns `None` if the pubkey cannot be parsed (e.g. dev builds with all-zero placeholder). + pub fn from_baked_config( + manifest_url: String, + channel: String, + pubkey_hex: &str, + state_dir: PathBuf, + cache_dir: PathBuf, + ) -> Option { + let bytes = hex::decode(pubkey_hex).ok()?; + let arr: [u8; 32] = bytes.try_into().ok()?; + // Reject the dev placeholder (all zeros) — not a real key. + if arr == [0u8; 32] { + return None; + } + let pubkey = VerifyingKey::from_bytes(&arr).ok()?; + let version = Version::parse(env!("CARGO_PKG_VERSION")).ok()?; + let config = UpdaterConfig { + manifest_url, + channel, + current_launcher_version: version, + current_target_triple: env!("LAUNCHER_TARGET_TRIPLE"), + pubkey, + allow_downgrade: false, + user_agent: format!("octobot-launcher/{}", env!("CARGO_PKG_VERSION")), + state_dir, + cache_dir, + }; + Some(Self::new(config)) + } + + #[cfg(test)] + pub fn with_replacer(mut self, replacer: Replacer) -> Self { + self.replacer = replacer; + self + } + + pub async fn fetch_manifest(&self) -> Result { + let body_bytes = self.get_bytes(&self.config.manifest_url).await?; + let sig_url = format!("{}.sig", self.config.manifest_url); + let sig_resp = self + .client + .get(&sig_url) + .send() + .await + .map_err(|e| UpdateError::Network(e.to_string()))?; + if sig_resp.status().as_u16() == 404 { + return Err(UpdateError::MissingSignature); + } + if !sig_resp.status().is_success() { + return Err(UpdateError::Network(format!( + "sig fetch status {}", + sig_resp.status() + ))); + } + let sig_hex = sig_resp + .text() + .await + .map_err(|e| UpdateError::Network(e.to_string()))?; + crate::manifest::parse_and_verify(&body_bytes, &sig_hex, &self.config.pubkey) + } + + pub async fn check_launcher(&self) -> Result { + let manifest = self.fetch_manifest().await?; + let channel = manifest + .channels + .get(&self.config.channel) + .ok_or_else(|| UpdateError::Parse(format!("channel not found: {}", self.config.channel)))?; + let Some(launcher_set) = &channel.launcher else { + return Ok(UpdateAvailability::UpToDate); + }; + if !launcher_set + .artifacts + .contains_key(self.config.current_target_triple) + { + return Ok(UpdateAvailability::NoArtifactForPlatform); + } + self.compare_versions(&self.config.current_launcher_version, launcher_set) + } + + pub async fn check_artifact(&self, artifact: ArtifactKind) -> Result { + let manifest = self.fetch_manifest().await?; + let channel = manifest + .channels + .get(&self.config.channel) + .ok_or_else(|| UpdateError::Parse(format!("channel not found: {}", self.config.channel)))?; + let set = match artifact { + ArtifactKind::OctoBotBinary => match &channel.octobot_binary { + Some(s) => s, + None => return Ok(UpdateAvailability::UpToDate), + }, + ArtifactKind::PythonDist => match &channel.python_dist { + Some(s) => s, + None => return Ok(UpdateAvailability::UpToDate), + }, + }; + if !set + .artifacts + .contains_key(self.config.current_target_triple) + { + return Ok(UpdateAvailability::NoArtifactForPlatform); + } + let current = Version::new(0, 0, 0); + self.compare_versions(¤t, set) + } + + pub async fn apply_launcher_update(&self, manifest: &Manifest) -> Result { + let lock_path = self.config.state_dir.join("launcher.lock"); + if lock_path.exists() { + return Err(UpdateError::Locked); + } + + let channel = manifest + .channels + .get(&self.config.channel) + .ok_or_else(|| UpdateError::Parse(format!("channel not found: {}", self.config.channel)))?; + let launcher_set = channel + .launcher + .as_ref() + .ok_or_else(|| UpdateError::Parse("no launcher in channel".to_string()))?; + let artifact = launcher_set + .artifacts + .get(self.config.current_target_triple) + .ok_or_else(|| { + UpdateError::NoArtifactForPlatform(self.config.current_target_triple.to_string()) + })?; + + let updates_dir = self.config.cache_dir.join("updates"); + std::fs::create_dir_all(&updates_dir)?; + + let partial_name = format!( + "octobot-launcher.{}.partial", + launcher_set.version + ); + let partial_path = updates_dir.join(&partial_name); + + self.download_and_verify(&artifact.url, &artifact.sha256, &partial_path) + .await?; + + let backup_name = format!( + "octobot-launcher.{}", + self.config.current_launcher_version + ); + let backup_path = updates_dir.join(&backup_name); + let current_exe = + std::env::current_exe().map_err(UpdateError::Io)?; + std::fs::copy(¤t_exe, &backup_path)?; + debug!("backup created at {}", backup_path.display()); + + (self.replacer)(&partial_path)?; + info!("launcher replaced with version {}", launcher_set.version); + + let pending_restart = self.config.state_dir.join("pending_restart"); + std::fs::write(&pending_restart, launcher_set.version.as_bytes())?; + + Ok(AppliedUpdate { + version: launcher_set.version.clone(), + }) + } + + pub async fn fetch_artifact( + &self, + kind: ArtifactKind, + dest_dir: &Path, + ) -> Result { + let manifest = self.fetch_manifest().await?; + let channel = manifest + .channels + .get(&self.config.channel) + .ok_or_else(|| UpdateError::Parse(format!("channel not found: {}", self.config.channel)))?; + let (url, sha256) = match kind { + ArtifactKind::OctoBotBinary => { + let set = channel + .octobot_binary + .as_ref() + .ok_or_else(|| UpdateError::Parse("no octobot_binary in channel".to_string()))?; + let artifact = set + .artifacts + .get(self.config.current_target_triple) + .ok_or_else(|| { + UpdateError::NoArtifactForPlatform( + self.config.current_target_triple.to_string(), + ) + })?; + (artifact.url.clone(), artifact.sha256.clone()) + } + ArtifactKind::PythonDist => { + let set = channel + .python_dist + .as_ref() + .ok_or_else(|| UpdateError::Parse("no python_dist in channel".to_string()))?; + let artifact = set + .artifacts + .get(self.config.current_target_triple) + .ok_or_else(|| { + UpdateError::NoArtifactForPlatform( + self.config.current_target_triple.to_string(), + ) + })?; + (artifact.url.clone(), artifact.sha256.clone()) + } + }; + + let filename = url + .rsplit('/') + .next() + .unwrap_or("artifact") + .to_string(); + let partial_path = dest_dir.join(format!("{filename}.partial")); + self.download_and_verify(&url, &sha256, &partial_path).await?; + let final_path = dest_dir.join(&filename); + std::fs::rename(&partial_path, &final_path)?; + Ok(final_path) + } + + async fn get_bytes(&self, url: &str) -> Result> { + let resp = self + .client + .get(url) + .send() + .await + .map_err(|e| UpdateError::Network(e.to_string()))?; + if !resp.status().is_success() { + return Err(UpdateError::Network(format!( + "HTTP {} for {url}", + resp.status() + ))); + } + resp.bytes() + .await + .map(|b| b.to_vec()) + .map_err(|e| UpdateError::Network(e.to_string())) + } + + async fn download_and_verify( + &self, + url: &str, + expected_sha256: &str, + dest: &Path, + ) -> Result<()> { + let bytes = self.get_bytes(url).await?; + let hash = Sha256::digest(&bytes); + let actual_hex = hex::encode(hash); + if actual_hex != expected_sha256 { + return Err(UpdateError::Sha256Mismatch); + } + if let Some(parent) = dest.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(dest, &bytes)?; + Ok(()) + } + + fn compare_versions( + &self, + current: &Version, + set: &ArtifactSet, + ) -> Result { + let manifest_version = Version::parse(&set.version) + .map_err(|e| UpdateError::Parse(format!("bad version {}: {e}", set.version)))?; + if manifest_version == *current { + return Ok(UpdateAvailability::UpToDate); + } + if manifest_version > *current { + return Ok(UpdateAvailability::Available { + version: set.version.clone(), + }); + } + if self.config.allow_downgrade { + return Ok(UpdateAvailability::Available { + version: set.version.clone(), + }); + } + Ok(UpdateAvailability::UpToDate) + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use ed25519_dalek::{Signer, SigningKey}; + use sha2::{Digest, Sha256}; + use wiremock::matchers::{method, path}; + use wiremock::{Mock, MockServer, ResponseTemplate}; + + use super::*; + use crate::manifest::{Artifact, ArtifactSet, Channel, Manifest}; + + const TRIPLE: &str = "x86_64-unknown-linux-gnu"; + + fn make_manifest_with_launcher_version(version: &str) -> Manifest { + let mut artifacts = BTreeMap::new(); + artifacts.insert( + TRIPLE.to_string(), + Artifact { + url: "https://example.com/launcher".to_string(), + sha256: "deadbeef".to_string(), + size: 1_000_000, + }, + ); + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: version.to_string(), + artifacts, + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + } + } + + fn sign_and_serve_manifest( + manifest: &Manifest, + key: &SigningKey, + ) -> (Vec, String) { + #[allow(clippy::unwrap_used)] + let body = serde_json::to_vec(manifest).unwrap(); + let hash = Sha256::digest(&body); + let sig = key.sign(&hash); + let sig_hex = hex::encode(sig.to_bytes()); + (body, sig_hex) + } + + #[allow(clippy::unwrap_used)] + fn make_updater_config( + server: &MockServer, + key: &SigningKey, + current_version: &str, + allow_downgrade: bool, + ) -> UpdaterConfig { + UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: Version::parse(current_version).unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade, + user_agent: "test".to_string(), + state_dir: std::env::temp_dir(), + cache_dir: std::env::temp_dir(), + } + } + + async fn mount_manifest(server: &MockServer, manifest: &Manifest, key: &SigningKey) { + let (body, sig_hex) = sign_and_serve_manifest(manifest, key); + Mock::given(method("GET")) + .and(path("/manifest.json")) + .respond_with(ResponseTemplate::new(200).set_body_bytes(body)) + .mount(server) + .await; + Mock::given(method("GET")) + .and(path("/manifest.json.sig")) + .respond_with(ResponseTemplate::new(200).set_body_string(sig_hex)) + .mount(server) + .await; + } + + #[allow(clippy::unwrap_used, clippy::expect_used, clippy::missing_panics_doc)] + mod check { + use super::*; + + #[tokio::test] + async fn up_to_date_returns_uptodate() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest_with_launcher_version("1.4.0"); + mount_manifest(&server, &manifest, &key).await; + let updater = Updater::new(make_updater_config(&server, &key, "1.4.0", false)); + let result = updater.check_launcher().await.unwrap(); + assert_eq!(result, UpdateAvailability::UpToDate); + } + + #[tokio::test] + async fn newer_returns_available() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest_with_launcher_version("1.5.0"); + mount_manifest(&server, &manifest, &key).await; + let updater = Updater::new(make_updater_config(&server, &key, "1.4.0", false)); + let result = updater.check_launcher().await.unwrap(); + assert_eq!( + result, + UpdateAvailability::Available { + version: "1.5.0".to_string() + } + ); + } + + #[tokio::test] + async fn older_returns_uptodate_unless_downgrade() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_manifest_with_launcher_version("1.3.0"); + mount_manifest(&server, &manifest, &key).await; + let updater = Updater::new(make_updater_config(&server, &key, "1.4.0", false)); + let result = updater.check_launcher().await.unwrap(); + assert_eq!(result, UpdateAvailability::UpToDate); + + let server2 = MockServer::start().await; + let key2 = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest2 = make_manifest_with_launcher_version("1.3.0"); + mount_manifest(&server2, &manifest2, &key2).await; + let updater2 = Updater::new(make_updater_config(&server2, &key2, "1.4.0", true)); + let result2 = updater2.check_launcher().await.unwrap(); + assert_eq!( + result2, + UpdateAvailability::Available { + version: "1.3.0".to_string() + } + ); + } + + #[tokio::test] + async fn missing_artifact_for_triple_errors() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: "1.5.0".to_string(), + artifacts: BTreeMap::new(), + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + let manifest = Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + }; + mount_manifest(&server, &manifest, &key).await; + let updater = Updater::new(make_updater_config(&server, &key, "1.4.0", false)); + let result = updater.check_launcher().await.unwrap(); + assert_eq!(result, UpdateAvailability::NoArtifactForPlatform); + } + } + + #[allow(clippy::unwrap_used, clippy::expect_used, clippy::missing_panics_doc)] + mod download { + use super::*; + + #[tokio::test] + async fn sha256_mismatch_fails() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + + let file_content = b"fake binary content"; + let wrong_sha256 = "0".repeat(64); + + Mock::given(method("GET")) + .and(path("/file.bin")) + .respond_with( + ResponseTemplate::new(200).set_body_bytes(file_content.to_vec()), + ) + .mount(&server) + .await; + + let mut artifacts = BTreeMap::new(); + artifacts.insert( + TRIPLE.to_string(), + Artifact { + url: format!("{}/file.bin", server.uri()), + sha256: wrong_sha256.clone(), + size: file_content.len() as u64, + }, + ); + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: "1.5.0".to_string(), + artifacts, + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + let manifest = Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + }; + mount_manifest(&server, &manifest, &key).await; + + let tmp = tempfile::tempdir().unwrap(); + let config = UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: Version::parse("1.4.0").unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir: tmp.path().to_path_buf(), + cache_dir: tmp.path().to_path_buf(), + }; + let updater = Updater::new(config); + let partial = tmp.path().join("file.bin.partial"); + let artifact_url = format!("{}/file.bin", server.uri()); + let result = updater + .download_and_verify(&artifact_url, &wrong_sha256, &partial) + .await; + assert!(matches!(result, Err(UpdateError::Sha256Mismatch))); + } + + #[tokio::test] + async fn network_error_propagates() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + + Mock::given(method("GET")) + .and(path("/file.bin")) + .respond_with(ResponseTemplate::new(500)) + .mount(&server) + .await; + + let config = UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: Version::parse("1.4.0").unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir: std::env::temp_dir(), + cache_dir: std::env::temp_dir(), + }; + let updater = Updater::new(config); + let partial = std::env::temp_dir().join("test.partial"); + let result = updater + .download_and_verify( + &format!("{}/file.bin", server.uri()), + "deadbeef", + &partial, + ) + .await; + assert!(matches!(result, Err(UpdateError::Network(_)))); + } + } + + #[allow(clippy::unwrap_used, clippy::expect_used, clippy::missing_panics_doc)] + mod apply { + use super::*; + + async fn make_apply_manifest(server: &MockServer, key: &SigningKey, content: &[u8]) -> Manifest { + let sha256 = hex::encode(Sha256::digest(content)); + Mock::given(method("GET")) + .and(path("/launcher.bin")) + .respond_with( + ResponseTemplate::new(200).set_body_bytes(content.to_vec()), + ) + .mount(server) + .await; + let mut artifacts = BTreeMap::new(); + artifacts.insert( + TRIPLE.to_string(), + Artifact { + url: format!("{}/launcher.bin", server.uri()), + sha256, + size: content.len() as u64, + }, + ); + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: "1.5.0".to_string(), + artifacts, + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + let manifest = Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + }; + mount_manifest(server, &manifest, key).await; + manifest + } + + #[tokio::test] + async fn self_replace_writes_pending_restart() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_apply_manifest(&server, &key, b"fake binary content").await; + + let tmp = tempfile::tempdir().unwrap(); + let state_dir = tmp.path().to_path_buf(); + let cache_dir = tmp.path().to_path_buf(); + + let config = UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: Version::parse("1.4.0").unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir: state_dir.clone(), + cache_dir: cache_dir.clone(), + }; + let updater = Updater::new(config).with_replacer(Box::new(|_path| Ok(()))); + let result = updater.apply_launcher_update(&manifest).await.unwrap(); + assert_eq!(result.version, "1.5.0"); + assert!(state_dir.join("pending_restart").exists()); + } + + #[tokio::test] + async fn backup_created_before_swap() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + let manifest = make_apply_manifest(&server, &key, b"fake binary content v2").await; + + let tmp = tempfile::tempdir().unwrap(); + let cache_dir = tmp.path().to_path_buf(); + let state_dir = tmp.path().to_path_buf(); + + let config = UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: Version::parse("1.4.0").unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir, + cache_dir: cache_dir.clone(), + }; + let updater = Updater::new(config).with_replacer(Box::new(|_path| Ok(()))); + updater.apply_launcher_update(&manifest).await.unwrap(); + let backup = cache_dir.join("updates").join("octobot-launcher.1.4.0"); + assert!(backup.exists()); + } + } + + #[allow(clippy::unwrap_used, clippy::expect_used, clippy::missing_panics_doc)] + mod lock { + use super::*; + + #[tokio::test] + async fn no_apply_during_instance_restart() { + let server = MockServer::start().await; + let key = SigningKey::generate(&mut rand::rngs::OsRng); + + let tmp = tempfile::tempdir().unwrap(); + let state_dir = tmp.path().to_path_buf(); + std::fs::write(state_dir.join("launcher.lock"), b"locked").unwrap(); + + let mut channels = BTreeMap::new(); + channels.insert( + "stable".to_string(), + Channel { + launcher: Some(ArtifactSet { + version: "1.5.0".to_string(), + artifacts: BTreeMap::new(), + }), + octobot_binary: None, + octobot_python: None, + python_dist: None, + }, + ); + let manifest = Manifest { + schema_version: 1, + generated_at: chrono::Utc::now(), + channels, + }; + + let config = UpdaterConfig { + manifest_url: format!("{}/manifest.json", server.uri()), + channel: "stable".to_string(), + current_launcher_version: Version::parse("1.4.0").unwrap(), + current_target_triple: TRIPLE, + pubkey: key.verifying_key(), + allow_downgrade: false, + user_agent: "test".to_string(), + state_dir, + cache_dir: tmp.path().to_path_buf(), + }; + let updater = Updater::new(config); + let result = updater.apply_launcher_update(&manifest).await; + assert!(matches!(result, Err(UpdateError::Locked))); + } + } + + mod baked_config { + use super::*; + + #[test] + fn all_zero_pubkey_returns_none() { + let zeros = "0".repeat(64); + let result = Updater::from_baked_config( + "https://example.com/manifest.json".to_string(), + "stable".to_string(), + &zeros, + std::path::PathBuf::from("/tmp"), + std::path::PathBuf::from("/tmp"), + ); + assert!(result.is_none(), "dev placeholder key must not produce an updater"); + } + + #[test] + fn invalid_hex_returns_none() { + let result = Updater::from_baked_config( + "https://example.com/manifest.json".to_string(), + "stable".to_string(), + "not-valid-hex", + std::path::PathBuf::from("/tmp"), + std::path::PathBuf::from("/tmp"), + ); + assert!(result.is_none()); + } + + #[test] + fn wrong_length_hex_returns_none() { + let result = Updater::from_baked_config( + "https://example.com/manifest.json".to_string(), + "stable".to_string(), + "deadbeef", + std::path::PathBuf::from("/tmp"), + std::path::PathBuf::from("/tmp"), + ); + assert!(result.is_none()); + } + } +} diff --git a/packages/launcher/install.ps1 b/packages/launcher/install.ps1 new file mode 100644 index 0000000000..94af0c3ff1 --- /dev/null +++ b/packages/launcher/install.ps1 @@ -0,0 +1,113 @@ +#Requires -Version 5.1 +[CmdletBinding()] +param( + [switch]$Service, + [string]$Version +) + +$ErrorActionPreference = 'Stop' + +$REPO = "Drakkar-Software/OctoBot" +$BINARY = "octobot-launcher" +$DEFAULT_INSTALL_DIR = Join-Path $env:LOCALAPPDATA "Programs\OctoBot-Launcher" + +function Main { + $installDir = if ($env:OCTOBOT_LAUNCHER_INSTALL_DIR) { + $env:OCTOBOT_LAUNCHER_INSTALL_DIR + } else { + $DEFAULT_INSTALL_DIR + } + + # Detect architecture + $arch = (Get-CimInstance Win32_ComputerSystem).SystemType + $triple = switch -Wildcard ($arch) { + "*x64*" { "x86_64-pc-windows-msvc" } + "*ARM64*" { + Write-Error "No prebuilt binary for ARM64 Windows yet." + exit 1 + } + default { + Write-Error "Unsupported architecture: $arch" + exit 1 + } + } + + # Resolve version + $resolvedVersion = if ($Version) { + $Version + } elseif ($env:OCTOBOT_LAUNCHER_VERSION) { + $env:OCTOBOT_LAUNCHER_VERSION + } else { + Write-Host "Fetching latest launcher release..." + $releases = Invoke-RestMethod "https://api.github.com/repos/$REPO/releases" + $tag = ($releases | Where-Object { $_.tag_name -like "launcher-v*" } | Select-Object -First 1).tag_name + if (-not $tag) { + Write-Error "Could not determine latest launcher release." + exit 1 + } + $tag -replace '^launcher-v', '' + } + + $archive = "${BINARY}-${resolvedVersion}-${triple}.zip" + $baseUrl = "https://github.com/${REPO}/releases/download/launcher-v${resolvedVersion}" + $tmpDir = [System.IO.Path]::GetTempPath() + [System.IO.Path]::GetRandomFileName() + New-Item -ItemType Directory -Path $tmpDir | Out-Null + + try { + Write-Host "Installing octobot-launcher v${resolvedVersion} (${triple})..." + + $archivePath = Join-Path $tmpDir $archive + $sha256Path = "$archivePath.sha256" + + # Download + Invoke-WebRequest "${baseUrl}/${archive}" -OutFile $archivePath + Invoke-WebRequest "${baseUrl}/${archive}.sha256" -OutFile $sha256Path + + # Verify checksum + $expected = (Get-Content $sha256Path).Split(' ')[0].ToLower() + $actual = (Get-FileHash $archivePath -Algorithm SHA256).Hash.ToLower() + if ($actual -ne $expected) { + Write-Error "Checksum mismatch!`n expected: $expected`n actual: $actual" + exit 1 + } + + # Extract + Expand-Archive -Path $archivePath -DestinationPath $tmpDir -Force + + # Install + if (-not (Test-Path $installDir)) { + New-Item -ItemType Directory -Path $installDir | Out-Null + } + $dest = Join-Path $installDir "${BINARY}.exe" + Move-Item -Force (Join-Path $tmpDir "${BINARY}.exe") $dest + + Write-Host "" + Write-Host "Installed: $dest" + Write-Host "" + + # Add to PATH if not already there + $userPath = [Environment]::GetEnvironmentVariable('PATH', 'User') + if ($userPath -notlike "*$installDir*") { + [Environment]::SetEnvironmentVariable('PATH', "$userPath;$installDir", 'User') + Write-Host "Added $installDir to your user PATH." + Write-Host "Restart your terminal for the change to take effect." + Write-Host "" + } + + # Service install + if ($Service) { + & $dest service install --user + Write-Host "Service installed. Start it with:" + Write-Host " octobot-launcher service start" + } else { + Write-Host "Next steps:" + Write-Host " octobot-launcher service install --user" + Write-Host " octobot-launcher service start" + } + + } finally { + Remove-Item -Recurse -Force $tmpDir -ErrorAction SilentlyContinue + } +} + +Main diff --git a/packages/launcher/install.sh b/packages/launcher/install.sh new file mode 100755 index 0000000000..985d54bf68 --- /dev/null +++ b/packages/launcher/install.sh @@ -0,0 +1,146 @@ +#!/bin/sh +set -e + +REPO="Drakkar-Software/OctoBot" +BINARY="octobot-launcher" +DEFAULT_INSTALL_DIR="$HOME/.local/bin" + +main() { + # Parse flags + INSTALL_SERVICE=0 + for arg in "$@"; do + case "$arg" in + --service) INSTALL_SERVICE=1 ;; + --help|-h) + echo "Usage: install.sh [--service]" + echo " --service Also run 'octobot-launcher service install --user' after installing" + echo "Env vars:" + echo " OCTOBOT_LAUNCHER_VERSION Pin a specific version (e.g. 0.1.0)" + echo " OCTOBOT_LAUNCHER_INSTALL_DIR Override install directory" + exit 0 + ;; + esac + done + + INSTALL_DIR="${OCTOBOT_LAUNCHER_INSTALL_DIR:-$DEFAULT_INSTALL_DIR}" + + # Detect OS + OS="$(uname -s)" + case "$OS" in + Darwin) OS_NAME="darwin" ;; + Linux) OS_NAME="linux" ;; + *) + echo "error: unsupported OS: $OS" >&2 + exit 1 + ;; + esac + + # Detect architecture + ARCH="$(uname -m)" + case "$ARCH" in + x86_64|amd64) ARCH_NAME="x86_64" ;; + arm64|aarch64) ARCH_NAME="aarch64" ;; + *) + echo "error: unsupported architecture: $ARCH" >&2 + exit 1 + ;; + esac + + # Map to Rust target triple + case "${OS_NAME}-${ARCH_NAME}" in + darwin-aarch64) TRIPLE="aarch64-apple-darwin" ;; + darwin-x86_64) TRIPLE="x86_64-apple-darwin" ;; + linux-x86_64) TRIPLE="x86_64-unknown-linux-gnu" ;; + linux-aarch64) TRIPLE="aarch64-unknown-linux-gnu" ;; + *) + echo "error: no prebuilt binary for ${OS_NAME}-${ARCH_NAME}" >&2 + exit 1 + ;; + esac + + # Resolve version + if [ -n "$OCTOBOT_LAUNCHER_VERSION" ]; then + VERSION="$OCTOBOT_LAUNCHER_VERSION" + else + echo "Fetching latest launcher release..." + # Find the most recent tag that starts with launcher-v + RELEASES_JSON="$(curl -fsSL "https://api.github.com/repos/${REPO}/releases")" + VERSION="$(printf '%s' "$RELEASES_JSON" \ + | grep '"tag_name"' \ + | grep '"launcher-v' \ + | head -1 \ + | sed 's/.*"launcher-v\([^"]*\)".*/\1/')" + if [ -z "$VERSION" ]; then + echo "error: could not determine latest launcher release" >&2 + exit 1 + fi + fi + + ARCHIVE="${BINARY}-${VERSION}-${TRIPLE}.tar.gz" + BASE_URL="https://github.com/${REPO}/releases/download/launcher-v${VERSION}" + TMPDIR="$(mktemp -d)" + trap 'rm -rf "$TMPDIR"' EXIT + + echo "Installing octobot-launcher v${VERSION} (${TRIPLE})..." + + # Download + curl -fL --progress-bar \ + "${BASE_URL}/${ARCHIVE}" -o "${TMPDIR}/${ARCHIVE}" + curl -fsSL \ + "${BASE_URL}/${ARCHIVE}.sha256" -o "${TMPDIR}/${ARCHIVE}.sha256" + + # Verify checksum + EXPECTED="$(awk '{print $1}' "${TMPDIR}/${ARCHIVE}.sha256")" + if command -v shasum >/dev/null 2>&1; then + ACTUAL="$(shasum -a 256 "${TMPDIR}/${ARCHIVE}" | awk '{print $1}')" + elif command -v sha256sum >/dev/null 2>&1; then + ACTUAL="$(sha256sum "${TMPDIR}/${ARCHIVE}" | awk '{print $1}')" + else + echo "warning: neither shasum nor sha256sum found; skipping checksum verification" >&2 + ACTUAL="$EXPECTED" + fi + if [ "$ACTUAL" != "$EXPECTED" ]; then + echo "error: checksum mismatch" >&2 + echo " expected: $EXPECTED" >&2 + echo " actual: $ACTUAL" >&2 + exit 1 + fi + + # Extract + tar -xzf "${TMPDIR}/${ARCHIVE}" -C "$TMPDIR" + + # Install + mkdir -p "$INSTALL_DIR" + mv "${TMPDIR}/${BINARY}" "${INSTALL_DIR}/${BINARY}" + chmod +x "${INSTALL_DIR}/${BINARY}" + + echo "" + echo "Installed: ${INSTALL_DIR}/${BINARY}" + echo "" + + # PATH hint + case ":${PATH}:" in + *":${INSTALL_DIR}:"*) ;; + *) + echo " ${INSTALL_DIR} is not on your PATH." + echo " Add it with one of:" + echo " fish: fish_add_path ${INSTALL_DIR}" + echo " bash: echo 'export PATH=\"\$PATH:${INSTALL_DIR}\"' >> ~/.bashrc" + echo " zsh: echo 'export PATH=\"\$PATH:${INSTALL_DIR}\"' >> ~/.zshrc" + echo "" + ;; + esac + + # Service install + if [ "$INSTALL_SERVICE" -eq 1 ]; then + "${INSTALL_DIR}/${BINARY}" service install --user + echo "Service installed. Start it with:" + echo " octobot-launcher service start" + else + echo "Next steps:" + echo " octobot-launcher service install --user" + echo " octobot-launcher service start" + fi +} + +main "$@" diff --git a/packages/node/.dockerignore b/packages/node/.dockerignore new file mode 100644 index 0000000000..ffa6c9a4a4 --- /dev/null +++ b/packages/node/.dockerignore @@ -0,0 +1,108 @@ +# dev +.idea + +# CI files +.coveragerc +.coveralls.yml +.travis.yml +appveyor.yml +renovate.json +setup.cfg +tox.ini + +# octobot +tentacles +user +logs + +# Git +.git +Dockerfile +.DS_Store +.gitignore +.dockerignore +.github + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml + +# Flask stuff: +instance/ +.webassets-cache + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# Environments +.env +.venv +env/ +venv/ +ENV/ + +# documentation +docs + +# others +.nojekyll diff --git a/packages/node/.env.sample b/packages/node/.env.sample new file mode 100644 index 0000000000..ef3401d5dc --- /dev/null +++ b/packages/node/.env.sample @@ -0,0 +1,23 @@ +NODE_API_URL=http://localhost:8000 + +# Environment: local, production +NODE_ENVIRONMENT=local + +# Backend +BACKEND_CORS_ORIGINS="http://localhost,http://localhost:5173,http://localhost:8000,https://localhost,https://localhost:5173" + +SCHEDULER_POSTGRES_URL= +SCHEDULER_SQLITE_FILE=tasks.db + +BLOCKCHAIN_WALLETS_EXTRA_CONFIG={} + +# Task encryption keys (server-side only) +# TASKS_SERVER_RSA_PRIVATE_KEY= +# TASKS_SERVER_ECDSA_PRIVATE_KEY= +# TASKS_USER_RSA_PUBLIC_KEY= +# TASKS_USER_ECDSA_PUBLIC_KEY= + +# User encryption keys (browser side ONLY — entered in Settings, not in .env) +# USER_RSA_PRIVATE_KEY= +# USER_ECDSA_PRIVATE_KEY= +# Note: SERVER_RSA_PUBLIC_KEY and SERVER_ECDSA_PUBLIC_KEY are auto-fetched from /api/v1/tasks/server-public-keys diff --git a/packages/node/.env.test b/packages/node/.env.test new file mode 100644 index 0000000000..c64cfc6c79 --- /dev/null +++ b/packages/node/.env.test @@ -0,0 +1,13 @@ +NODE_API_URL=http://localhost:8000 + +# Environment: local, production +NODE_ENVIRONMENT=local + +# Backend +BACKEND_CORS_ORIGINS="http://localhost,http://localhost:5173,http://localhost:8000,https://localhost,https://localhost:5173" +SECRET_KEY=123456789 + +SCHEDULER_POSTGRES_URL= +SCHEDULER_SQLITE_FILE=tasks.db + +BLOCKCHAIN_WALLETS_EXTRA_CONFIG={} diff --git a/packages/node/BUILD b/packages/node/BUILD new file mode 100644 index 0000000000..119fee0eeb --- /dev/null +++ b/packages/node/BUILD @@ -0,0 +1,19 @@ +python_requirements(name="reqs") + +python_sources(name="octobot_node", sources=["octobot_node/**/*.py"]) + +files( + name="test_csv", + sources=["tests/**/*.csv"], +) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_node", + ":reqs", + "//:dev_reqs", + ":test_csv", + ], +) \ No newline at end of file diff --git a/packages/node/CHANGELOG.md b/packages/node/CHANGELOG.md new file mode 100644 index 0000000000..10498d6e5d --- /dev/null +++ b/packages/node/CHANGELOG.md @@ -0,0 +1,91 @@ +# Changelog +All notable changes to this project will be documented in this file. + +## [0.0.5] - 2026-01-09 +### Added +- `TaskResultKeys` enum to standardize task result dictionary keys (`status`, `task`, `result`, `error`, `metadata`) +- `get_task_name()` method in scheduler to extract task names from task data structures +- Metadata column support in CSV task imports for encrypted task metadata +- `parse_key_to_bytes()` utility function for automatic encryption key format conversion +- `EncryptedTask` context manager in `task_context.py` for automatic task content decryption and result encryption +- `content_metadata` field to Task model for storing encrypted content metadata separately +- `result` and `result_metadata` fields to Task model for storing encrypted task results and their metadata +- `METADATA` key to `TaskResultKeys` enum for standardized metadata handling in task results + +### Changed +- Task names now display actual task names instead of task IDs in the UI and API responses +- Scheduled task descriptions now show ETA timestamp (e.g., "Scheduled at 2026-01-09 10:30:00") instead of generic "Scheduled task" messages +- Encryption key configuration now accepts both string and bytes formats (automatic conversion via `BeforeValidator`) +- Encryption metadata now uses base64 encoding/decoding for improved compatibility +- Task result dictionaries now use standardized `TaskResultKeys` enum values instead of hardcoded strings +- Logging messages now use task names instead of task IDs for better readability +- Task result dictionaries now include task name in the `task` field for better traceability +- Task execution now uses `EncryptedTask` context manager for automatic encryption/decryption handling +- Task functions now set `task.result` directly instead of returning encrypted result dictionaries +- Task result dictionaries now use `TaskStatus.COMPLETED` enum value instead of hardcoded "done" string +- Task result dictionaries now include `metadata` field for encrypted result metadata +- Task model `metadata` field renamed to `content_metadata` for clarity (input metadata vs result metadata) + +## [0.0.4] - 2026-01-09 +### Added +- Task encryption and decryption functionality for task inputs and outputs +- Hybrid encryption module using RSA (4096-bit), AES-GCM (256-bit), and ECDSA signatures (SECP256R1) +- `encrypt_task_content()` and `decrypt_task_content()` functions for encrypting/decrypting task inputs +- `encrypt_task_result()` and `decrypt_task_result()` functions for encrypting/decrypting task outputs +- Automatic task content decryption during task execution when encryption keys are configured +- CSV encryption utilities for task imports (`encrypt_csv_content`, `decrypt_csv_content`, `merge_and_encrypt_csv`) +- Key generation and management utilities for encryption keys +- Encryption key configuration via environment variables: + - `TASKS_SERVER_RSA_PRIVATE_KEY` and `TASKS_SERVER_ECDSA_PRIVATE_KEY` (server-held private keys) + - `TASKS_USER_RSA_PUBLIC_KEY` and `TASKS_USER_ECDSA_PUBLIC_KEY` (user-provided public keys) +- Custom exception classes for encryption errors: `EncryptionTaskError`, `MissingMetadataError`, `MetadataParsingError`, `SignatureVerificationError` +- Comprehensive encryption module documentation (README.md) + +### Changed +- Task execution now automatically decrypts task content if encryption keys are configured +- Tasks operate in plaintext mode when encryption keys are not set (backward compatible) + +## [0.0.3] - 2026-01-08 +### Added +- `--master` CLI flag to enable master node mode (schedules tasks) +- `--consumers N` CLI flag to configure number of consumer worker threads (0 disables consumers) +- `--environment {local,production}` CLI flag to set environment mode +- `--admin-username` and `--admin-password` CLI flags to set admin credentials +- `--verbose` CLI flag to enable verbose logging with HTTP access logs +- Support for nodes to operate as both master and consumer simultaneously +- Automatic auto-reload when environment is set to "local" + +### Changed +- Replaced `--workers` and `--reload` CLI flags with new `--master` and `--consumers` flags +- Replaced `SCHEDULER_NODE_TYPE` configuration with `IS_MASTER_MODE` boolean flag +- Default `SCHEDULER_WORKERS` changed from 4 to 0 (consumers disabled by default) +- Default `ENVIRONMENT` changed from "local" to "production" +- Removed "staging" from environment options (now only "local" and "production") +- Default host binding: 127.0.0.1 for non-master nodes, 0.0.0.0 for master nodes in production +- FastAPI server now always runs with a single worker (consumer workers are separate) +- Admin credentials validation now only required when master mode is enabled +- Task list API endpoint now returns raw task data instead of Task model instances +- Node status API now returns node_type as "master", "consumer", "both", or "none" +- Redis connection now uses `decode_responses=False` for better compatibility +- Improved logging messages and error handling throughout scheduler components + +### Fixed +- Fixed Redis decode_responses configuration for better compatibility +- Fixed task parsing error messages formatting + +## [0.0.2] - 2026-01-07 +### Added +- Default values for admin credentials (`ADMIN_USERNAME` and `ADMIN_PASSWORD`) to simplify local setup +- Default admin username: `admin@example.com` +- Default admin password: `changethis` + +### Changed +- Renamed `FIRST_SUPERUSER` environment variable to `ADMIN_USERNAME` +- Renamed `FIRST_SUPERUSER_PASSWORD` environment variable to `ADMIN_PASSWORD` +- Admin credentials now have default values (previously required to be set) +- Validation warnings now use logging instead of Python warnings module +- Updated `.env.sample` and `.env.test` files to use new variable names + +## [0.0.1] - 2026-01-07 +### Added +- OctoBot Node alpha version diff --git a/packages/node/MANIFEST.in b/packages/node/MANIFEST.in new file mode 100644 index 0000000000..41f7d005d7 --- /dev/null +++ b/packages/node/MANIFEST.in @@ -0,0 +1,4 @@ +include README.md +include LICENSE +include CHANGELOG.md +include requirements.txt diff --git a/packages/node/README.md b/packages/node/README.md new file mode 100644 index 0000000000..91f9b293bb --- /dev/null +++ b/packages/node/README.md @@ -0,0 +1,184 @@ +# OctoBot Node +[![OctoBot-Node-CI](https://github.com/Drakkar-Software/OctoBot-Node/workflows/OctoBot-Node-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Node/actions) +[![Telegram](https://img.shields.io/badge/Telegram-grey.svg?logo=telegram)](https://t.me/OctoBot_Project) +[![Twitter](https://img.shields.io/twitter/follow/DrakkarsOctobot.svg?label=twitter&style=social)](https://x.com/DrakkarsOctoBot) +[![YouTube](https://img.shields.io/youtube/channel/views/UC2YAaBeWY8y_Olqs79b_X8A?label=youtube&style=social)](https://www.youtube.com/@octobot1134) + +

+OctoBot Node logo +

+ +

+Run any OctoBot, anywhere, with ease +

+ +This project is related to [OctoBot](https://github.com/Drakkar-Software/OctoBot). + +## Usage + +### CLI + +OctoBot-Node provides a command-line interface (CLI) for starting the server and managing the application. + +#### Basic Usage + +Start the server with default settings: +```bash +python start.py +``` + +Or if installed via pip: +```bash +octobot_node +``` + +#### CLI Options + +- `-v, --version`: Show OctoBot-Node current version +- `--host HOST`: Host to bind the server to (default: 0.0.0.0 for master in production, 127.0.0.1 otherwise) +- `--port PORT`: Port to bind the server to (default: 8000) +- `--master`: Enable master node mode (schedules tasks) +- `--consumers N`: Number of consumer worker threads (0 disables consumers, default: 0). Can be used with --master +- `--environment {local,production}`: Environment mode (default: from NODE_ENVIRONMENT environment variable). Auto-reload is enabled automatically when environment is local +- `--admin-username EMAIL`: Admin username in email format (default: from ADMIN_USERNAME environment variable) +- `--admin-password PASSWORD`: Admin password (default: from ADMIN_PASSWORD environment variable) + +#### Examples + +Start the server on a custom host and port: +```bash +python start.py --host 127.0.0.1 --port 9000 +``` + +Start as master node (schedules tasks): +```bash +python start.py --master +``` + +Start with consumer workers: +```bash +python start.py --consumers 4 +``` + +Start as master node with consumer workers: +```bash +python start.py --master --consumers 4 +``` + +Start in development mode (auto-reload enabled automatically): +```bash +python start.py --environment local +``` + +Start in production mode: +```bash +python start.py --master --environment production +``` + +Set admin credentials: +```bash +python start.py --master --admin-username admin@example.com --admin-password mypassword +``` + +Show version: +```bash +python start.py --version +``` + +### With Redis + +For using Redis as the scheduler backend: +```bash +docker run -p 6379:6379 --name redis -d redis redis-server --save 60 1 --loglevel warning +``` + +## Developers +### Prerequisites + +Before proceeding, ensure you have [**Python 3.10+**](https://www.python.org) and [**Node.js 20+**](https://nodejs.org) installed on your system. + +Once you have installed Python and Node.js, run the following commands: +```bash +npm install +pip install -r requirements.txt +cp .env.sample .env +``` + +### Web UI + +The Web UI can be used in two modes: **static** and **dynamic (development)**. The Web UI is built using [React](https://github.com/facebook/react), [Vite](https://github.com/vitejs/vite), [TanStack](https://github.com/TanStack) and [shadcn-ui](https://github.com/shadcn-ui/ui). + +#### Static Web UI + +If you do not need to modify the Web UI code, it is recommended to use the static mode for better performance. +To build the static assets, run: +```bash +npm run build +``` +After building, start the FastAPI server. The static Web UI will be available at [http://localhost:8000/app](http://localhost:8000/app). + +#### Dynamic (Development) Web UI + +If you plan to actively develop or modify the Web UI, use the dynamic development mode. This provides hot-reload and the latest changes instantly. +To run the Web UI in development mode, use: +```bash +npm --prefix ../tentacles/Services/Interfaces/node_web_interface run ui:dev +``` +This will start the development server, typically available at [http://localhost:3000](http://localhost:3000). You can access the UI separately while developing. +For API integration during development, make sure your FastAPI backend server is running simultaneously. The development server will proxy API requests to the backend as configured. + +### OpenAPI + +Whenever you update or add routes in `tentacles/Services/Interfaces/node_api/api`, you need to regenerate the [OpenAPI specification](https://github.com/OAI/OpenAPI-Specification) and the UI OpenAPI client. This can be done easily with the provided script: +```bash +bash ../tentacles/Services/Interfaces/node_web_interface/generate-client.sh +``` + +### API Server + +The API server is built using [FastAPI](https://github.com/fastapi) and provides the backend REST endpoints and websocket interface for OctoBot Node. + +#### Running the FastAPI Server + +You can start the API server using the CLI (recommended): + +```bash +python start.py --master +``` + +Or directly with uvicorn: + +```bash +uvicorn tentacles.Services.Interfaces.node_api_interface.node_api_interface:NodeApiInterface.create_app --factory --host 0.0.0.0 --port 8000 +``` + +- By default, the server runs on [http://localhost:8000](http://localhost:8000). +- You can configure environment variables via `.env`, including host, port, and scheduler/backend settings. +- For development: Use `--environment local` flag. Auto-reload is enabled automatically in local environment. +- For production: Use `--master --environment production` to enable master mode in production. +- The FastAPI server always runs with a single worker (default FastAPI behavior). +- Consumer workers are configured separately using `--consumers N`. + +##### Environment Variables + +Some key `.env` variables: +- `SCHEDULER_POSTGRES_URL` (if using Postgres as backend) +- `SCHEDULER_SQLITE_FILE` (if using SQLite, default: "tasks.db") +- `SCHEDULER_WORKERS` (number of consumer workers, default: 0, can be overridden with --consumers) +- `NODE_ENVIRONMENT` (environment mode: "local" or "production", default: "production") +- `ADMIN_USERNAME` (admin username in email format, can be overridden with --admin-username) +- `ADMIN_PASSWORD` (admin password, can be overridden with --admin-password) + +Note: Master mode is controlled via the `--master` CLI flag, not via environment variables. + +See `.env.sample` for all options, and adjust as needed. + +#### Scheduler + +The task scheduler is automatically started together with the FastAPI server through import of the `octobot_node/scheduler` module. The scheduler uses [DBOS](https://docs.dbos.dev/) for durable workflow and task queue management. + +- **No manual launch needed** — scheduler and consumers are managed automatically on startup. +- Configuration for the scheduler backend (Postgres or SQLite) is picked up from environment variables. +- Consumer workers are started automatically if `SCHEDULER_WORKERS > 0` (or `--consumers N` is used). +- Master mode is enabled via the `--master` CLI flag and allows the node to schedule tasks. +- A node can be both a master (schedules tasks) and run consumer workers simultaneously. diff --git a/packages/node/octobot_node/__init__.py b/packages/node/octobot_node/__init__.py new file mode 100644 index 0000000000..f2ef890337 --- /dev/null +++ b/packages/node/octobot_node/__init__.py @@ -0,0 +1,21 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + + +PROJECT_NAME = "OctoBot-Node" +AUTHOR = "Drakkar-Software" +VERSION = "0.0.5" # major.minor.revision +LONG_VERSION = f"{VERSION}" diff --git a/packages/node/octobot_node/config.py b/packages/node/octobot_node/config.py new file mode 100644 index 0000000000..960a411358 --- /dev/null +++ b/packages/node/octobot_node/config.py @@ -0,0 +1,102 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import sys +from typing import Annotated, Any, Literal + +from pydantic import ( + AnyUrl, + BeforeValidator, + HttpUrl, + computed_field, + Field, +) +from pydantic_settings import BaseSettings, SettingsConfigDict + + +def _get_env_file() -> str: + # Check if pytest module is imported + if "pytest" in sys.modules: + return ".env.test" + return ".env" + + +def parse_cors(v: Any) -> list[str] | str: + if isinstance(v, str) and not v.startswith("["): + return [i.strip() for i in v.split(",") if i.strip()] + elif isinstance(v, list | str): + return v + raise ValueError(v) + + +def parse_key_to_bytes(v: str | bytes | None) -> bytes | None: + if v is None: + return None + if isinstance(v, bytes): + return v + return v.encode('utf-8') + + +class Settings(BaseSettings): + model_config = SettingsConfigDict( + # Use .env.test when running tests, otherwise use .env + env_file=_get_env_file(), + env_ignore_empty=True, + extra="ignore", + ) + NODE_ENVIRONMENT: Literal["local", "production"] = "production" + BACKEND_HOST: str = "http://localhost:8000" + FRONTEND_HOST: str = "http://localhost:5173" if NODE_ENVIRONMENT == "local" else BACKEND_HOST + + BACKEND_CORS_ORIGINS: Annotated[ + list[AnyUrl] | str, BeforeValidator(parse_cors) + ] = [] + + @computed_field # type: ignore[prop-decorator] + @property + def all_cors_origins(self) -> list[str]: + return [str(origin).rstrip("/") for origin in self.BACKEND_CORS_ORIGINS] + [ + self.FRONTEND_HOST + ] + + SENTRY_DSN: HttpUrl | None = None + SCHEDULER_POSTGRES_URL: AnyUrl | None = None # examplee: postgresql://postgres:password@localhost:5432/dbos_example + SCHEDULER_SQLITE_FILE: str = "tasks.db" # example tasks.db + IS_MASTER_MODE: bool = False # True: start OctoBot Node as master (enables master-side features) + CONSUMER_ONLY: bool = False # True: start OctoBot Node in consumer mode only (requires a postgres database) + SCHEDULER_MAX_EXECUTOR_THREADS: int = 200 #todo reduce after dbos 2.13.0 is released + POSTGRES_STORAGE_CERTS_PATH: str | None = None + + # Task encryption keys (server-side) + TASKS_SERVER_RSA_PRIVATE_KEY: Annotated[bytes | None, BeforeValidator(parse_key_to_bytes)] = None + TASKS_SERVER_ECDSA_PRIVATE_KEY: Annotated[bytes | None, BeforeValidator(parse_key_to_bytes)] = None + TASKS_USER_RSA_PUBLIC_KEY: Annotated[bytes | None, BeforeValidator(parse_key_to_bytes)] = None + TASKS_USER_ECDSA_PUBLIC_KEY: Annotated[bytes | None, BeforeValidator(parse_key_to_bytes)] = None + + USE_DEDICATED_LOG_FILE_PER_AUTOMATION: bool = True + + @computed_field + @property + def is_node_side_encryption_enabled(self) -> bool: + return bool(self.TASKS_SERVER_RSA_PRIVATE_KEY and self.TASKS_SERVER_ECDSA_PRIVATE_KEY) + + @computed_field + @property + def tasks_encryption_enabled(self) -> bool: + return self.is_node_side_encryption_enabled + + +settings = Settings() # type: ignore diff --git a/packages/node/octobot_node/constants.py b/packages/node/octobot_node/constants.py new file mode 100644 index 0000000000..aa69443461 --- /dev/null +++ b/packages/node/octobot_node/constants.py @@ -0,0 +1,35 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import os + +try: + import octobot.constants as octobot_constants + BASE_LOGS_FOLDER = octobot_constants.LOGS_FOLDER +except ImportError: + BASE_LOGS_FOLDER = "logs" + +AUTOMATION_LOGS_FOLDER = f"{BASE_LOGS_FOLDER}/automations" +PARENT_WORKFLOW_ID_LENGTH = 36 # length of a UUID4 + +# default to 10 retry after 1, 2, 4, 8, 16, ... 1024 seconds (total of 2047 seconds) +AUTOMATION_WORKFLOW_RETRY_INTERVAL_SECONDS = float(os.getenv("AUTOMATION_WORKFLOW_RETRY_INTERVAL_SECONDS", 1.0)) +AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES = int(os.getenv("AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES", 10)) +AUTOMATION_WORKFLOW_BACKOFF_RATE = float(os.getenv("AUTOMATION_WORKFLOW_BACKOFF_RATE", 2)) + +TASKS_ENCRYPTION_ENV_VARS = [ + "TASKS_SERVER_RSA_PRIVATE_KEY", + "TASKS_SERVER_ECDSA_PRIVATE_KEY", +] diff --git a/packages/node/octobot_node/enums.py b/packages/node/octobot_node/enums.py new file mode 100644 index 0000000000..d672bc4f18 --- /dev/null +++ b/packages/node/octobot_node/enums.py @@ -0,0 +1,37 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import enum + +class TaskResultKeys(enum.Enum): + STATUS = "status" + TASK = "task" + RESULT = "result" + ERROR = "error" + METADATA = "metadata" + + +class AutomationWorkflowActionTypes(enum.Enum): + USER_ACTIONS = "user_actions" + TRADING_SIGNAL = "trading_signal" + + +class AutomationWorkflowMessageTopics(enum.Enum): + ACTIONS_UPDATE = "actions_update" + + +class SchedulerQueues(enum.Enum): + AUTOMATION_WORKFLOW_QUEUE = "automation_workflow_queue" diff --git a/packages/node/octobot_node/errors.py b/packages/node/octobot_node/errors.py new file mode 100644 index 0000000000..67b7181f24 --- /dev/null +++ b/packages/node/octobot_node/errors.py @@ -0,0 +1,30 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +class WorkflowError(Exception): + """Base class for all workflow errors""" + + +class WorkflowInputError(WorkflowError): + """Raised when a workflow input is invalid""" + + +class WorkflowActionExecutionError(WorkflowError): + """Raised when a workflow action execution fails""" + + +class WorkflowPriorityActionExecutionError(WorkflowActionExecutionError): + """Raised when a workflow priority action execution fails""" diff --git a/packages/node/octobot_node/models.py b/packages/node/octobot_node/models.py new file mode 100644 index 0000000000..2a2f52d74b --- /dev/null +++ b/packages/node/octobot_node/models.py @@ -0,0 +1,80 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import uuid +import typing +import datetime +from enum import Enum + +from pydantic import BaseModel, Field + + +class UserBase(BaseModel): + email: str = Field(max_length=255) + is_active: bool = True + is_superuser: bool = False + full_name: str | None = Field(default=None, max_length=255) + + +class User(UserBase): + id: uuid.UUID + + +class TaskStatus(str, Enum): + PENDING = "pending" + SCHEDULED = "scheduled" + PERIODIC = "periodic" + RUNNING = "running" + COMPLETED = "completed" + FAILED = "failed" + + +class TaskType(str, Enum): + EXECUTE_ACTIONS = "execute_actions" + +class Execution(BaseModel): + id: str + name: typing.Optional[str] = None + description: typing.Optional[str] = None + actions: typing.Optional[str] = None + content_metadata: typing.Optional[str] = None + type: typing.Optional[str] = None + status: typing.Optional[TaskStatus] = None + result: typing.Optional[str] = None + result_metadata: typing.Optional[str] = None + scheduled_at: typing.Optional[datetime.datetime] = None + completed_at: typing.Optional[datetime.datetime] = None + error: typing.Optional[str] = None + + +class Task(BaseModel): + id: str = str(uuid.uuid4()) + name: typing.Optional[str] = None + content: typing.Optional[str] = None + content_metadata: typing.Optional[str] = None + type: typing.Optional[str] = None + executions: list[Execution] = [] + error: typing.Optional[str] = None + user_rsa_public_key: typing.Optional[str] = None + user_ecdsa_public_key: typing.Optional[str] = None + +class Node(BaseModel): + node_type: str + backend_type: str + workers: int | None + status: str + redis_url: str | None = None + sqlite_file: str | None = None diff --git a/packages/node/octobot_node/scheduler/__init__.py b/packages/node/octobot_node/scheduler/__init__.py new file mode 100644 index 0000000000..fa819b5d7a --- /dev/null +++ b/packages/node/octobot_node/scheduler/__init__.py @@ -0,0 +1,48 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import logging + +import octobot_node.scheduler.scheduler as scheduler_lib +import octobot_node.scheduler.workflows + +scheduler_logger = logging.getLogger(__name__) + +SCHEDULER: scheduler_lib.Scheduler = scheduler_lib.Scheduler() + + +def is_enabled() -> bool: + return SCHEDULER.is_enabled() + + +def is_initialized() -> bool: + return SCHEDULER.is_initialized() + + +def initialize_scheduler(): + scheduler_logger.info("Initializing scheduler") + SCHEDULER.create() + octobot_node.scheduler.workflows.register_workflows() + SCHEDULER.start() + + +async def shutdown_scheduler_and_trading_signal_channel() -> None: + try: + import octobot_flow.repositories.community.trading_signals_channel as trading_signals_channel + await trading_signals_channel.shutdown_internal_trading_signal_channel() + except ImportError: + pass + SCHEDULER.stop() diff --git a/packages/node/octobot_node/scheduler/api.py b/packages/node/octobot_node/scheduler/api.py new file mode 100644 index 0000000000..c335b12b89 --- /dev/null +++ b/packages/node/octobot_node/scheduler/api.py @@ -0,0 +1,178 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import asyncio +import logging +import typing +import uuid + +import dbos + +import octobot_node.config +import octobot_node.constants +import octobot_node.models +import octobot_node.scheduler + +logger = logging.getLogger(__name__) + + +def get_node_status() -> dict[str, str | int | None | uuid.UUID]: + consumer_running = ( + octobot_node.scheduler.SCHEDULER.INSTANCE + and octobot_node.scheduler.SCHEDULER.INSTANCE._launched + ) + is_running = octobot_node.config.settings.IS_MASTER_MODE or bool(consumer_running) + status = "running" if is_running else "stopped" + + backend_type = "postgres" if octobot_node.config.settings.SCHEDULER_POSTGRES_URL else "sqlite" + workers = 1 + + if octobot_node.config.settings.IS_MASTER_MODE: + node_type = "both" + elif octobot_node.config.settings.CONSUMER_ONLY: + node_type = "consumer" + else: + # no worker should run + node_type = "none" + workers = 0 + + return { + "node_type": node_type, + "backend_type": backend_type, + "workers": workers, + "status": status, + "redis_url": None, + "sqlite_file": octobot_node.config.settings.SCHEDULER_SQLITE_FILE if not octobot_node.config.settings.SCHEDULER_POSTGRES_URL else None, + } + + +async def get_task_metrics() -> dict[str, int]: + try: + instance = octobot_node.scheduler.SCHEDULER.INSTANCE + if instance is None: + logger.warning("Scheduler instance not initialized") + pending, completed, periodic = [], [], [] + else: + pending, completed, periodic = await asyncio.gather( + instance.list_workflows_async(status=[ + dbos.WorkflowStatusString.ENQUEUED.value, dbos.WorkflowStatusString.PENDING.value + ]), + instance.list_workflows_async(status=[ + dbos.WorkflowStatusString.SUCCESS.value, dbos.WorkflowStatusString.ERROR.value + ]), + octobot_node.scheduler.SCHEDULER.get_periodic_tasks() + ) + return { + "pending": len(pending), + "scheduled": len(periodic), + "results": len(completed), + } + except Exception as e: + logger.error(f"Failed to retrieve task metrics from scheduler: {e}") + return {"pending": 0, "scheduled": 0, "results": 0} + + +def _get_active_execution( + executions: list[octobot_node.models.Execution], +) -> typing.Optional[octobot_node.models.Execution]: + pending = [e for e in executions if e.status == octobot_node.models.TaskStatus.PENDING] + if pending: + return pending[-1] + dated = sorted( + [e for e in executions if e.completed_at is not None], + key=lambda e: e.completed_at, + ) + return dated[-1] if dated else (executions[-1] if executions else None) + + +def _build_tasks_from_executions( + executions: list[octobot_node.models.Execution], +) -> list[octobot_node.models.Task]: + grouped: dict[str, list[octobot_node.models.Execution]] = {} + for execution in executions: + parent_id = execution.id[:octobot_node.constants.PARENT_WORKFLOW_ID_LENGTH] + grouped.setdefault(parent_id, []).append(execution) + + tasks = [] + for parent_id, group in grouped.items(): + active = _get_active_execution(group) + active_name = active.name if active else None + active_content = active.actions if active else None + error = active.error if active else None + for execution in group: + execution.name = None + if active is None or execution.id != active.id: + execution.result_metadata = None + tasks.append(octobot_node.models.Task( + id=parent_id, + name=active_name, + content=active_content, + executions=group, + error=error, + )) + return tasks + + +async def get_all_tasks() -> list[octobot_node.models.Task]: + executions: list[octobot_node.models.Execution] = [] + try: + periodic, pending, scheduled, results = await asyncio.gather( + octobot_node.scheduler.SCHEDULER.get_periodic_tasks(), + octobot_node.scheduler.SCHEDULER.get_pending_tasks(), + octobot_node.scheduler.SCHEDULER.get_scheduled_tasks(), + octobot_node.scheduler.SCHEDULER.get_results(), + ) + executions.extend(periodic) + executions.extend(pending) + executions.extend(scheduled) + executions.extend(results) + except Exception as e: + logger.error("Failed to retrieve tasks from scheduler: %s", e) + return [] + + tasks = _build_tasks_from_executions(executions) + logger.debug("Returning %d total tasks from %d executions", len(tasks), len(executions)) + return tasks + + +async def delete_tasks(task_ids: list[str]) -> list[str]: + await octobot_node.scheduler.SCHEDULER.delete_workflows(task_ids) + return task_ids + + +async def get_task_result(task_id: str): + try: + handle = await octobot_node.scheduler.SCHEDULER.INSTANCE.retrieve_workflow_async(task_id) + except Exception: + return {"error": "task not found"} + + try: + status = await handle.get_status() + if status is None: + return {"error": "task not found"} + wf_status = getattr(status, "status", None) or getattr(status, "workflow_status", None) + if wf_status == "SUCCESS": + result_data = await handle.get_result() + return {"status": "completed", "data": result_data} + if wf_status == "ERROR": + try: + result_data = await handle.get_result() + except Exception as e: + result_data = {"error": str(e)} + return {"status": "completed", "data": result_data} + except Exception as e: + logger.debug(f"Workflow {task_id} not yet complete: {e}") + return {"status": "pending or running"} diff --git a/packages/node/octobot_node/scheduler/encryption/README.md b/packages/node/octobot_node/scheduler/encryption/README.md new file mode 100644 index 0000000000..f0dbc21129 --- /dev/null +++ b/packages/node/octobot_node/scheduler/encryption/README.md @@ -0,0 +1,280 @@ +# Task Encryption Module + +This module provides secure encryption and decryption functionality for task inputs and outputs in the OctoBot Node scheduler. It implements a hybrid encryption scheme combining RSA and AES-GCM to ensure both security and performance. + +## Overview + +The encryption module uses a **hybrid encryption approach with digital signatures** that combines: +- **RSA encryption** (4096-bit) for securely exchanging AES keys +- **AES-GCM encryption** (256-bit) for encrypting the actual task data +- **ECDSA signatures** (SECP256R1) for non-repudiation and authenticity verification + +This approach leverages the strengths of all three cryptographic primitives: +- RSA provides secure key exchange without requiring a pre-shared secret +- AES-GCM provides fast, authenticated encryption for large data payloads +- ECDSA provides cryptographic signatures that cover both content and metadata, enabling verification without decryption + +## Security Features + +### 1. Hybrid Encryption +- **RSA (4096-bit)**: Used for secure key exchange + - OAEP padding with SHA-256 for enhanced security + - Prevents key exchange vulnerabilities +- **AES-GCM (256-bit)**: Used for data encryption + - Authenticated encryption ensures data integrity + - Fast encryption/decryption for large payloads + +### 2. Digital Signatures (ECDSA) +- **ECDSA (SECP256R1)**: Used for cryptographic signatures + - Provides non-repudiation and authenticity verification + - Signatures cover the entire payload (encrypted content + encrypted AES key + IV) + - Enables verification without decryption + - Prevents tampering with both content and metadata + +**How Signatures Work:** +1. During encryption, the signature is calculated on: `encrypted_content + encrypted_aes_key + iv` +2. The signature is stored in metadata alongside the encrypted AES key and IV +3. During decryption, the signature is verified before decrypting the content +4. If any part of the payload (content, AES key, or IV) is tampered with, signature verification fails +5. This ensures both the encrypted content and the metadata are authenticated + +### 3. Unique Keys Per Encryption +- Each encryption operation generates a new random AES key +- Each encryption uses a unique initialization vector (IV) +- Prevents pattern analysis and ensures forward secrecy + +### 4. Authenticated Encryption +- AES-GCM provides built-in authentication +- Detects tampering or corruption of encrypted data +- Ensures data integrity without additional MAC + +### 5. Secure Key Storage +- AES keys are never stored in plaintext +- AES keys are encrypted with RSA before storage/transmission +- Only encrypted keys, IVs, and signatures are stored in metadata + +### 6. Separation of Concerns +- Different RSA and ECDSA key pairs for inputs and outputs +- Allows different security policies for different data flows +- Enables key rotation without affecting both directions + +## Security Guarantees + +1. **Confidentiality**: Encrypted data cannot be read without the private key +2. **Integrity**: Any tampering with encrypted data or metadata will be detected +3. **Non-Repudiation**: Digital signatures provide cryptographic proof of origin +4. **Forward Secrecy**: Compromising one encryption doesn't affect others (unique keys per operation) +5. **Key Security**: AES keys are protected by RSA encryption +6. **Authentication**: AES-GCM and ECDSA signatures ensure data authenticity +7. **Metadata Protection**: Signatures cover both content and metadata, preventing tampering + +## Error Handling + +The module defines specific exceptions for different error scenarios: + +- `MissingMetadataError`: Raised when metadata is missing or incomplete +- `MetadataParsingError`: Raised when metadata JSON cannot be parsed or base64 decoding fails +- `EncryptionTaskError`: Raised when encryption/decryption operations fail +- `SignatureVerificationError`: Raised when signature verification fails + +## Available Functions + +The module provides four main functions for encrypting and decrypting task data: + +### Task Inputs +- `encrypt_task_content(content: str) -> Tuple[str, str]`: Encrypts task input content (server-side internal state) +- `decrypt_task_content(content: str, metadata: Optional[str], user_ecdsa_public_key: Optional[bytes] = None) -> str`: Decrypts task input content; verifies signature against the provided per-task key, then env-var fallback, then server ECDSA key + +### Task Outputs +- `encrypt_task_result(result: str, rsa_public_key: bytes, ecdsa_private_key: bytes) -> Tuple[str, str]`: Encrypts task output result for a specific user (wraps AES key with the user's RSA public key) +- `decrypt_task_result(encrypted_result: str, metadata: Optional[str]) -> str`: Decrypts task output result + +## Usage Examples + +### Encrypting Task Inputs + +```python +from octobot_node.scheduler.encryption import encrypt_task_content + +# Encrypt task input content +content = '{"action": "buy", "symbol": "BTC/USD", "amount": 0.1}' +encrypted_content, metadata = encrypt_task_content(content) + +# Store both encrypted_content and metadata +# metadata must be preserved for decryption +``` + +### Decrypting Task Inputs + +```python +from octobot_node.scheduler.encryption import decrypt_task_content +from octobot_node.scheduler.encryption import ( + MissingMetadataError, + EncryptionTaskError, + SignatureVerificationError +) + +# Decrypt task content +try: + decrypted_content = decrypt_task_content(encrypted_content, metadata) + # Use decrypted_content... +except MissingMetadataError as e: + # Handle missing metadata +except SignatureVerificationError as e: + # Handle signature verification failure +except EncryptionTaskError as e: + # Handle decryption failure +``` + +### Encrypting Task Outputs + +```python +from octobot_node.scheduler.encryption import encrypt_task_result + +# Encrypt a task result for a specific browser user (keys come from the task payload) +result = '{"status": "success", "data": "sensitive information"}' +encrypted_result, metadata = encrypt_task_result( + result, + rsa_public_key=task.user_rsa_public_key.encode(), + ecdsa_private_key=settings.TASKS_SERVER_ECDSA_PRIVATE_KEY, +) + +# Store both encrypted_result and metadata +# metadata must be preserved for decryption +``` + +### Decrypting Task Outputs + +```python +from octobot_node.scheduler.encryption import decrypt_task_result +from octobot_node.scheduler.encryption import ( + MissingMetadataError, + EncryptionTaskError, + SignatureVerificationError +) + +# Decrypt task result +try: + decrypted_result = decrypt_task_result(encrypted_result, metadata) + # Use decrypted_result... +except MissingMetadataError as e: + # Handle missing metadata +except SignatureVerificationError as e: + # Handle signature verification failure +except EncryptionTaskError as e: + # Handle decryption failure +``` + +## Key Management + +### Configuration Keys + +The server requires two key pairs, configured via environment variables: + +**Server keys (private keys only — public keys are derived internally):** +- `TASKS_SERVER_RSA_PRIVATE_KEY`: RSA private key for decrypting incoming task AES keys +- `TASKS_SERVER_ECDSA_PRIVATE_KEY`: ECDSA private key for signing task output results + +User public keys are not configured on the server. The browser derives them from the user's locally-stored private keys and embeds them in each task payload (`user_rsa_public_key`, `user_ecdsa_public_key`). The server reads these per-task fields to verify signatures and encrypt results, allowing a single node to serve multiple browser users with different keypairs without reconfiguration. + +### Key Generation + +RSA key pairs can be generated using: +```python +from octobot_commons.cryptography import generate_rsa_key_pair + +# Generate a 4096-bit RSA key pair +private_key_pem, public_key_pem = generate_rsa_key_pair(key_size=4096) +``` + +ECDSA key pairs can be generated using: +```python +from octobot_commons.cryptography import generate_ecdsa_key_pair + +# Generate an ECDSA key pair (SECP256R1 by default) +private_key_pem, public_key_pem = generate_ecdsa_key_pair() +``` + +### Configuration + +Keys are configured via environment variables in the application settings: + +```python +# In .env or environment variables +TASKS_SERVER_RSA_PRIVATE_KEY="-----BEGIN RSA PRIVATE KEY-----\n..." +TASKS_SERVER_ECDSA_PRIVATE_KEY="-----BEGIN PRIVATE KEY-----\n..." +``` + +**Note**: If server keys are not configured (`None`), encryption/decryption is skipped and tasks operate in plaintext mode. This allows for development and testing without encryption overhead. + +## Best Practices + +1. **Key Management**: + - Store private keys securely (environment variables, secret management systems) + - Never commit keys to version control + - Rotate keys periodically + - Use different keys for different environments (dev, staging, production) + - Keep RSA and ECDSA key pairs separate for different purposes + +2. **Metadata Handling**: + - Always store metadata alongside encrypted data + - Metadata includes encrypted AES key, IV, and signature (all base64-encoded) + - Signatures ensure metadata integrity + - Include metadata in backups + +3. **Error Handling**: + - Always handle encryption/decryption errors gracefully + - Handle `SignatureVerificationError` separately from other errors + - Log errors without exposing sensitive information + - Fail securely (don't fall back to plaintext on error) + +4. **Performance**: + - Encryption is optional (can be disabled by not setting keys) + - Consider performance impact for high-throughput scenarios + - Monitor encryption/decryption performance + - ECDSA signing/verification is fast (~1-2ms per operation) + +## Why This Approach is Secure + +1. **Industry-Standard Algorithms**: Uses well-vetted cryptographic algorithms (RSA-OAEP, AES-GCM, ECDSA) + +2. **Proper Key Sizes**: + - RSA 4096-bit keys provide strong security + - AES 256-bit keys are considered secure for the foreseeable future + - ECDSA SECP256R1 provides strong signature security + +3. **Secure Padding**: RSA-OAEP padding prevents various attacks (e.g., padding oracle attacks) + +4. **Authenticated Encryption**: AES-GCM provides both encryption and authentication in one operation + +5. **Digital Signatures**: ECDSA signatures provide non-repudiation and enable verification without decryption + +6. **Comprehensive Protection**: Signatures cover both content and metadata, preventing tampering with encrypted AES keys or IVs + +7. **Key Isolation**: Each encryption uses unique keys, limiting the impact of key compromise + +8. **No Key Reuse**: Random key generation prevents key reuse vulnerabilities + +9. **Separation of Keys**: Different keys for inputs/outputs allow independent key management + +## Limitations and Considerations + +1. **Performance**: + - RSA encryption/decryption is slower than symmetric encryption (mitigated by hybrid approach) + - ECDSA signing/verification adds minimal overhead (~1-2ms per operation) + +2. **Key Distribution**: + - Public keys must be securely distributed to encrypting nodes + - ECDSA public keys must be distributed for signature verification + +3. **Key Storage**: + - Private keys must be securely stored and protected + - Both RSA and ECDSA private keys require secure storage + +4. **Metadata Size**: + - Metadata adds overhead (encrypted AES key + IV + signature, all base64-encoded) + - Typical metadata size: ~600-800 bytes + +5. **Optional Encryption**: + - System can operate without encryption if keys are not configured (useful for development) + - Signature verification is skipped if ECDSA keys are not configured diff --git a/packages/node/octobot_node/scheduler/encryption/__init__.py b/packages/node/octobot_node/scheduler/encryption/__init__.py new file mode 100644 index 0000000000..e958f969c6 --- /dev/null +++ b/packages/node/octobot_node/scheduler/encryption/__init__.py @@ -0,0 +1,54 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +# Encryption tasks constants +ENCRYPTED_AES_KEY_B64_METADATA_KEY = "ENCRYPTED_AES_KEY_B64" +IV_B64_METADATA_KEY = "IV_B64" +SIGNATURE_B64_METADATA_KEY = "SIGNATURE_B64" + +# Encryption tasks errors +class MissingMetadataError(Exception): + pass + +class MetadataParsingError(Exception): + pass + +class EncryptionTaskError(Exception): + pass + +class SignatureVerificationError(Exception): + pass + +from octobot_node.scheduler.encryption import task_inputs +from octobot_node.scheduler.encryption.task_inputs import (decrypt_task_content, encrypt_task_content, get_next_encrypted_if_needed_content_and_metadata) + +from octobot_node.scheduler.encryption import task_outputs +from octobot_node.scheduler.encryption.task_outputs import (encrypt_task_result, decrypt_task_result) + +__all__ = [ + "ENCRYPTED_AES_KEY_B64_METADATA_KEY", + "IV_B64_METADATA_KEY", + "SIGNATURE_B64_METADATA_KEY", + "MissingMetadataError", + "MetadataParsingError", + "EncryptionTaskError", + "SignatureVerificationError", + "decrypt_task_content", + "encrypt_task_content", + "encrypt_task_result", + "decrypt_task_result", + "get_next_encrypted_if_needed_content_and_metadata" +] \ No newline at end of file diff --git a/packages/node/octobot_node/scheduler/encryption/task_inputs.py b/packages/node/octobot_node/scheduler/encryption/task_inputs.py new file mode 100644 index 0000000000..642397d92a --- /dev/null +++ b/packages/node/octobot_node/scheduler/encryption/task_inputs.py @@ -0,0 +1,131 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import json +import base64 + +from typing import Optional, Tuple +from octobot_node.config import settings +from octobot_node.scheduler.encryption import ( + ENCRYPTED_AES_KEY_B64_METADATA_KEY, + IV_B64_METADATA_KEY, + SIGNATURE_B64_METADATA_KEY, + MissingMetadataError, + EncryptionTaskError, + MetadataParsingError, + SignatureVerificationError +) +import octobot_commons.cryptography as cryptography + + +import functools + +@functools.lru_cache(maxsize=1) +def _server_rsa_public_key_bytes() -> bytes: + from cryptography.hazmat.primitives.serialization import load_pem_private_key, Encoding, PublicFormat + private = load_pem_private_key(settings.TASKS_SERVER_RSA_PRIVATE_KEY, password=None) + return private.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) + + +@functools.lru_cache(maxsize=1) +def _server_ecdsa_public_key_bytes() -> bytes: + from cryptography.hazmat.primitives.serialization import load_pem_private_key, Encoding, PublicFormat + private = load_pem_private_key(settings.TASKS_SERVER_ECDSA_PRIVATE_KEY, password=None) + return private.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) + + +def decrypt_task_content(content: str, metadata: Optional[str] = None, user_ecdsa_public_key: Optional[bytes] = None) -> str: + if metadata is None: + raise MissingMetadataError("No metadata provided for content decryption") + + try: + metadata = json.loads(base64.b64decode(metadata).decode('utf-8')) + encrypted_aes_key_b64 = metadata.get(ENCRYPTED_AES_KEY_B64_METADATA_KEY, None) + iv_b64 = metadata.get(IV_B64_METADATA_KEY, None) + signature_b64 = metadata.get(SIGNATURE_B64_METADATA_KEY, None) + except Exception as e: + raise MetadataParsingError(f"Failed to parse encrypted AES key or IV from metadata: {e}") + + if not encrypted_aes_key_b64 or not iv_b64 or not signature_b64: + raise MissingMetadataError("No encrypted AES key or IV or signature provided for content decryption") + + try: + content_bytes = base64.b64decode(content) + encrypted_aes_key = base64.b64decode(encrypted_aes_key_b64) + iv = base64.b64decode(iv_b64) + signature = base64.b64decode(signature_b64) + except Exception as e: + raise MetadataParsingError(f"Failed to decode base64-encoded data: {e}") + + data_to_verify = content_bytes + encrypted_aes_key + iv + # Browser-submitted tasks are signed with USER_ECDSA_PRIVATE; server-generated tasks with SERVER_ECDSA_PRIVATE. + # Per-task key takes precedence; falls back to the global env-var, then tries the server's own ECDSA key. + effective_user_key = user_ecdsa_public_key or settings.TASKS_USER_ECDSA_PUBLIC_KEY + user_sig_valid = bool(effective_user_key and cryptography.verify_signature(data_to_verify, effective_user_key, signature)) + if not user_sig_valid: + if not (settings.TASKS_SERVER_ECDSA_PRIVATE_KEY and + cryptography.verify_signature(data_to_verify, _server_ecdsa_public_key_bytes(), signature)): + raise SignatureVerificationError("Signature verification failed") + + decrypted_aes_key = cryptography.rsa_decrypt_aes_key(encrypted_aes_key, settings.TASKS_SERVER_RSA_PRIVATE_KEY) + if not decrypted_aes_key: + raise EncryptionTaskError("Failed to decrypt AES key") + + decrypted_content = cryptography.aes_gcm_decrypt(content_bytes, decrypted_aes_key, iv) + if not decrypted_content: + raise EncryptionTaskError("Failed to decrypt content") + + return decrypted_content.decode('utf-8') + + +def encrypt_task_content(content: str) -> Tuple[str, str]: + aes_encryption_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + + encrypted_content = cryptography.aes_gcm_encrypt(content.encode('utf-8'), aes_encryption_key, iv) + if not encrypted_content: + raise EncryptionTaskError("Failed to encrypt content") + + encrypted_aes_key = cryptography.rsa_encrypt_aes_key(aes_encryption_key, _server_rsa_public_key_bytes()) + if not encrypted_aes_key: + raise EncryptionTaskError("Failed to encrypt AES key") + + data_to_sign = encrypted_content + encrypted_aes_key + iv + signature = cryptography.sign_data(data_to_sign, settings.TASKS_SERVER_ECDSA_PRIVATE_KEY) + if not signature: + raise EncryptionTaskError("Failed to sign data") + + metadata = { + ENCRYPTED_AES_KEY_B64_METADATA_KEY: base64.b64encode(encrypted_aes_key).decode('utf-8'), + IV_B64_METADATA_KEY: base64.b64encode(iv).decode('utf-8'), + SIGNATURE_B64_METADATA_KEY: base64.b64encode(signature).decode('utf-8'), + } + encrypted_content_b64 = base64.b64encode(encrypted_content).decode('utf-8') + metadata_json = json.dumps(metadata) + metadata_b64 = base64.b64encode(metadata_json.encode('utf-8')).decode('utf-8') + return encrypted_content_b64, metadata_b64 + + +def get_next_encrypted_if_needed_content_and_metadata(result: dict) -> tuple[str, Optional[str]]: + raw_description = json.dumps(result) + next_content_metadata = None + if settings.is_node_side_encryption_enabled: + next_content, next_content_metadata = ( + encrypt_task_content(raw_description) + ) + else: + next_content = raw_description + return next_content, next_content_metadata diff --git a/packages/node/octobot_node/scheduler/encryption/task_outputs.py b/packages/node/octobot_node/scheduler/encryption/task_outputs.py new file mode 100644 index 0000000000..04a114ba20 --- /dev/null +++ b/packages/node/octobot_node/scheduler/encryption/task_outputs.py @@ -0,0 +1,99 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import json +import base64 + +from typing import Tuple, Optional +from octobot_node.scheduler.encryption import ( + ENCRYPTED_AES_KEY_B64_METADATA_KEY, + IV_B64_METADATA_KEY, + SIGNATURE_B64_METADATA_KEY, + MissingMetadataError, + EncryptionTaskError, + MetadataParsingError, + SignatureVerificationError +) +import octobot_commons.cryptography as cryptography + + +def encrypt_task_result(result: str, rsa_public_key: bytes, ecdsa_private_key: bytes) -> Tuple[str, str]: + aes_encryption_key = cryptography.generate_aes_key() + iv = cryptography.generate_iv() + + encrypted_result = cryptography.aes_gcm_encrypt(result.encode('utf-8'), aes_encryption_key, iv) + if not encrypted_result: + raise EncryptionTaskError("Failed to encrypt result") + + encrypted_aes_key = cryptography.rsa_encrypt_aes_key(aes_encryption_key, rsa_public_key) + if not encrypted_aes_key: + raise EncryptionTaskError("Failed to encrypt AES key") + + data_to_sign = encrypted_result + encrypted_aes_key + iv + signature = cryptography.sign_data(data_to_sign, ecdsa_private_key) + if not signature: + raise EncryptionTaskError("Failed to sign data") + + metadata = { + ENCRYPTED_AES_KEY_B64_METADATA_KEY: base64.b64encode(encrypted_aes_key).decode('utf-8'), + IV_B64_METADATA_KEY: base64.b64encode(iv).decode('utf-8'), + SIGNATURE_B64_METADATA_KEY: base64.b64encode(signature).decode('utf-8'), + } + encrypted_result_b64 = base64.b64encode(encrypted_result).decode('utf-8') + return encrypted_result_b64, json.dumps(metadata) + + +def decrypt_task_result( + encrypted_result: str, + rsa_private_key: bytes, + ecdsa_public_key: bytes, + metadata: Optional[str] = None, +) -> str: + if metadata is None: + raise MissingMetadataError("No metadata provided for result decryption") + + try: + metadata = json.loads(metadata) + encrypted_aes_key_b64 = metadata.get(ENCRYPTED_AES_KEY_B64_METADATA_KEY, None) + iv_b64 = metadata.get(IV_B64_METADATA_KEY, None) + signature_b64 = metadata.get(SIGNATURE_B64_METADATA_KEY, None) + except Exception as e: + raise MetadataParsingError(f"Failed to parse encrypted AES key or IV from metadata: {e}") + + if not encrypted_aes_key_b64 or not iv_b64 or not signature_b64: + raise MissingMetadataError("No encrypted AES key or IV or signature provided for result decryption") + + try: + encrypted_result_bytes = base64.b64decode(encrypted_result) + encrypted_aes_key = base64.b64decode(encrypted_aes_key_b64) + iv = base64.b64decode(iv_b64) + signature = base64.b64decode(signature_b64) + except Exception as e: + raise MetadataParsingError(f"Failed to decode base64-encoded data: {e}") + + data_to_verify = encrypted_result_bytes + encrypted_aes_key + iv + if not cryptography.verify_signature(data_to_verify, ecdsa_public_key, signature): + raise SignatureVerificationError("Signature verification failed") + + decrypted_aes_key = cryptography.rsa_decrypt_aes_key(encrypted_aes_key, rsa_private_key) + if not decrypted_aes_key: + raise EncryptionTaskError("Failed to decrypt AES key") + + decrypted_result = cryptography.aes_gcm_decrypt(encrypted_result_bytes, decrypted_aes_key, iv) + if not decrypted_result: + raise EncryptionTaskError("Failed to decrypt result") + + return decrypted_result.decode('utf-8') diff --git a/packages/node/octobot_node/scheduler/internal_trading_signals.py b/packages/node/octobot_node/scheduler/internal_trading_signals.py new file mode 100644 index 0000000000..d45d1dc2db --- /dev/null +++ b/packages/node/octobot_node/scheduler/internal_trading_signals.py @@ -0,0 +1,46 @@ +import dbos + +import octobot_commons.logging +import octobot_flow.entities +import octobot_flow.repositories.community as trading_signals_channel +import octobot_node.scheduler.workflows_util as workflows_util +import octobot_node.scheduler.tasks as tasks + + +async def subscribe_internal_trading_signal_consumer() -> None: + """ + Propagates trading signals from the internal trading signal channel to running automations. + Signals can from from a local signal emitter or from send_internal_trading_signal + """ + async def _on_internal_trading_signal(trading_signal: octobot_flow.entities.TradingSignal) -> None: + await _trigger_copier_automation(trading_signal) + + channel = await trading_signals_channel.get_or_create_internal_trading_signal_channel() + await channel.new_consumer(_on_internal_trading_signal) + + +async def send_internal_trading_signal(trading_signal: octobot_flow.entities.TradingSignal) -> None: + """ + Broadcasts a trading signal to the internal trading signal channel. + """ + await trading_signals_channel.send_internal_trading_signal(trading_signal) + +async def _trigger_copier_automation(trading_signal: octobot_flow.entities.TradingSignal) -> None: + """ + Triggers copier automations with the given trading signal. + Automations are triggered one by one to avoid concurrent executions. + """ + import octobot_node.scheduler as scheduler + pending_workflow_statuses = await scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=[dbos.WorkflowStatusString.ENQUEUED.value, dbos.WorkflowStatusString.PENDING.value] + ) + for pending_workflow_status in pending_workflow_statuses: + if ( + trading_signal.strategy_id in workflows_util.get_automation_copied_strategy_ids(pending_workflow_status) + ): + octobot_commons.logging.get_logger("internal_trading_signals").info( + f"Triggering copier automation {pending_workflow_status.workflow_id} with trading signal {trading_signal.strategy_id}" + ) + await tasks.trigger_copier_automation( + pending_workflow_status.workflow_id, trading_signal + ) diff --git a/packages/node/octobot_node/scheduler/octobot_flow_client.py b/packages/node/octobot_node/scheduler/octobot_flow_client.py new file mode 100644 index 0000000000..72a6bf2720 --- /dev/null +++ b/packages/node/octobot_node/scheduler/octobot_flow_client.py @@ -0,0 +1,170 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import typing +import dataclasses +import json +import logging + +import octobot_commons.dataclasses + +import octobot_node.scheduler.workflows_util as workflows_util + +try: + import octobot_flow + import octobot_flow.environment + import octobot_flow.parsers + import octobot_flow.entities + # Requires octobot_flow import and importable tentacles folder + + # ensure environment is initialized + octobot_flow.environment.initialize_environment(True) + +except ImportError: + pass # OctoBot Flow is not available + + +@dataclasses.dataclass +class OctoBotActionsJobDescription(octobot_commons.dataclasses.MinimizableDataclass): + state: dict = dataclasses.field(default_factory=dict) + auth_details: dict = dataclasses.field(default_factory=dict) + params: dict = dataclasses.field(default_factory=dict) + + def __post_init__(self): + if self.params: + self._parse_actions_plan(self.params) + + def _parse_actions_plan(self, params: dict) -> None: + to_add_actions_dag = octobot_flow.parsers.ActionsDAGParser(params).parse() + if not to_add_actions_dag: + raise ValueError("No action found in params") + automation_id = None + if not automation_id and isinstance(to_add_actions_dag.actions[0], octobot_flow.entities.ConfiguredActionDetails) and to_add_actions_dag.actions[0].config: + config = to_add_actions_dag.actions[0].config + if "automation" in config: + automation_id = config["automation"]["metadata"]["automation_id"] + if not automation_id: + raise ValueError("No automation id found in params") + self._include_actions_in_automation_state(automation_id, to_add_actions_dag) + + def _include_actions_in_automation_state(self, automation_id: str, actions: "octobot_flow.ActionsDAG"): + automation_state = octobot_flow.AutomationState.from_dict(self.state) + if not automation_state.automation.metadata.automation_id: + automation_state.automation = octobot_flow.entities.AutomationDetails( + metadata=octobot_flow.entities.AutomationMetadata( + automation_id=automation_id, + ), + actions_dag=actions, + ) + else: + automation_state.upsert_automation_actions(actions.actions) + self.state = automation_state.to_dict(include_default_values=False) + + def get_next_execution_time(self) -> float: + return self.state["automation"]["execution"]["current_execution"]["scheduled_to"] + + +@dataclasses.dataclass +class OctoBotActionsJobResult: + processed_actions: list["octobot_flow.AbstractActionDetails"] = dataclasses.field(default_factory=list) + next_actions_description: typing.Optional[OctoBotActionsJobDescription] = None + maybe_encrypted_next_actions_description: typing.Optional[str] = None + next_actions_description_encryption_metadata: typing.Optional[str] = None + has_next_actions: bool = False + actions_dag: typing.Optional["octobot_flow.ActionsDAG"] = None + should_stop: bool = False + + +class OctoBotActionsJob: + def __init__( + self, + description: typing.Union[str, dict], + user_actions: list[dict], + updated_trading_signals: list[dict], + result: OctoBotActionsJobResult, + ): + parsed_description = self._parse_description(description) + self.description: OctoBotActionsJobDescription = OctoBotActionsJobDescription.from_dict( + parsed_description + ) + self.priority_user_actions: list[octobot_flow.AbstractActionDetails] = [ + octobot_flow.parse_action_details( + user_action + ) for user_action in user_actions + ] + self.updated_trading_signals: list[octobot_flow.entities.TradingSignal] = [ + octobot_flow.entities.TradingSignal.from_dict(trading_signal_dict) + for trading_signal_dict in updated_trading_signals + ] + self.after_execution_state = None + self.result: OctoBotActionsJobResult = result + + def _parse_description(self, description: typing.Union[str, dict]) -> dict: + try: + parsed_description = workflows_util.get_automation_dict(description) + except ValueError: + if isinstance(description, dict): + parsed_description = description + else: + # description is a JSON string with key/value parameters: store it in params + dict_description = json.loads(description) + parsed_description = { + "params": dict_description + } + return parsed_description + + async def run(self) -> None: + async with octobot_flow.AutomationJob( + self.description.state, + self.priority_user_actions, + self.updated_trading_signals, + self.description.auth_details, + ) as automation_job: + selected_actions = ( + self.priority_user_actions + or automation_job.automation_state.automation.actions_dag.get_executable_actions() + ) + logging.getLogger(self.__class__.__name__).info(f"Running automation actions: {selected_actions}") + executed_actions = await automation_job.run() + self.after_execution_state = automation_job.automation_state + post_execution_state_dump = automation_job.dump() + next_actions_description, has_next_actions = self.get_next_actions_description(post_execution_state_dump) + self.result.processed_actions = executed_actions + self.result.next_actions_description = next_actions_description + self.result.has_next_actions = has_next_actions + self.result.actions_dag = automation_job.automation_state.automation.actions_dag + self.result.should_stop = automation_job.automation_state.automation.post_actions.stop_automation + + def get_next_actions_description( + self, post_execution_state: dict + ) -> tuple[typing.Optional[OctoBotActionsJobDescription], bool]: + automation = self.after_execution_state.automation + next_actions_description = OctoBotActionsJobDescription( + state=post_execution_state, + auth_details=self.description.auth_details, + ) + has_next_actions = bool(automation.actions_dag.get_executable_actions()) + if not has_next_actions and (pending_actions := automation.actions_dag.get_pending_actions()): + raise ValueError( + f"Automation {automation.metadata.automation_id}: actions DAG dependencies issue: " + f"no executable actions while there are still " + f"{len(pending_actions)} pending actions: {pending_actions}" + ) + return next_actions_description, has_next_actions + + def __repr__(self) -> str: + parsed_state = octobot_flow.AutomationState.from_dict(self.description.state) + automation_repr = str(parsed_state.automation) if parsed_state.automation else "No automation" + return f"OctoBotActionsJob with automation:\n- {automation_repr}" diff --git a/packages/node/octobot_node/scheduler/scheduler.py b/packages/node/octobot_node/scheduler/scheduler.py new file mode 100644 index 0000000000..865e2cbb72 --- /dev/null +++ b/packages/node/octobot_node/scheduler/scheduler.py @@ -0,0 +1,312 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from typing_extensions import Reader +import dbos +import json +import logging +import typing +import decimal +import enum +import sqlalchemy + +import octobot_commons.logging +import octobot_node.config +import octobot_node.enums +import octobot_node.models +import octobot_node.constants +import octobot_node.scheduler.workflows_util as workflows_util +import octobot_node.scheduler.workflows.params as workflow_params +import octobot_node.scheduler.encryption as encryption +import octobot_node.scheduler.task_context as task_context +try: + from octobot import VERSION +except ImportError: + VERSION = "unknown" + +DEFAULT_NAME = "octobot_node" + +_BASE_CONFIG = dbos.DBOSConfig( + name=DEFAULT_NAME, + max_executor_threads=octobot_node.config.settings.SCHEDULER_MAX_EXECUTOR_THREADS, + application_version=VERSION, +) + + +def _sanitize(result: typing.Any) -> typing.Any: + if isinstance(result, decimal.Decimal): + return float(result) + if isinstance(result, enum.Enum): + return result.value + if isinstance(result, dict): + return {k: _sanitize(v) for k, v in result.items()} + elif isinstance(result, list): + return [_sanitize(v) for v in result] + return result + + +class Scheduler: + INSTANCE: dbos.DBOS = None # type: ignore + AUTOMATION_WORKFLOW_QUEUE: dbos.Queue = None # type: ignore + + def __init__(self): + self.logger = logging.getLogger(self.__class__.__name__) + + def create(self): + if octobot_node.config.settings.SCHEDULER_POSTGRES_URL: + self.logger.info( + f"Initializing scheduler with Postgres backend at {octobot_node.config.settings.SCHEDULER_POSTGRES_URL}", + ) + + self.INSTANCE = dbos.DBOS(config=dbos.DBOSConfig( + **_BASE_CONFIG, + **{ + "system_database_url": octobot_node.config.settings.SCHEDULER_POSTGRES_URL, + }, + )) + else: + self.logger.info( + f"Initializing scheduler with sqlite backend at {octobot_node.config.settings.SCHEDULER_SQLITE_FILE}", + ) + # DB not autosaved? + self.INSTANCE = dbos.DBOS(config=dbos.DBOSConfig( + **_BASE_CONFIG, + **{ + "system_database_url": f"sqlite:///{octobot_node.config.settings.SCHEDULER_SQLITE_FILE}", + }, + )) + if self.INSTANCE and octobot_node.config.settings.USE_DEDICATED_LOG_FILE_PER_AUTOMATION: + self._setup_workflow_logging() + + def _setup_workflow_logging(self) -> None: + """Register DBOS workflow ID provider and add workflow file handler for per-workflow log files.""" + octobot_commons.logging.add_context_based_file_handler( + octobot_node.constants.AUTOMATION_LOGS_FOLDER, + self._get_dbos_workflow_id + ) + + @staticmethod + def _get_dbos_workflow_id() -> typing.Optional[str]: + """Return the current DBOS workflow ID when executing within a step or workflow.""" + if workflow_id := getattr(dbos.DBOS, "workflow_id", None): + # group children workflows and parent workflows together + # (a child workflow has the parent's workflow ID as a prefix) + return workflow_id[:octobot_node.constants.PARENT_WORKFLOW_ID_LENGTH] + return None + + def is_enabled(self) -> bool: + # enabled if master mode or consumer only mode + return ( + octobot_node.config.settings.IS_MASTER_MODE + or octobot_node.config.settings.CONSUMER_ONLY + ) + + def is_initialized(self) -> bool: + return self.INSTANCE is not None + + def start(self): + if self.INSTANCE: + self.create_queues() + self.logger.info("Starting scheduler") + self.INSTANCE.launch() + self.logger.info("Scheduler started") + else: + self.logger.warning("Scheduler not initialized") + + def stop(self) -> None: + if self.INSTANCE: + self.INSTANCE.destroy() + self.logger.info("Scheduler stopped") + else: + self.logger.warning("Scheduler not initialized") + + def create_queues(self): + self.AUTOMATION_WORKFLOW_QUEUE = dbos.Queue(name=octobot_node.enums.SchedulerQueues.AUTOMATION_WORKFLOW_QUEUE.value) + + async def get_periodic_tasks(self) -> list[octobot_node.models.Execution]: + """DBOS scheduled workflows are not easily introspectable; return empty list.""" + return [] # TODO + + async def get_pending_tasks(self) -> list[octobot_node.models.Execution]: + if not self.INSTANCE: + return [] + executions: list[octobot_node.models.Execution] = [] + try: + pending_workflow_statuses = await self.INSTANCE.list_workflows_async(status=[dbos.WorkflowStatusString.ENQUEUED.value, dbos.WorkflowStatusString.PENDING.value]) + for pending_workflow_status in pending_workflow_statuses or []: + try: + if reader := workflows_util.get_automation_state_reader(pending_workflow_status): + next_step = ", ".join([ + action.get_summary() + for action in reader.get_executable_actions() + ]) + description = f"next steps: {next_step}" + else: + description = f"Pending task: {pending_workflow_status.workflow_id}" + execution = self._parse_workflow_status(pending_workflow_status, octobot_node.models.TaskStatus.PENDING, description) + executions.append(execution) + except Exception as e: + self.logger.warning(f"Failed to process pending workflow {pending_workflow_status.workflow_id}: {e}") + except Exception as e: + self.logger.warning(f"Failed to list pending workflows: {e}") + return executions + + async def delete_workflows(self, to_delete_workflow_ids: list[str]): + self.logger.info(f"Deleting {len(to_delete_workflow_ids)} workflows") + all_completed_workflows = await self.INSTANCE.list_workflows_async(status=[ + dbos.WorkflowStatusString.SUCCESS.value, dbos.WorkflowStatusString.ERROR.value, + dbos.WorkflowStatusString.CANCELLED.value, dbos.WorkflowStatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED.value + ]) + to_delete_parent_workflow_ids = [ + workflow_id[:octobot_node.constants.PARENT_WORKFLOW_ID_LENGTH] for workflow_id in to_delete_workflow_ids + ] + children_workflow_ids = [ + workflow.workflow_id for workflow in all_completed_workflows + if any(workflow.workflow_id.startswith(parent_workflow_id) for parent_workflow_id in to_delete_parent_workflow_ids) + ] + merged_to_delete_workflow_ids = list(set(to_delete_workflow_ids + children_workflow_ids)) + self.logger.info( + f"Including {len(merged_to_delete_workflow_ids) - len(to_delete_workflow_ids)} associated children workflows to delete" + ) + await self.INSTANCE.delete_workflows_async(merged_to_delete_workflow_ids, delete_children=False) + self.logger.info(f"Vacuuming database") + with self.INSTANCE._sys_db.engine.begin() as conn: + conn.execute(sqlalchemy.text("VACUUM")) + self.logger.info(f"Database vacuum completed") + + async def get_scheduled_tasks(self) -> list[octobot_node.models.Execution]: + """DBOS has no direct 'scheduled for later' queue; return empty list.""" + return [] + + async def get_results(self) -> list[octobot_node.models.Execution]: + if not self.INSTANCE: + return [] + executions: list[octobot_node.models.Execution] = [] + try: + completed_workflow_statuses = await self.INSTANCE.list_workflows_async(status=[ + dbos.WorkflowStatusString.SUCCESS.value, dbos.WorkflowStatusString.ERROR.value + ], load_output=True) + for completed_workflow_status in completed_workflow_statuses or []: + try: + if completed_workflow_status.status == dbos.WorkflowStatusString.SUCCESS.value and ( + task := workflows_util.get_input_task(completed_workflow_status) + ): + try: + output = workflow_params.AutomationWorkflowOutput.from_dict( + json.loads(completed_workflow_status.output) + ) if completed_workflow_status.output else workflow_params.AutomationWorkflowOutput() + except Exception as e: + self.logger.warning(f"Failed to parse output for workflow {completed_workflow_status.workflow_id}: {e}") + output = workflow_params.AutomationWorkflowOutput() + if output.state: + result = output.state + metadata = output.state_metadata + user_rsa_key = ( + task.user_rsa_public_key.encode('utf-8') if task.user_rsa_public_key + else octobot_node.config.settings.TASKS_USER_RSA_PUBLIC_KEY + ) + if octobot_node.config.settings.is_node_side_encryption_enabled: + try: + result_task = octobot_node.models.Task( + name="", content=output.state, + content_metadata=output.state_metadata, type="execute_actions" + ) + with task_context.encrypted_task(result_task): + # encrypted_task swallows decryption errors; unchanged content means + # decryption silently failed and re-encrypting would produce double-encrypted garbage + if result_task.content == output.state and output.state_metadata: + raise encryption.EncryptionTaskError("Internal state decryption silently failed") + if user_rsa_key: + result, metadata = encryption.encrypt_task_result( + result_task.content, + rsa_public_key=user_rsa_key, + ecdsa_private_key=octobot_node.config.settings.TASKS_SERVER_ECDSA_PRIVATE_KEY, + ) + else: + # No user RSA public key: the server-encrypted state is unreadable by the browser. + # Return plaintext so the caller sees the actual automation output. + result = result_task.content + metadata = "" + except encryption.EncryptionTaskError as encrypt_err: + self.logger.warning(f"Failed to encrypt result for workflow {completed_workflow_status.workflow_id}: {encrypt_err}") + else: + result = task.content + metadata = task.content_metadata + description = "Completed" + status = octobot_node.models.TaskStatus.COMPLETED + task_name = task.name + error = output.error + else: + result = "" + description = "ERROR" + status = octobot_node.models.TaskStatus.FAILED + metadata = "" + task_name = completed_workflow_status.workflow_id + error = None + + executions.append(octobot_node.models.Execution( + id=completed_workflow_status.workflow_id, + name=task_name, + description=description, + status=status, + content_metadata=task.content_metadata if task else None, + result=result or "", + result_metadata=metadata, + scheduled_at=completed_workflow_status.created_at, + completed_at=completed_workflow_status.updated_at, + error=error, + )) + except Exception as e: + self.logger.exception(e, True, f"Failed to process result workflow {completed_workflow_status.workflow_id}: {e}") + except Exception as e: + self.logger.warning(f"Failed to list result workflows: {e}") + return executions + + def _parse_workflow_status( + self, + workflow_status: dbos.WorkflowStatus, + status: octobot_node.models.TaskStatus, + description: typing.Optional[str] = None, + ) -> octobot_node.models.Execution: + """Map DBOS WorkflowStatus to octobot_node.models.Execution.""" + task_id = str(workflow_status.workflow_id) + task_name = workflow_status.name + task_type = None + task_actions = None + task = None + if workflow_status.input: + if task := workflows_util.get_input_task(workflow_status): + task_type = task.type + task_actions = task.content #todo confi + + task_content_metadata = task.content_metadata if task else None + return octobot_node.models.Execution( + id=task_id, + name=task_name, + description=description, + actions=task_actions, + content_metadata=task_content_metadata, + type=task_type, + status=status, + ) + + def get_task_name(self, task_data: dict | octobot_node.models.Task | None, default_value: typing.Optional[str] = None) -> typing.Optional[str]: + if isinstance(task_data, octobot_node.models.Task): + return task_data.name + elif isinstance(task_data, dict): + return task_data.get(octobot_node.enums.TaskResultKeys.TASK.value, {}).get("name", default_value) + else: + return default_value diff --git a/packages/node/octobot_node/scheduler/task_context.py b/packages/node/octobot_node/scheduler/task_context.py new file mode 100644 index 0000000000..f0ec89de74 --- /dev/null +++ b/packages/node/octobot_node/scheduler/task_context.py @@ -0,0 +1,75 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import contextlib +import logging +import typing + +import octobot_node.config +import octobot_node.models +import octobot_node.scheduler.encryption as encryption +import octobot_node.scheduler.octobot_flow_client + +logger = logging.getLogger(__name__) + + +@contextlib.contextmanager +def encrypted_task( + task: octobot_node.models.Task, + to_update_result: typing.Optional["octobot_node.scheduler.octobot_flow_client.OctoBotActionsJobResult"] = None +): + """ + Context manager for automatically decrypting task content. + Decrypts task.content if TASKS_SERVER_RSA_PRIVATE_KEY is provided, + and restores original content on exit. + """ + original_content = task.content + + try: + # Decrypt content if server keys are available. Per-task ECDSA key takes precedence over env var. + settings = octobot_node.config.settings + user_ecdsa_key = task.user_ecdsa_public_key.encode('utf-8') if task.user_ecdsa_public_key else None + if (settings.TASKS_SERVER_RSA_PRIVATE_KEY and task.content_metadata): + try: + decrypted_content = encryption.decrypt_task_content(task.content, task.content_metadata, user_ecdsa_public_key=user_ecdsa_key) + task.content = decrypted_content + except Exception as e: + logger.error(f"Failed to decrypt content: {e}") + yield task + finally: + # Restore original content if it was modified + if task.content != original_content: + task.content = original_content + + if to_update_result is not None: + # ensure maybe_encrypted_next_actions_description is encrypted if needed + if isinstance( + to_update_result.next_actions_description, + octobot_node.scheduler.octobot_flow_client.OctoBotActionsJobDescription + ): + maybe_encrypted_next_actions_description, next_actions_description_encryption_metadata = encryption.get_next_encrypted_if_needed_content_and_metadata( + to_update_result.next_actions_description.to_dict(include_default_values=False) + ) + else: + maybe_encrypted_next_actions_description = None + next_actions_description_encryption_metadata = None + # store potentially encrypted data + to_update_result.maybe_encrypted_next_actions_description = maybe_encrypted_next_actions_description + to_update_result.next_actions_description_encryption_metadata = next_actions_description_encryption_metadata + # clear potentially sensitive data + to_update_result.next_actions_description = None + to_update_result.processed_actions.clear() + to_update_result.actions_dag = None diff --git a/packages/node/octobot_node/scheduler/tasks.py b/packages/node/octobot_node/scheduler/tasks.py new file mode 100644 index 0000000000..4e2bc07676 --- /dev/null +++ b/packages/node/octobot_node/scheduler/tasks.py @@ -0,0 +1,62 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import octobot_flow.entities +import octobot_node.enums +import octobot_node.models +import octobot_node.scheduler.workflows_util as workflows_util +import octobot_node.scheduler.workflows.params as params + + +async def trigger_task(task: octobot_node.models.Task) -> bool: + import octobot_node.scheduler.workflows.automation_workflow as automation_workflow + import octobot_node.scheduler # avoid circular import + handle = None + # enqueue workflow instead of starting it to dispatch them to multiple workers if possible + if task.type == octobot_node.models.TaskType.EXECUTE_ACTIONS.value: + handle = await octobot_node.scheduler.SCHEDULER.AUTOMATION_WORKFLOW_QUEUE.enqueue_async( + automation_workflow.AutomationWorkflow.execute_automation, + inputs=params.AutomationWorkflowInputs(task=task).to_dict(include_default_values=False) + ) + else: + raise ValueError(f"Unsupported task type: {task.type}") + return handle is not None + + +async def send_actions_to_automation(actions: list[dict], automation_id: str): + import octobot_node.scheduler # avoid circular import + workflow_status = await workflows_util.get_automation_workflow_status(automation_id) + payload = params.AutomationWorkflowActionUpdate( + actions_type=octobot_node.enums.AutomationWorkflowActionTypes.USER_ACTIONS.value, + actions_details=actions, + ).to_dict(include_default_values=False) + await octobot_node.scheduler.SCHEDULER.INSTANCE.send_async( + workflow_status.workflow_id, + payload, + topic=octobot_node.enums.AutomationWorkflowMessageTopics.ACTIONS_UPDATE.value, + ) + + +async def trigger_copier_automation(automation_id: str, trading_signal: octobot_flow.entities.TradingSignal) -> None: + import octobot_node.scheduler # avoid circular import + payload = params.AutomationWorkflowActionUpdate( + actions_type=octobot_node.enums.AutomationWorkflowActionTypes.TRADING_SIGNAL.value, + actions_details=[trading_signal.to_dict(include_default_values=False)], + ).to_dict(include_default_values=False) + await octobot_node.scheduler.SCHEDULER.INSTANCE.send_async( + automation_id, + payload, + topic=octobot_node.enums.AutomationWorkflowMessageTopics.ACTIONS_UPDATE.value, + ) diff --git a/packages/node/octobot_node/scheduler/workflows/__init__.py b/packages/node/octobot_node/scheduler/workflows/__init__.py new file mode 100644 index 0000000000..032554eb56 --- /dev/null +++ b/packages/node/octobot_node/scheduler/workflows/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +def register_workflows(): + import octobot_node.scheduler.workflows.automation_workflow diff --git a/packages/node/octobot_node/scheduler/workflows/automation_workflow.py b/packages/node/octobot_node/scheduler/workflows/automation_workflow.py new file mode 100644 index 0000000000..0689e7c345 --- /dev/null +++ b/packages/node/octobot_node/scheduler/workflows/automation_workflow.py @@ -0,0 +1,332 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import time +import typing +import dbos +import os + +import octobot_commons.logging + +import octobot_flow.enums +import octobot_flow.errors + +import octobot_node.enums +import octobot_node.models +import octobot_node.scheduler.octobot_flow_client as octobot_flow_client +import octobot_node.scheduler.task_context +import octobot_node.constants as constants +import octobot_node.scheduler.workflows.params as params +import octobot_node.errors as errors + +if typing.TYPE_CHECKING: + import octobot_flow.entities + +from octobot_node.scheduler import SCHEDULER # avoid circular import + + +@SCHEDULER.INSTANCE.dbos_class() +class AutomationWorkflow: + # Always use dict as input to parse minimizable dataclasses and facilitate data format updates + + @staticmethod + @SCHEDULER.INSTANCE.workflow(name="execute_automation") + async def execute_automation(inputs: dict) -> typing.Optional[str]: + """ + Automation workflow runner: + 1. Wait for priority actions if any. + 2. Execute the iteration (received priority action or DAG's executable actions). + 3. Check and process other received priority actions if any. + 4. Either: + A. Reschedule the next iteration as a child workflow to avoid growing the workflow forever. + B. Complete the workflow and stop the automation. + 5. If completed, return tthe updated task.content (the automation state) as workflow output + """ + output: typing.Optional[params.AutomationWorkflowOutput] = None + iteration_result = None + try: + parsed_inputs = params.AutomationWorkflowInputs.from_dict(inputs) + delay = parsed_inputs.execution_time - time.time() + delay_str = f" in {delay:.2f} seconds" if delay > 0 else "" + AutomationWorkflow.get_logger(parsed_inputs).info(f"{AutomationWorkflow.__name__} starting{delay_str}.") + actions_update: typing.Optional[dict] = None + if delay > 0: + actions_update = await AutomationWorkflow._wait_and_trigger_on_actions_update( + parsed_inputs, parsed_inputs.execution_time + ) + raw_iteration_result = await AutomationWorkflow.execute_iteration(inputs, actions_update) + iteration_result = params.AutomationWorkflowIterationResult.from_dict(raw_iteration_result) + continue_workflow = False + if AutomationWorkflow._should_continue_workflow(parsed_inputs, iteration_result.progress_status, bool(actions_update)): + # update iteration_result to include the executions of priority actions if any + continue_workflow, iteration_result = await AutomationWorkflow._process_pending_priority_actions_and_reschedule( + parsed_inputs, iteration_result + ) + if not continue_workflow: + AutomationWorkflow.get_logger(parsed_inputs).info( + f"Stopped workflow (remaining steps: {iteration_result.progress_status.remaining_steps})" + ) + final_state = iteration_result.next_iteration_description + final_state_metadata = iteration_result.next_iteration_description_metadata + final_error = iteration_result.progress_status.error + if final_state is not None or final_error is not None: + output = params.AutomationWorkflowOutput( + state=final_state, + state_metadata=final_state_metadata, + error=final_error, + ) + except Exception as err: + AutomationWorkflow.get_logger(parsed_inputs).exception( + err, True, f"Interrupted workflow: unexpected critical error: {err} ({err.__class__.__name__})" + ) + output = params.AutomationWorkflowOutput( + # use available iteration result when possible (might be the one of the previous iteration) + state=iteration_result.next_iteration_description if iteration_result else None, + state_metadata=iteration_result.next_iteration_description_metadata if iteration_result else None, + # keep track of the failed iteration + error=AutomationWorkflow._get_failed_error_status(err), + ) + return json.dumps(output.to_dict(include_default_values=False)) if output else None + + @staticmethod + @SCHEDULER.INSTANCE.step( + name="execute_iteration", + retries_allowed=True, + interval_seconds = constants.AUTOMATION_WORKFLOW_RETRY_INTERVAL_SECONDS, + max_attempts=constants.AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES, + backoff_rate=constants.AUTOMATION_WORKFLOW_BACKOFF_RATE, + # should_retry=XXX # todo add in dbos 2.20.0 + ) + async def execute_iteration(inputs: dict, actions_update: typing.Optional[dict]) -> dict: + """ + Execute an automation iteration: executed actions can be received priority actions or DAG's executable actions. + In case of priority actions, the returned next scheduled time will be the same as the previous one to respect + the latest DAG execution time schedule. + + Should be a SCHEDULER.INSTANCE.step to avoid executing actions twice when recovering a workflow + that was interrupted while executing priority actions which were received AFTER the initial + iteration of the workflow. + + Will retry up to 3 times in case of an unexpected error before failing step. + """ + parsed_inputs: params.AutomationWorkflowInputs = params.AutomationWorkflowInputs.from_dict(inputs) + executed_step: str = "no action executed" + execution_error = next_step = next_step_at = None + result = octobot_flow_client.OctoBotActionsJobResult() + with octobot_node.scheduler.task_context.encrypted_task(parsed_inputs.task, result): + #### Start of decryped task context #### + if parsed_inputs.task.type == octobot_node.models.TaskType.EXECUTE_ACTIONS.value: + user_actions, trading_signals = AutomationWorkflow._parse_actions_update_envelope(actions_update) + AutomationWorkflow._log_iteration_execution_intent( + parsed_inputs, user_actions, trading_signals + ) + await octobot_flow_client.OctoBotActionsJob( + parsed_inputs.task.content, user_actions, trading_signals, result + ).run() + if result.processed_actions: + if latest_step := AutomationWorkflow._get_actions_summary(result.processed_actions, minimal=True): + executed_step = latest_step + for action in result.processed_actions: + if action.error_status is not None: + AutomationWorkflow.get_logger(parsed_inputs).error( + f"Error: {action.error_status} when executing action {action.id}: {action.get_summary()} " + ) + execution_error = action.error_status + else: + raise errors.WorkflowInputError(f"Invalid task type: {parsed_inputs.task.type}") + next_actions = [] + remaining_steps = 0 + if result.has_next_actions: + if result.actions_dag: + next_actions = result.actions_dag.get_executable_actions() + remaining_steps = len(result.actions_dag.get_pending_actions()) + next_step_at = result.next_actions_description.get_next_execution_time() if result.next_actions_description else None + next_step = AutomationWorkflow._get_actions_summary(next_actions, minimal=True) + next_actions_str = f"next immediate actions: {next_actions}" if next_actions else "all actions completed" + AutomationWorkflow.get_logger(parsed_inputs).info( + f"Iteration completed, executed step: '{executed_step}', {next_actions_str}" + ) + #### End of decryped task context - no clear data after this point in encrypted context #### + + return params.AutomationWorkflowIterationResult( + progress_status=params.ProgressStatus( + latest_step=executed_step, + next_step=next_step, + next_step_at=next_step_at, + remaining_steps=remaining_steps, + error=execution_error, + should_stop=result.should_stop, + ), + next_iteration_description=result.maybe_encrypted_next_actions_description, + next_iteration_description_metadata=result.next_actions_description_encryption_metadata, + has_next_actions=result.has_next_actions, + ).to_dict(include_default_values=False) + + @staticmethod + async def _wait_and_trigger_on_actions_update( + parsed_inputs: params.AutomationWorkflowInputs, resume_execution_time: float + ) -> typing.Optional[dict]: + delay = max(0, resume_execution_time - time.time()) + actions_topic = octobot_node.enums.AutomationWorkflowMessageTopics.ACTIONS_UPDATE.value + if recv_payload := await SCHEDULER.INSTANCE.recv_async(topic=actions_topic, timeout_seconds=delay): + AutomationWorkflow.get_logger(parsed_inputs).info(f"Received actions updates: {recv_payload}") + return recv_payload + return None + + @staticmethod + def _parse_actions_update_envelope( + actions_update: typing.Optional[dict], + ) -> tuple[list[dict], list[dict]]: + if not actions_update: + return [], [] + envelope = params.AutomationWorkflowActionUpdate.from_dict(actions_update) + if envelope.actions_type == octobot_node.enums.AutomationWorkflowActionTypes.USER_ACTIONS.value: + return list(envelope.actions_details), [] + if envelope.actions_type == octobot_node.enums.AutomationWorkflowActionTypes.TRADING_SIGNAL.value: + return [], list(envelope.actions_details) + return [], [] + + @staticmethod + def _log_iteration_execution_intent( + parsed_inputs: params.AutomationWorkflowInputs, + user_actions: list[dict], + trading_signals: list[dict], + ) -> None: + logger = AutomationWorkflow.get_logger(parsed_inputs) + if user_actions: + logger.info(f"Executing user actions: {user_actions}") + if trading_signals: + logger.info(f"Executing trading signals: {trading_signals}") + if not user_actions and not trading_signals: + logger.info(f"Executing {parsed_inputs.task.name} DAG's executable actions") + + @staticmethod + async def _process_pending_priority_actions_and_reschedule( + parsed_inputs: params.AutomationWorkflowInputs, + previous_iteration_result: params.AutomationWorkflowIterationResult + ) -> tuple[bool, params.AutomationWorkflowIterationResult]: + if not previous_iteration_result.has_next_actions: + return False, previous_iteration_result + # In case new priority actions were sent, execute them now. + # Any action sent to this workflow will be lost if not processed by it. + latest_iteration_result: params.AutomationWorkflowIterationResult = previous_iteration_result + while new_actions_update := await AutomationWorkflow._wait_and_trigger_on_actions_update( + parsed_inputs, 0 + ): + extra_iteration_inputs = AutomationWorkflow._create_next_iteration_inputs( + parsed_inputs, latest_iteration_result.next_iteration_description, 0, + latest_iteration_result.next_iteration_description_metadata, + ) + # execute the iteration on the updated state from last iteration + raw_iteration_result = await AutomationWorkflow.execute_iteration(extra_iteration_inputs, new_actions_update) + # use the new inputs for the next iteration of this loop + parsed_inputs = params.AutomationWorkflowInputs.from_dict(extra_iteration_inputs) + latest_iteration_result = params.AutomationWorkflowIterationResult.from_dict(raw_iteration_result) + if not AutomationWorkflow._should_continue_workflow(parsed_inputs, latest_iteration_result.progress_status, False): + return False, latest_iteration_result + if not latest_iteration_result.has_next_actions: + raise errors.WorkflowPriorityActionExecutionError( + f"Unexpected error: no next iteration description after processing priority actions: {latest_iteration_result}" + ) + if latest_iteration_result.progress_status.should_stop: + AutomationWorkflow.get_logger(parsed_inputs).info( + f"Stopping workflow, should stop: {latest_iteration_result.progress_status.should_stop}" + ) + else: + # successful iteration and a new iteration is required, schedule next iteration, don't return anything + await AutomationWorkflow._schedule_next_iteration( + parsed_inputs, + latest_iteration_result.next_iteration_description, # type: ignore + latest_iteration_result.progress_status, + latest_iteration_result.next_iteration_description_metadata, + ) + return True, latest_iteration_result + + @staticmethod + async def _schedule_next_iteration( + parsed_inputs: params.AutomationWorkflowInputs, + next_iteration_description: str, + progress_status: params.ProgressStatus, + next_iteration_description_metadata: typing.Optional[str] = None, + ): + next_execution_time = progress_status.next_step_at or 0 + next_iteration_inputs = AutomationWorkflow._create_next_iteration_inputs( + parsed_inputs, next_iteration_description, next_execution_time, next_iteration_description_metadata + ) + delay = next_execution_time - time.time() + delay_str = f", starting in {delay:.2f} seconds" if delay > 0 else "" + AutomationWorkflow.get_logger(parsed_inputs).info( + f"Enqueuing next iteration: next step: {progress_status.next_step}, " + f"remaining steps: {progress_status.remaining_steps}{delay_str}." + ) + await SCHEDULER.AUTOMATION_WORKFLOW_QUEUE.enqueue_async( + AutomationWorkflow.execute_automation, + inputs=next_iteration_inputs + ) + + @staticmethod + def _create_next_iteration_inputs( + parsed_inputs: params.AutomationWorkflowInputs, + next_iteration_description: str, + next_execution_time: float, + next_iteration_description_metadata: typing.Optional[str] = None, + ) -> dict: + # update task.content with the next iteration description containing the automation state + next_task = parsed_inputs.task + next_task.content = next_iteration_description + next_task.content_metadata = next_iteration_description_metadata + next_execution_time = next_execution_time or 0 + return params.AutomationWorkflowInputs( + task=parsed_inputs.task, execution_time=next_execution_time + ).to_dict(include_default_values=False) + + @staticmethod + def _should_continue_workflow( + parsed_inputs: params.AutomationWorkflowInputs, + progress_status: params.ProgressStatus, + stop_on_error: bool + ) -> bool: + if progress_status.error: + # failed iteration, return global progress where it stopped and exit workflow + AutomationWorkflow.get_logger(parsed_inputs).error( + f"Failed iteration: stopping workflow, error: {progress_status.error}. " + f"Iteration's last step: {progress_status.latest_step}" + ) + return stop_on_error + elif progress_status.should_stop: + AutomationWorkflow.get_logger(parsed_inputs).info( + f"Workflow stop required: stopping workflow" + ) + return False + return True + + @staticmethod + def _get_actions_summary(actions: list["octobot_flow.entities.AbstractActionDetails"], minimal: bool = False) -> str: + return ", ".join([action.get_summary(minimal=minimal) for action in actions]) if actions else "" + + @staticmethod + def _get_failed_error_status(error: Exception) -> str: + if isinstance(error, dbos.error.DBOSMaxStepRetriesExceeded): + last_error = error.errors[-1] + if isinstance(last_error, octobot_flow.errors.InvalidAutomationActionError): + return octobot_flow.enums.AutomationWorkflowErrorStatus.INVALID_ACTION_CONFIGURATION.value + return octobot_flow.enums.AutomationWorkflowErrorStatus.EXCEPTION_DURING_ITERATION.value + + @staticmethod + def get_logger(parsed_inputs: params.AutomationWorkflowInputs) -> octobot_commons.logging.BotLogger: + return octobot_commons.logging.get_logger( + parsed_inputs.task.name or AutomationWorkflow.__name__ + ) diff --git a/packages/node/octobot_node/scheduler/workflows/params/__init__.py b/packages/node/octobot_node/scheduler/workflows/params/__init__.py new file mode 100644 index 0000000000..63b1b503da --- /dev/null +++ b/packages/node/octobot_node/scheduler/workflows/params/__init__.py @@ -0,0 +1,32 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from .base_params import ( + ProgressStatus, +) +from .automation_workflow_params import ( + AutomationWorkflowActionUpdate, + AutomationWorkflowInputs, + AutomationWorkflowIterationResult, + AutomationWorkflowOutput, +) + +__all__ = [ + "AutomationWorkflowActionUpdate", + "AutomationWorkflowInputs", + "AutomationWorkflowIterationResult", + "AutomationWorkflowOutput", + "ProgressStatus", +] diff --git a/packages/node/octobot_node/scheduler/workflows/params/automation_workflow_params.py b/packages/node/octobot_node/scheduler/workflows/params/automation_workflow_params.py new file mode 100644 index 0000000000..654deda81f --- /dev/null +++ b/packages/node/octobot_node/scheduler/workflows/params/automation_workflow_params.py @@ -0,0 +1,48 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import typing + +import octobot_commons.dataclasses.minimizable_dataclass +import octobot_node.models +import octobot_node.scheduler.workflows.params.base_params as base_params + + +@dataclasses.dataclass +class AutomationWorkflowInputs(octobot_commons.dataclasses.minimizable_dataclass.MinimizableDataclass): + task: octobot_node.models.Task + execution_time: float = 0 + + +@dataclasses.dataclass +class AutomationWorkflowOutput(octobot_commons.dataclasses.minimizable_dataclass.MinimizableDataclass): + state: typing.Optional[str] = None + state_metadata: typing.Optional[str] = None + error: typing.Optional[str] = None + + +@dataclasses.dataclass +class AutomationWorkflowIterationResult(octobot_commons.dataclasses.minimizable_dataclass.MinimizableDataclass): + progress_status: base_params.ProgressStatus + next_iteration_description: typing.Optional[str] + next_iteration_description_metadata: typing.Optional[str] = None + has_next_actions: bool = False + + +@dataclasses.dataclass +class AutomationWorkflowActionUpdate(octobot_commons.dataclasses.minimizable_dataclass.MinimizableDataclass): + actions_type: str # octobot_node.enums.AutomationWorkflowActionTypes value + actions_details: list[dict] # list of actions dicts diff --git a/packages/node/octobot_node/scheduler/workflows/params/base_params.py b/packages/node/octobot_node/scheduler/workflows/params/base_params.py new file mode 100644 index 0000000000..ac320048c6 --- /dev/null +++ b/packages/node/octobot_node/scheduler/workflows/params/base_params.py @@ -0,0 +1,29 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pydantic +import typing + + +class ProgressStatus(pydantic.BaseModel): + """ + Only contains non-encrypted data + """ + latest_step: typing.Optional[str] = None + next_step: typing.Optional[str] = None + next_step_at: typing.Optional[float] = None + remaining_steps: typing.Optional[int] = None + error: typing.Optional[str] = None + should_stop: bool = False diff --git a/packages/node/octobot_node/scheduler/workflows_util.py b/packages/node/octobot_node/scheduler/workflows_util.py new file mode 100644 index 0000000000..5b4786d365 --- /dev/null +++ b/packages/node/octobot_node/scheduler/workflows_util.py @@ -0,0 +1,99 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import typing +import dbos as dbos_lib + +import octobot_commons.logging +import octobot_node.models as models +import octobot_node.scheduler.workflows.params as params + + +try: + import octobot_flow + import octobot_flow.parsers +except ImportError: + octobot_commons.logging.get_logger("octobot_node.scheduler.workflows_util").warning( + "octobot_flow is not installed, workflows utilities will not be available" + ) + + +STATE_KEY = "state" + + +def get_automation_copied_strategy_ids(workflow_status: dbos_lib.WorkflowStatus) -> list[str]: + if reader := get_automation_state_reader(workflow_status): + return reader.get_automation_copied_strategy_ids() + return [] + + +def get_automation_state_reader(workflow_status: dbos_lib.WorkflowStatus) -> typing.Optional["octobot_flow.parsers.AutomationStateReader"]: + """Get the automation state from the workflow status""" + if state_dict := get_automation_state_dict(workflow_status): + return octobot_flow.parsers.AutomationStateReader( + octobot_flow.AutomationState.from_dict(state_dict) + ) + return None + + +def get_automation_id(workflow_status: dbos_lib.WorkflowStatus) -> typing.Optional[str]: + if state_dict := get_automation_state_dict(workflow_status): + return state_dict.get("automation", {}).get("metadata", {}).get("automation_id") + return None + + +def get_automation_state_dict(workflow_status: dbos_lib.WorkflowStatus) -> typing.Optional[dict]: + if inputs := get_automation_workflow_inputs(workflow_status): + try: + return get_automation_dict(inputs.task.content)[STATE_KEY] + except ValueError: + return None + return None + + +def get_input_task(workflow_status: dbos_lib.WorkflowStatus) -> typing.Optional[models.Task]: + if inputs := get_automation_workflow_inputs(workflow_status): + return inputs.task + return None + + +def get_automation_workflow_inputs(workflow_status: dbos_lib.WorkflowStatus) -> typing.Optional[params.AutomationWorkflowInputs]: + for input in list(workflow_status.input.get("args", [])) + list(workflow_status.input.get("kwargs", {}).values()): + if isinstance(input, dict): + try: + parsed_inputs = params.AutomationWorkflowInputs.from_dict(input) + return parsed_inputs + except TypeError: + print(f"Failed to parse inputs: {input}") + pass + return None + + +def get_automation_dict(description: typing.Union[str, dict]) -> dict: + if isinstance(description, str): + description = json.loads(description) + if isinstance(description, dict) and (state := description.get(STATE_KEY)) and isinstance(state, dict): + return description + raise ValueError("No automation state found in description") + + +async def get_automation_workflow_status(automation_id: str) -> dbos_lib.WorkflowStatus: + for workflow_status in await dbos_lib.DBOS.list_workflows_async(status=[ + dbos_lib.WorkflowStatusString.PENDING.value, dbos_lib.WorkflowStatusString.ENQUEUED.value + ]): + if get_automation_id(workflow_status) == automation_id: + return workflow_status + raise ValueError(f"No automation workflow found for automation_id: {automation_id}") diff --git a/packages/node/requirements.txt b/packages/node/requirements.txt new file mode 100644 index 0000000000..480be29c5b --- /dev/null +++ b/packages/node/requirements.txt @@ -0,0 +1,5 @@ +fastapi[standard]==0.135.1 +passlib[bcrypt]==1.7.4 +pydantic + +dbos==2.17.0 diff --git a/packages/node/scheduler_test.sqlite b/packages/node/scheduler_test.sqlite new file mode 100644 index 0000000000..13863c51bc Binary files /dev/null and b/packages/node/scheduler_test.sqlite differ diff --git a/packages/node/tests/__init__.py b/packages/node/tests/__init__.py new file mode 100644 index 0000000000..0c7e4d5082 --- /dev/null +++ b/packages/node/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/node/tests/scheduler/__init__.py b/packages/node/tests/scheduler/__init__.py new file mode 100644 index 0000000000..f4c8883050 --- /dev/null +++ b/packages/node/tests/scheduler/__init__.py @@ -0,0 +1,55 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import dbos +import tempfile + +import octobot_node.scheduler +import octobot_node.scheduler.workflows + + +def init_scheduler(db_file_name: str): + config: dbos.DBOSConfig = { + "name": "scheduler_test", + "system_database_url": f"sqlite:///{db_file_name}", + } + if octobot_node.scheduler.SCHEDULER.AUTOMATION_WORKFLOW_QUEUE is None: + octobot_node.scheduler.SCHEDULER.create_queues() + dbos.DBOS(config=config) + octobot_node.scheduler.SCHEDULER.INSTANCE = dbos.DBOS + octobot_node.scheduler.workflows.register_workflows() + return dbos.DBOS + + +@pytest.fixture() +def temp_dbos_scheduler(): + # from https://docs.dbos.dev/python/tutorials/testing + # don't use too muck as it is very slow + with tempfile.NamedTemporaryFile() as temp_file: + dbos =init_scheduler(temp_file.name) + dbos.reset_system_database() + dbos.launch() + try: + yield octobot_node.scheduler.SCHEDULER + finally: + dbos.destroy() + + +def init_and_destroy_scheduler(db_file_name: str): + dbos = init_scheduler(db_file_name) + dbos.reset_system_database() + dbos.launch() + dbos.destroy() diff --git a/packages/node/tests/scheduler/task_import/.gitignore b/packages/node/tests/scheduler/task_import/.gitignore new file mode 100644 index 0000000000..c3ee73f036 --- /dev/null +++ b/packages/node/tests/scheduler/task_import/.gitignore @@ -0,0 +1,4 @@ +encrypted_tasks.csv +decrypted_tasks.csv +encrypted_results.csv +task_encryption_keys.json diff --git a/packages/node/tests/scheduler/task_import/__init__.py b/packages/node/tests/scheduler/task_import/__init__.py new file mode 100644 index 0000000000..0c7e4d5082 --- /dev/null +++ b/packages/node/tests/scheduler/task_import/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/node/tests/scheduler/task_import/test-results.csv b/packages/node/tests/scheduler/task_import/test-results.csv new file mode 100644 index 0000000000..732995b499 --- /dev/null +++ b/packages/node/tests/scheduler/task_import/test-results.csv @@ -0,0 +1,4 @@ +name,result +Task 1,"{""status"": ""completed"", ""data"": ""result1""}" +Task 2,"{""status"": ""completed"", ""data"": ""result2""}" +Task 3,"{""status"": ""failed"", ""error"": ""test error""}" diff --git a/packages/node/tests/scheduler/task_import/test-tasks.csv b/packages/node/tests/scheduler/task_import/test-tasks.csv new file mode 100644 index 0000000000..d8b6ad4bcc --- /dev/null +++ b/packages/node/tests/scheduler/task_import/test-tasks.csv @@ -0,0 +1,6 @@ +"name","content","type","actions","order_type","order_side","order_price","order_symbol","order_amount","order_leverage","exchange_from","exchange_to","blockchain_from_asset","blockchain_from_amount","blockchain_to_address","blockchain_to_address","blockchain_from","blockchain_to","simulated_portfolio" +"Deposit 1 Bitcoin",,"execute_actions","deposit",,,,,,,,"binance","BTC",1,,,"SIMULATED",, +"Trade 1 ETH vs Bitcoin",,"execute_actions","trade","market","sell",,"ETH/BTC",1,,"binance",,,,,,,,"{""ETH"":2}" +"Open long position on Binance",,"execute_actions","trade","limit",,50000,,,10,,"binance",,,,,,, +"Buy YES to $150k what-price-will-bitcoin-hit-in-january-2026 on polymarket",,"execute_actions","trade","market",,,"what-price-will-bitcoin-hit-in-january-2026/USDC:USDC-260131-0-YES",,,,,"polymarket",,,,,, +"Decentralized trading example","{""EXCHANGE_TO"":""binance"",""BLOCKCHAIN_FROM_ASSET"":""BTC"",""BLOCKCHAIN_FROM_AMOUNT"":1,""BLOCKCHAIN_FROM"":""SIMULATED"",""ORDER_SYMBOL"":""ETH/BTC"",""ORDER_AMOUNT"":1,""ORDER_TYPE"":""market"",""EXCHANGE_FROM"":""binance"",""BLOCKCHAIN_TO"":""Ethereum"",""BLOCKCHAIN_TO_ASSET"":""ETH"",""BLOCKCHAIN_TO_AMOUNT"":0.9,""BLOCKCHAIN_TO_ADDRESS"":""0x123456"",""MIN_DELAY"":2}","execute_actions","deposit,wait,trade,wait,withdraw",,,,,,,,,,,,,,, diff --git a/packages/node/tests/scheduler/test_api.py b/packages/node/tests/scheduler/test_api.py new file mode 100644 index 0000000000..280b4d47dd --- /dev/null +++ b/packages/node/tests/scheduler/test_api.py @@ -0,0 +1,394 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock +from octobot_node.models import Execution, Task, TaskStatus +from octobot_node.scheduler.api import ( + get_node_status, + get_task_metrics, + get_all_tasks, + get_task_result, +) + +from tests.scheduler import temp_dbos_scheduler + + +class TestGetNodeStatus: + """Tests for get_node_status function.""" + + def test_get_node_status_master_node_with_postgres(self) -> None: + """Test node status for master node with Postgres backend.""" + mock_settings = mock.Mock() + mock_settings.IS_MASTER_MODE = True + mock_settings.SCHEDULER_POSTGRES_URL = "postgresql://localhost/db" + mock_settings.SCHEDULER_SQLITE_FILE = "tasks.db" + + with mock.patch("octobot_node.config.settings", mock_settings): + result = get_node_status() + + assert result["node_type"] == "both" + assert result["backend_type"] == "postgres" + assert result["workers"] == 1 + assert result["status"] == "running" + assert result["redis_url"] is None + assert result["sqlite_file"] is None + + def test_get_node_status_master_node_always_running(self) -> None: + """Test that master node is always running regardless of consumer state.""" + mock_settings = mock.Mock() + mock_settings.IS_MASTER_MODE = True + mock_settings.SCHEDULER_POSTGRES_URL = None + mock_settings.SCHEDULER_SQLITE_FILE = "tasks.db" + + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = mock.Mock(_launched=False) + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = get_node_status() + + assert result["status"] == "running" + assert result["node_type"] == "both" + + def test_get_node_status_both_master_and_consumers(self) -> None: + """Test node status when both master mode and consumers are enabled.""" + mock_settings = mock.Mock() + mock_settings.IS_MASTER_MODE = True + mock_settings.SCHEDULER_POSTGRES_URL = "postgresql://localhost/db" + mock_settings.SCHEDULER_SQLITE_FILE = "tasks.db" + + with mock.patch("octobot_node.config.settings", mock_settings): + result = get_node_status() + + assert result["node_type"] == "both" + assert result["backend_type"] == "postgres" + assert result["workers"] == 1 # multi workers are not supported yet + assert result["status"] == "running" + + def test_get_node_status_none(self) -> None: + """Test node status when neither master mode nor consumers are enabled.""" + mock_settings = mock.Mock() + mock_settings.IS_MASTER_MODE = False + mock_settings.CONSUMER_ONLY = False + mock_settings.SCHEDULER_POSTGRES_URL = None + mock_settings.SCHEDULER_SQLITE_FILE = "tasks.db" + + with mock.patch("octobot_node.config.settings", mock_settings): + result = get_node_status() + + assert result["node_type"] == "none" + assert result["status"] == "stopped" + assert result["workers"] is 0 + + +class TestGetTaskMetrics: + """Tests for get_task_metrics function.""" + + @pytest.mark.asyncio + async def test_get_task_metrics_success(self, temp_dbos_scheduler) -> None: + """Test successful retrieval of task metrics.""" + call_responses = [[mock.Mock()] * 5, [mock.Mock()] * 10] + call_idx = [0] + + async def mock_list_workflows(*args, **kwargs): + result = call_responses[call_idx[0]] + call_idx[0] += 1 + return result + + mock_get_periodic = mock.AsyncMock(return_value=[ + {"id": "task1"}, + {"id": "task2"}, + ]) + + with mock.patch.object( + temp_dbos_scheduler.INSTANCE, "list_workflows_async", side_effect=mock_list_workflows + ), mock.patch.object(temp_dbos_scheduler, "get_periodic_tasks", mock_get_periodic): + result = await get_task_metrics() + + assert result["pending"] == 5 + assert result["scheduled"] == 2 + assert result["results"] == 10 + mock_get_periodic.assert_called_once() + + @pytest.mark.asyncio + async def test_get_task_metrics_uninitialized_scheduler(self) -> None: + """Test task metrics when scheduler is not initialized.""" + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = None + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_task_metrics() + + assert result == {"pending": 0, "scheduled": 0, "results": 0} + + @pytest.mark.asyncio + async def test_get_task_metrics_exception_handling(self) -> None: + """Test task metrics when an exception occurs.""" + mock_instance = mock.AsyncMock() + mock_instance.list_workflows_async.side_effect = Exception("Database error") + + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = mock_instance + mock_scheduler.get_periodic_tasks = mock.AsyncMock(return_value=[]) + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_task_metrics() + + assert result == {"pending": 0, "scheduled": 0, "results": 0} + + @pytest.mark.asyncio + async def test_get_task_metrics_no_periodic_tasks(self) -> None: + """Test task metrics when there are no periodic tasks.""" + call_responses = [[mock.Mock()] * 2, [mock.Mock()] * 5] + call_idx = [0] + + async def mock_list_workflows(*args, **kwargs): + result = call_responses[call_idx[0]] + call_idx[0] += 1 + return result + + mock_instance = mock.AsyncMock() + mock_instance.list_workflows_async.side_effect = mock_list_workflows + + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = mock_instance + mock_scheduler.get_periodic_tasks = mock.AsyncMock(return_value=[]) + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_task_metrics() + + assert result["pending"] == 2 + assert result["scheduled"] == 0 + assert result["results"] == 5 + + +class TestGetAllTasks: + """Tests for get_all_tasks function.""" + + @pytest.mark.asyncio + async def test_get_all_tasks_success(self, temp_dbos_scheduler) -> None: + """Test successful retrieval of all tasks with distinct IDs produces one Task per Execution.""" + periodic_executions = [Execution(id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", status=TaskStatus.PERIODIC)] + pending_executions = [Execution(id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", status=TaskStatus.PENDING)] + scheduled_executions = [Execution(id="cccccccc-cccc-cccc-cccc-cccccccccccc", status=TaskStatus.SCHEDULED)] + result_executions = [Execution(id="dddddddd-dddd-dddd-dddd-dddddddddddd", status=TaskStatus.COMPLETED)] + + with mock.patch.object( + temp_dbos_scheduler, "get_periodic_tasks", mock.AsyncMock(return_value=periodic_executions) + ), mock.patch.object( + temp_dbos_scheduler, "get_pending_tasks", mock.AsyncMock(return_value=pending_executions) + ), mock.patch.object( + temp_dbos_scheduler, "get_scheduled_tasks", mock.AsyncMock(return_value=scheduled_executions) + ), mock.patch.object( + temp_dbos_scheduler, "get_results", mock.AsyncMock(return_value=result_executions) + ): + result = await get_all_tasks() + + assert len(result) == 4 + assert all(isinstance(t, Task) for t in result) + assert all(len(t.executions) == 1 for t in result) + task_ids = {t.id for t in result} + assert "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" in task_ids + assert "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" in task_ids + assert "cccccccc-cccc-cccc-cccc-cccccccccccc" in task_ids + assert "dddddddd-dddd-dddd-dddd-dddddddddddd" in task_ids + + @pytest.mark.asyncio + async def test_get_all_tasks_merges_same_id(self, temp_dbos_scheduler) -> None: + """Test that executions sharing the same parent ID are merged into a single Task.""" + parent_id = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" + child_suffix = "_child_step_1" + pending_executions = [Execution(id=parent_id, status=TaskStatus.PENDING, name="my-task")] + result_executions = [Execution(id=f"{parent_id}{child_suffix}", status=TaskStatus.COMPLETED, name="my-task")] + + with mock.patch.object( + temp_dbos_scheduler, "get_periodic_tasks", mock.AsyncMock(return_value=[]) + ), mock.patch.object( + temp_dbos_scheduler, "get_pending_tasks", mock.AsyncMock(return_value=pending_executions) + ), mock.patch.object( + temp_dbos_scheduler, "get_scheduled_tasks", mock.AsyncMock(return_value=[]) + ), mock.patch.object( + temp_dbos_scheduler, "get_results", mock.AsyncMock(return_value=result_executions) + ): + result = await get_all_tasks() + + assert len(result) == 1 + task = result[0] + assert isinstance(task, Task) + assert task.id == parent_id + assert len(task.executions) == 2 + assert any(e.status == TaskStatus.PENDING for e in task.executions) + + @pytest.mark.asyncio + async def test_get_all_tasks_active_execution_latest_completed(self, temp_dbos_scheduler) -> None: + """Test that when no pending execution, the latest completed_at is used as active.""" + import datetime + parent_id = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" + older = Execution( + id=f"{parent_id}_old", + status=TaskStatus.COMPLETED, + name="old-run", + completed_at=datetime.datetime(2025, 1, 1), + ) + newer = Execution( + id=f"{parent_id}_new", + status=TaskStatus.COMPLETED, + name="new-run", + completed_at=datetime.datetime(2025, 6, 1), + ) + + with mock.patch.object( + temp_dbos_scheduler, "get_periodic_tasks", mock.AsyncMock(return_value=[]) + ), mock.patch.object( + temp_dbos_scheduler, "get_pending_tasks", mock.AsyncMock(return_value=[]) + ), mock.patch.object( + temp_dbos_scheduler, "get_scheduled_tasks", mock.AsyncMock(return_value=[]) + ), mock.patch.object( + temp_dbos_scheduler, "get_results", mock.AsyncMock(return_value=[older, newer]) + ): + result = await get_all_tasks() + + assert len(result) == 1 + assert result[0].name == "new-run" + + @pytest.mark.asyncio + async def test_get_all_tasks_empty(self) -> None: + """Test get_all_tasks when there are no tasks.""" + mock_scheduler = mock.Mock() + mock_scheduler.get_periodic_tasks = mock.AsyncMock(return_value=[]) + mock_scheduler.get_pending_tasks = mock.AsyncMock(return_value=[]) + mock_scheduler.get_scheduled_tasks = mock.AsyncMock(return_value=[]) + mock_scheduler.get_results = mock.AsyncMock(return_value=[]) + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_all_tasks() + + assert result == [] + + @pytest.mark.asyncio + async def test_get_all_tasks_exception_handling(self) -> None: + """Test get_all_tasks when an exception occurs.""" + mock_scheduler = mock.Mock() + mock_scheduler.get_periodic_tasks = mock.AsyncMock(side_effect=Exception("Database error")) + mock_scheduler.get_pending_tasks = mock.AsyncMock(return_value=[]) + mock_scheduler.get_scheduled_tasks = mock.AsyncMock(return_value=[]) + mock_scheduler.get_results = mock.AsyncMock(return_value=[]) + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_all_tasks() + + assert result == [] + + @pytest.mark.asyncio + async def test_get_all_tasks_partial_exception(self) -> None: + """Test get_all_tasks when one method fails - gather fails entirely, returns [].""" + mock_scheduler = mock.Mock() + mock_scheduler.get_periodic_tasks = mock.AsyncMock(return_value=[Execution(id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")]) + mock_scheduler.get_pending_tasks = mock.AsyncMock(return_value=[Execution(id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")]) + mock_scheduler.get_scheduled_tasks = mock.AsyncMock(side_effect=Exception("Error")) + mock_scheduler.get_results = mock.AsyncMock(return_value=[]) + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_all_tasks() + + assert result == [] + + +class TestGetTaskResult: + """Tests for get_task_result function.""" + + @pytest.mark.asyncio + async def test_get_task_result_completed(self, temp_dbos_scheduler) -> None: + """Test get_task_result for a completed task.""" + task_id = "task-123" + result_data = {"status": "success", "output": "completed"} + + mock_handle = mock.AsyncMock() + mock_handle.get_status = mock.AsyncMock(return_value=mock.Mock(status="SUCCESS")) + mock_handle.get_result = mock.AsyncMock(return_value=result_data) + + mock_retrieve = mock.AsyncMock(return_value=mock_handle) + + with mock.patch.object( + temp_dbos_scheduler.INSTANCE, "retrieve_workflow_async", mock_retrieve + ): + result = await get_task_result(task_id) + + assert result["status"] == "completed" + assert result["data"] == result_data + mock_retrieve.assert_called_once_with(task_id) + mock_handle.get_status.assert_called_once() + mock_handle.get_result.assert_called_once() + + @pytest.mark.asyncio + async def test_get_task_result_pending(self) -> None: + """Test get_task_result for a pending task.""" + task_id = "task-456" + + mock_handle = mock.AsyncMock() + mock_handle.get_status = mock.AsyncMock(return_value=mock.Mock(status="PENDING")) + + mock_instance = mock.AsyncMock() + mock_instance.retrieve_workflow_async = mock.AsyncMock(return_value=mock_handle) + + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = mock_instance + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_task_result(task_id) + + assert result["status"] == "pending or running" + assert "data" not in result + mock_instance.retrieve_workflow_async.assert_called_once_with(task_id) + mock_handle.get_status.assert_called_once() + mock_handle.get_result.assert_not_called() + + @pytest.mark.asyncio + async def test_get_task_result_not_found(self) -> None: + """Test get_task_result for a task that doesn't exist.""" + task_id = "task-789" + + mock_instance = mock.AsyncMock() + mock_instance.retrieve_workflow_async = mock.AsyncMock(side_effect=Exception("not found")) + + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = mock_instance + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_task_result(task_id) + + assert result == {"error": "task not found"} + mock_instance.retrieve_workflow_async.assert_called_once_with(task_id) + + @pytest.mark.asyncio + async def test_get_task_result_running(self) -> None: + """Test get_task_result for a running task.""" + task_id = "task-running" + + mock_handle = mock.AsyncMock() + mock_handle.get_status = mock.AsyncMock(return_value=mock.Mock(status="PENDING")) + + mock_instance = mock.AsyncMock() + mock_instance.retrieve_workflow_async = mock.AsyncMock(return_value=mock_handle) + + mock_scheduler = mock.Mock() + mock_scheduler.INSTANCE = mock_instance + + with mock.patch("octobot_node.scheduler.SCHEDULER", mock_scheduler): + result = await get_task_result(task_id) + + assert result["status"] == "pending or running" \ No newline at end of file diff --git a/packages/node/tests/scheduler/test_internal_trading_signals.py b/packages/node/tests/scheduler/test_internal_trading_signals.py new file mode 100644 index 0000000000..5670337ba6 --- /dev/null +++ b/packages/node/tests/scheduler/test_internal_trading_signals.py @@ -0,0 +1,57 @@ +import asyncio + +import pytest + +import async_channel.channels as async_channel_channels + +pytest.importorskip("octobot_flow") + +import octobot_flow.repositories.community.trading_signals_channel as trading_signals_channel +import octobot_flow.repositories.community.trading_signals_repository as trading_signals_repository +import octobot_node.scheduler.internal_trading_signals as internal_trading_signals + + +def _channel_name() -> str: + return trading_signals_channel.InternalTradingSignalChannel.get_name() + + +@pytest.mark.asyncio +async def test_subscribe_internal_trading_signal_consumer_registers_consumer(): + async_channel_channels.del_chan(_channel_name()) + await internal_trading_signals.subscribe_internal_trading_signal_consumer() + channel = async_channel_channels.get_chan(_channel_name()) + assert len(channel.get_consumers()) >= 1 + await trading_signals_channel.shutdown_internal_trading_signal_channel() + + +@pytest.mark.asyncio +async def test_shutdown_internal_trading_signal_channel_after_subscribe_unregisters(): + async_channel_channels.del_chan(_channel_name()) + await internal_trading_signals.subscribe_internal_trading_signal_consumer() + await trading_signals_channel.shutdown_internal_trading_signal_channel() + with pytest.raises(KeyError): + async_channel_channels.get_chan(_channel_name()) + + +@pytest.mark.asyncio +async def test_get_or_create_after_shutdown_creates_new_channel(): + async_channel_channels.del_chan(_channel_name()) + await internal_trading_signals.subscribe_internal_trading_signal_consumer() + await trading_signals_channel.shutdown_internal_trading_signal_channel() + new_channel = await trading_signals_channel.get_or_create_internal_trading_signal_channel() + assert new_channel is not None + await trading_signals_channel.shutdown_internal_trading_signal_channel() + + +@pytest.mark.asyncio +async def test_insert_trading_signal_completes_without_error_after_subscribe(): + import octobot_copy.entities as copy_entities + import octobot_flow.entities as flow_entities + + async_channel_channels.del_chan(_channel_name()) + await internal_trading_signals.subscribe_internal_trading_signal_consumer() + signal = flow_entities.TradingSignal(account=copy_entities.Account(), strategy_id="test-strategy-id") + repository = trading_signals_repository.TradingSignalsRepository(object()) # type: ignore[arg-type] + await repository.insert_trading_signal(signal) + await asyncio.sleep(0.05) + await trading_signals_channel.shutdown_internal_trading_signal_channel() diff --git a/packages/node/tests/scheduler/test_octobot_flow_client_lib.py b/packages/node/tests/scheduler/test_octobot_flow_client_lib.py new file mode 100644 index 0000000000..35bde7c2b6 --- /dev/null +++ b/packages/node/tests/scheduler/test_octobot_flow_client_lib.py @@ -0,0 +1,1395 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import decimal +import time +import mock +import typing + +import octobot_commons.list_util as list_util +import octobot_commons.constants as common_constants +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.constants +import octobot_trading.errors +import octobot_trading.enums as trading_enums +import octobot_trading.blockchain_wallets.simulator.blockchain_wallet_simulator as blockchain_wallet_simulator +import octobot_trading.personal_data.orders.order_factory as order_factory +import octobot_node.scheduler.octobot_flow_client as octobot_flow_client + +RUN_TESTS = True + + +try: + import octobot_flow.entities + import octobot_flow.enums + + import tentacles.Meta.DSL_operators as DSL_operators + import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.fetch_order_operators as fetch_order_operators_module # noqa: E501 + + BLOCKCHAIN = octobot_trading.constants.SIMULATED_BLOCKCHAIN_NETWORK +except ImportError as err: + import traceback + traceback.print_exc() + print(f"Error importing octobot_flow: {err}") + # tests will be skipped if octobot_trading or octobot_wrapper are not installed + RUN_TESTS = False + BLOCKCHAIN = "unavailable" + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +EXCHANGE_INTERNAL_NAME = "binanceus" + + +@pytest.fixture +def market_order_action(): + return { + "params": { + "ACTIONS": "trade", + "EXCHANGE_FROM": EXCHANGE_INTERNAL_NAME, + "ORDER_SYMBOL": "ETH/BTC", + "ORDER_AMOUNT": 1, + "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", + "SIMULATED_PORTFOLIO": { + "BTC": 1, + }, + } + } + + +@pytest.fixture +def limit_order_action(): + return { + "params": { + "ACTIONS": "trade", + "EXCHANGE_FROM": EXCHANGE_INTERNAL_NAME, + "ORDER_SYMBOL": "ETH/BTC", + "ORDER_AMOUNT": 1, + "ORDER_PRICE": "-10%", + "ORDER_TYPE": "limit", + "ORDER_SIDE": "BUY", + "SIMULATED_PORTFOLIO": { + "BTC": 1, + }, + } + } + + +@pytest.fixture +def stop_loss_order_action(): + return { + "params": { + "ACTIONS": "trade", + "EXCHANGE_FROM": EXCHANGE_INTERNAL_NAME, + "ORDER_SYMBOL": "ETH/BTC", + "ORDER_TYPE": "stop", + "ORDER_AMOUNT": "10%", + "ORDER_SIDE": "SELL", + "ORDER_STOP_PRICE": "-10%", + "SIMULATED_PORTFOLIO": { + "ETH": 1, + }, + } + } + + +@pytest.fixture +def cancel_order_action(): + return { + "params": { + "ACTIONS": "cancel", + "EXCHANGE_FROM": EXCHANGE_INTERNAL_NAME, + "ORDER_SYMBOL": "ETH/BTC", + "ORDER_SIDE": "BUY", + } + } + + +@pytest.fixture +def polymarket_order_action(): + return { + "params": { + "ACTIONS": "trade", + "EXCHANGE_FROM": "polymarket", + "ORDER_SYMBOL": "what-price-will-bitcoin-hit-in-january-2026/USDC:USDC-260131-0-YES", + "ORDER_AMOUNT": 1, + "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", + "SIMULATED_PORTFOLIO": { + "USDC": 100, + }, + } + } + + +@pytest.fixture +def deposit_action(): + return { + "params": { + "ACTIONS": "deposit", + "EXCHANGE_TO": EXCHANGE_INTERNAL_NAME, + "BLOCKCHAIN_FROM_ASSET": "BTC", + "BLOCKCHAIN_FROM_AMOUNT": 1, + "BLOCKCHAIN_FROM": BLOCKCHAIN, + "SIMULATED_PORTFOLIO": { + "BTC": 0.01, + }, + } + } + + +@pytest.fixture +def transfer_blockchain_action(): + return { + "params": { + "ACTIONS": "transfer", + "BLOCKCHAIN_FROM_ASSET": "BTC", + "BLOCKCHAIN_FROM_AMOUNT": 1, + "BLOCKCHAIN_FROM_ADDRESS": "0x123_simulated_transfer_from_address_BTC", + "BLOCKCHAIN_FROM": BLOCKCHAIN, + "BLOCKCHAIN_TO": BLOCKCHAIN, + "BLOCKCHAIN_TO_ASSET": "BTC", + "BLOCKCHAIN_TO_ADDRESS": "0x123_simulated_transfer_to_address_BTC", + } + } + + +@pytest.fixture +def withdraw_action(): + return { + "params": { + "ACTIONS": "withdraw", + "EXCHANGE_FROM": EXCHANGE_INTERNAL_NAME, + "BLOCKCHAIN_TO": "ethereum", + "BLOCKCHAIN_TO_ASSET": "ETH", + "BLOCKCHAIN_TO_ADDRESS": "0x1234567890123456789012345678901234567890", + "SIMULATED_PORTFOLIO": { + "ETH": 2, + }, + }, + } + + +@pytest.fixture +def create_limit_instant_wait_and_cancel_order_action(limit_order_action, cancel_order_action): + all = { + "params": { + **limit_order_action["params"], + **cancel_order_action["params"], + **{ + "MIN_DELAY": 0, + "MAX_DELAY": 0, + } + } + } + all["params"]["SIMULATED_PORTFOLIO"] = { + "BTC": 1, + } + all["params"]["ACTIONS"] = "trade,wait,cancel" + return all + + +@pytest.fixture +def multiple_actions_bundle_no_wait(deposit_action, limit_order_action): + all = { + "params": { + **deposit_action["params"], + **limit_order_action["params"], + } + } + all["params"]["SIMULATED_PORTFOLIO"] = { + "BTC": 1, + } + all["params"]["ACTIONS"] = "deposit,trade" + return all + + +@pytest.fixture +def trade_transfer_and_check_balance_actions_bundle_no_wait(market_order_action, transfer_blockchain_action): + check_address = "17ouWjN7nvPWkZKo2svTF81etXL6Qxnty7" + all = { + "params": { + **market_order_action["params"], + **transfer_blockchain_action["params"], + **{ + "ORDER_EXTRA_PARAMS": {"address_to": check_address}, + "BLOCKCHAIN_TO_ADDRESS": ( + "dependency::action_trade_1::created_orders::0::esov::address_from" + ), + "BLOCKCHAIN_BALANCE_ADDRESS": "123_balance_address", + "BLOCKCHAIN_BALANCE_AMOUNT": 1, + "BLOCKCHAIN_BALANCE": BLOCKCHAIN, + "BLOCKCHAIN_BALANCE_ASSET": "BTC", + "LOOP_INTERVAL": 3, + "LOOP_TIMEOUT": 10, + "LOOP_MAX_ATTEMPTS": 4, + }, + } + } + all["params"]["SIMULATED_PORTFOLIO"] = { + "BTC": 1, + } + all["params"]["ACTIONS"] = "trade,transfer,loop_until_blockchain_balance" + return all + + +@pytest.fixture +def trade_and_loop_until_order_closed(market_order_action): + all = { + "params": { + **market_order_action["params"], + **{ + "ORDER_EXCHANGE_ID": ( + "dependency::action_trade_1::created_orders::0::exchange_id" + ), + "LOOP_INTERVAL": 3, + "LOOP_TIMEOUT": 10, + "LOOP_MAX_ATTEMPTS": 4, + }, + } + } + all["params"]["SIMULATED_PORTFOLIO"] = { + "BTC": 1, + } + all["params"]["ACTIONS"] = "trade,loop_until_order_closed" + return all + + +@pytest.fixture +def multiple_action_bundle_with_wait(deposit_action, market_order_action, withdraw_action): + all = { + "params": { + **deposit_action["params"], + **market_order_action["params"], + **withdraw_action["params"], + **{ + "MIN_DELAY": 100, + "MAX_DELAY": 150, + } + } + } + all["params"]["SIMULATED_PORTFOLIO"] = { + "BTC": 1, + } + all["params"]["ACTIONS"] = "deposit,wait,trade,wait,withdraw" + return all + + +def misses_required_octobot_flow_client_import(): + try: + if not RUN_TESTS: + return "OctoBot dependencies are not installed" + import octobot_flow + return None + except ImportError: + return "octobot_flow_client is not installed" + + +def get_failed_actions(actions: list["octobot_flow.entities.AbstractActionDetails"]) -> list[typing.Optional[dict]]: + return [ + action.result + for action in actions + if action.error_status is not octobot_flow.enums.ActionErrorStatus.NO_ERROR.value + ] + +def get_created_orders(actions: list["octobot_flow.entities.AbstractActionDetails"]) -> list[dict]: + order_lists = [ + action.result.get(DSL_operators.CREATED_ORDERS_KEY, []) + for action in actions + if action.result + ] + return list_util.flatten_list(order_lists) if order_lists else [] + +def get_cancelled_orders(actions: list["octobot_flow.entities.AbstractActionDetails"]) -> list[str]: + cancelled_orders = [ + action.result.get(DSL_operators.CANCELLED_ORDERS_KEY, []) + for action in actions + if action.result + ] + return list_util.flatten_list(cancelled_orders) if cancelled_orders else [] + +def get_deposit_and_withdrawal_details(actions: list["octobot_flow.entities.AbstractActionDetails"]) -> list[dict]: + withdrawal_lists = [ + action.result.get(DSL_operators.CREATED_WITHDRAWALS_KEY, []) + action.result.get(DSL_operators.CREATED_TRANSACTIONS_KEY, []) + for action in actions + if action.result and isinstance(action.result, dict) and ( + DSL_operators.CREATED_WITHDRAWALS_KEY in action.result or + DSL_operators.CREATED_TRANSACTIONS_KEY in action.result + ) + ] + return list_util.flatten_list(withdrawal_lists) if withdrawal_lists else [] + + +class TestOctoBotActionsJob: + + def setup_method(self): + if message := misses_required_octobot_flow_client_import(): + pytest.skip(reason=message) + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True + + def teardown_method(self): + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = False + + async def test_run_market_order_action(self, market_order_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob( + market_order_action, [], [], octobot_flow_client.OctoBotActionsJobResult(), + ) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the trade action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script == "market('buy', 'ETH/BTC', 1)" + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script == "market('buy', 'ETH/BTC', 1)" + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "ETH/BTC" + assert order["amount"] == 1 + assert order["type"] == "market" + assert order["side"] == "buy" + assert result.has_next_actions is False # no more actions to execute + + # ensure deposit is successful + post_deposit_portfolio = job2.after_execution_state.automation.exchange_account_elements.portfolio.content + assert post_deposit_portfolio["BTC"][common_constants.PORTFOLIO_AVAILABLE] < pre_trade_portfolio["BTC"][common_constants.PORTFOLIO_AVAILABLE] + assert post_deposit_portfolio["BTC"][common_constants.PORTFOLIO_TOTAL] < pre_trade_portfolio["BTC"][common_constants.PORTFOLIO_TOTAL] + + # bought ETH - fees + assert 0.990 < post_deposit_portfolio["ETH"][common_constants.PORTFOLIO_AVAILABLE] <= 0.999 + assert 0.990 < post_deposit_portfolio["ETH"][common_constants.PORTFOLIO_TOTAL] <= 0.999 + + async def test_run_limit_order_action(self, limit_order_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(limit_order_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the trade action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script == "limit('buy', 'ETH/BTC', 1, '-10%')" + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script == "limit('buy', 'ETH/BTC', 1, '-10%')" + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "ETH/BTC" + assert order["amount"] == decimal.Decimal("1") + assert decimal.Decimal("0.001") < order["price"] < decimal.Decimal("0.2") + assert order["type"] == "limit" + assert order["side"] == "buy" + assert result.has_next_actions is False # no more actions to execute + + async def test_run_stop_loss_order_action(self, stop_loss_order_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(stop_loss_order_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["ETH"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the trade action + with mock.patch.object( + # force stop loseses to be supported no matter the exchange + order_factory.OrderFactory, "_ensure_supported_order_type", mock.Mock() + ) as _ensure_supported_order_type: + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("stop_loss('sell', 'ETH/BTC', '10%', '-10%')") + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("stop_loss('sell', 'ETH/BTC', '10%', '-10%')") + assert processed_actions[0].error_status is None + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "ETH/BTC" + assert order["amount"] == decimal.Decimal("0.1") # 10% of 1 ETH + assert decimal.Decimal("0.001") < order["price"] < decimal.Decimal("0.2") + assert order["type"] == "stop_loss" + assert order["side"] == "sell" + assert result.has_next_actions is False # no more actions to execute + + async def test_run_cancel_limit_order_after_instant_wait_action(self, create_limit_instant_wait_and_cancel_order_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(create_limit_instant_wait_and_cancel_order_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the trade action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script == "limit('buy', 'ETH/BTC', 1, '-10%')" + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("limit(") + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "ETH/BTC" + assert order["amount"] == decimal.Decimal("1") + assert decimal.Decimal("0.001") < order["price"] < decimal.Decimal("0.2") + assert order["type"] == "limit" + assert order["side"] == "buy" + assert result.next_actions_description is not None + + # step 3: run the wait action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("wait(") + job3 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job3.run() + result = job3.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("wait(") + # wait is waiting 0 seconds, so it should be executed immediately + assert processed_actions[0].executed_at is not None and processed_actions[0].executed_at > 0 + + # step 4: run the cancel action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script == "cancel_order('ETH/BTC', side='buy')" + job4 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job4.run() + result = job4.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("cancel_order(") + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CANCELLED_ORDERS_KEY]) == len(get_cancelled_orders(processed_actions)) == 1 + assert result.has_next_actions is False # no more actions to execute + + @pytest.mark.skip(reason="restore once polymarket is fully supported") + async def test_polymarket_trade_action(self, polymarket_order_action): # TODO: update once polymarket is fullly supported + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(polymarket_order_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["USDC"] == { + common_constants.PORTFOLIO_AVAILABLE: 100, + common_constants.PORTFOLIO_TOTAL: 100, + } + + # step 2: run the trade action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("market(") + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + with pytest.raises(octobot_trading.errors.FailedRequest): # TODO: update once supported + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("market(") + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "what-price-will-bitcoin-hit-in-january-2026/USDC:USDC-260131-0-YES" + assert order["amount"] == decimal.Decimal("1") + assert order["type"] == "market" + assert order["side"] == "buy" + + async def test_run_transfer_blockchain_only_action(self, transfer_blockchain_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(transfer_blockchain_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert job.after_execution_state.automation.exchange_account_elements.portfolio.content == {} + + # step 2: run the transfer action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in next_actions[0].dsl_script + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in processed_actions[0].dsl_script + assert result.has_next_actions is False # no more actions to execute + + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CREATED_TRANSACTIONS_KEY]) == len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + assert len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + transaction = get_deposit_and_withdrawal_details(processed_actions)[0] + state_transaction = job2.after_execution_state.automation.exchange_account_elements.transactions + assert len(state_transaction) == 1 + assert state_transaction[0] == transaction + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == "BTC" + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == decimal.Decimal("1") + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == BLOCKCHAIN + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x123_simulated_transfer_to_address_BTC" + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_FROM.value] == "0x123_simulated_transfer_from_address_BTC" + + + + async def test_run_deposit_action(self, deposit_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(deposit_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_deposit_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_deposit_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 0.01, + common_constants.PORTFOLIO_TOTAL: 0.01, + } + + # step 2: run the deposit action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in next_actions[0].dsl_script + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in processed_actions[0].dsl_script + assert result.has_next_actions is False # no more actions to execute + + # ensure deposit is successful + post_deposit_portfolio = job2.after_execution_state.automation.exchange_account_elements.portfolio.content + assert post_deposit_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: pre_deposit_portfolio["BTC"][common_constants.PORTFOLIO_AVAILABLE] + deposit_action["params"]["BLOCKCHAIN_FROM_AMOUNT"], + common_constants.PORTFOLIO_TOTAL: pre_deposit_portfolio["BTC"][common_constants.PORTFOLIO_TOTAL] + deposit_action["params"]["BLOCKCHAIN_FROM_AMOUNT"], + } + + async def test_run_withdraw_action(self, withdraw_action): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(withdraw_action, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_withdraw_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_withdraw_portfolio["ETH"] == { + common_constants.PORTFOLIO_AVAILABLE: 2, + common_constants.PORTFOLIO_TOTAL: 2, + } + + # step 2: run the withdraw action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("withdraw(") + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("withdraw(") + assert result.has_next_actions is False # no more actions to execute + + # ensure withdraw is successful + post_withdraw_portfolio = job2.after_execution_state.automation.exchange_account_elements.portfolio.content + assert post_withdraw_portfolio == {} # portfolio should now be empty + + async def test_run_multiple_actions_bundle_no_wait(self, multiple_actions_bundle_no_wait): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(multiple_actions_bundle_no_wait, [], [], octobot_flow_client.OctoBotActionsJobResult()) + # ensure wait keywords have been considered + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the deposit action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 # only the deposit action should be executable as the trade action depends on it + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in next_actions[0].dsl_script + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in processed_actions[0].dsl_script + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CREATED_TRANSACTIONS_KEY]) == len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + assert len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + transaction = get_deposit_and_withdrawal_details(processed_actions)[0] + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == "BTC" + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == decimal.Decimal("1") + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == BLOCKCHAIN + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x123_simulated_deposit_address_BTC" + + + # step 3: run the trade action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 # only the trade action should be executable now: all others have been executed already + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("limit(") + job3 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job3.run() + result = job3.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("limit(") + assert len(get_created_orders(processed_actions)) == 1 + limit_order = get_created_orders(processed_actions)[0] + assert limit_order["symbol"] == "ETH/BTC" + assert limit_order["amount"] == decimal.Decimal("1") + assert limit_order["type"] == "limit" + assert limit_order["side"] == "buy" + assert result.has_next_actions is False # no more actions to execute + + # ensure trades are taken into account in portfolio + post_deposit_portfolio = job3.after_execution_state.automation.exchange_account_elements.portfolio.content + + assert "ETH" not in post_deposit_portfolio # ETH order has not been executed (still open) + + assert post_deposit_portfolio["BTC"][common_constants.PORTFOLIO_TOTAL] == 2 + # created a buy order but not executed: locked BTC in portfolio + assert post_deposit_portfolio["BTC"][common_constants.PORTFOLIO_AVAILABLE] < post_deposit_portfolio["BTC"][common_constants.PORTFOLIO_TOTAL] + + async def test_run_trade_and_loop_until_order_closed(self, trade_and_loop_until_order_closed): + # Step 1 — Apply automation config (ACTIONS: trade, loop_until_order_closed). + # The only runnable action is init/APPLY_CONFIGURATION; portfolio is seeded (e.g. BTC for the later market buy). + job = octobot_flow_client.OctoBotActionsJob(trade_and_loop_until_order_closed, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # Step 2 — Run the market trade node (first executable after init). Produces created_orders data the DAG wires + # into loop_until_order_closed via dependency::action_trade_1::created_orders::0::... + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and next_actions[0].dsl_script.startswith("market(") + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and processed_actions[0].dsl_script.startswith("market(") + assert processed_actions[0].result is not None + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "ETH/BTC" + assert order["amount"] == 1 + assert order["type"] == "market" + assert order["side"] == "buy" + + # Step 3 — loop_until_order_closed: DSL polls fetch_order until status != open (simulator reads orders_manager / trades). + # Sanity-check the generated script before running it. + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + loop_dsl = next_actions[0].dsl_script + assert loop_dsl is not None + assert loop_dsl.startswith("loop_until(") + assert "fetch_order" in loop_dsl + assert f"!= '{trading_enums.OrderStatus.OPEN.value}'" in loop_dsl + assert "3, timeout=10, max_attempts=4, return_remaining_time=True)" in loop_dsl + job3 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + # Step 3a — First automation run: pretend the order is still open on the first fetch_order resolution only. + # The real loop condition is fetch_order(...)["status"] != "open"; forcing "open" keeps it false once. + # With return_remaining_time=True, loop_until does not block: it yields a ReCallingOperatorResult and leaves + # the action pending for a later job run (same pattern as blockchain loop_until tests). + fetch_resolution_attempt_counter = {"count": 0} + real_simulated_fetch_resolve = fetch_order_operators_module._resolve_simulated_fetch_order_dict + + def resolve_simulated_order_first_fetch_reports_open_then_real( + exchange_mgr, symbol_param, exchange_order_param, raise_if_not_found=False + ): + order_dict = real_simulated_fetch_resolve( + exchange_mgr, symbol_param, exchange_order_param, raise_if_not_found=raise_if_not_found + ) + fetch_resolution_attempt_counter["count"] += 1 + if fetch_resolution_attempt_counter["count"] == 1: + dict_with_open_status = dict(order_dict) + dict_with_open_status[trading_enums.ExchangeConstantsOrderColumns.STATUS.value] = ( + trading_enums.OrderStatus.OPEN.value + ) + return dict_with_open_status + return order_dict + + with mock.patch.object( + fetch_order_operators_module, + "_resolve_simulated_fetch_order_dict", + mock.Mock(side_effect=resolve_simulated_order_first_fetch_reports_open_then_real), + ): + await job3.run() + result = job3.result + # Expect the loop_until action to be re-scheduled, not completed. + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("loop_until(") + assert processed_actions[0].executed_at is None + assert processed_actions[0].result is None + assert dsl_interpreter.ReCallingOperatorResult.is_re_calling_operator_result( + processed_actions[0].previous_execution_result + ) + assert result.next_actions_description is not None + assert result.has_next_actions is True + # Same loop_until node stays executable; previous_execution_result carries waiting_time for the scheduler. + parsed_state = octobot_flow.AutomationState.from_dict(result.next_actions_description.state) + next_actions_after_first_attempt = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions_after_first_attempt) == 1 + assert isinstance(next_actions_after_first_attempt[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions_after_first_attempt[0].dsl_script.startswith("loop_until(") + assert next_actions_after_first_attempt[0].previous_execution_result + last_loop_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + next_actions_after_first_attempt[0].previous_execution_result[ + dsl_interpreter.ReCallingOperatorResult.__name__ + ] + ) + assert last_loop_execution_result.last_execution_result is not None + assert last_loop_execution_result.last_execution_result[ + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value + ] > 0 + + # Step 3b — Second automation run: no patch; fetch_order sees the real status (non-open), condition is true, + # loop_until completes and the DAG has no further executable actions. + next_actions_description = result.next_actions_description + job3b = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job3b.run() + result = job3b.result + # trade is saved + assert len(job3b.after_execution_state.automation.exchange_account_elements.trades) == 1 + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("loop_until(value_if(fetch_order('") + assert processed_actions[0].error_status is None + # value_if returns the fetch_order dict when the condition is true, not a boolean + loop_result = processed_actions[0].result + assert isinstance(loop_result, dict) + assert loop_result[trading_enums.ExchangeConstantsOrderColumns.STATUS.value] != ( + trading_enums.OrderStatus.OPEN.value + ) + assert result.next_actions_description + assert result.has_next_actions is False + + async def test_run_trade_transfer_and_check_balance_actions_bundle_no_wait(self, trade_transfer_and_check_balance_actions_bundle_no_wait): + # step 1: configure the job (ACTIONS: trade, transfer, wait_for_blockchain_balance) + job = octobot_flow_client.OctoBotActionsJob(trade_transfer_and_check_balance_actions_bundle_no_wait, [], [], octobot_flow_client.OctoBotActionsJobResult()) + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the market trade action (first executable after init) + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and next_actions[0].dsl_script.startswith("market(") + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + _real_create_order_instance = order_factory.create_order_instance + + def _create_order_instance_with_address_from(*args, **kwargs): + order_instance = _real_create_order_instance(*args, **kwargs) + order_instance.exchange_specific_order_values = {"address_from": "123_address_from"} + return order_instance + + with mock.patch.object( + order_factory, "create_order_instance", + mock.Mock(side_effect=_create_order_instance_with_address_from), + ) as create_order_instance_mock: + await job2.run() + result = job2.result + assert len(result.processed_actions) == 1 + create_order_instance_mock.assert_called_once() + assert create_order_instance_mock.mock_calls[0].kwargs["exchange_creation_params"] == { + "address_to": "17ouWjN7nvPWkZKo2svTF81etXL6Qxnty7" + } + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and processed_actions[0].dsl_script.startswith("market(") + assert processed_actions[0].result is not None + trade_result = processed_actions[0].result + assert isinstance(trade_result, dict) + assert ( + trade_result[DSL_operators.CREATED_ORDERS_KEY][0]["esov"]["address_from"] + == "123_address_from" + ) + assert len(get_created_orders(processed_actions)) == 1 + order = get_created_orders(processed_actions)[0] + assert order["symbol"] == "ETH/BTC" + assert order["type"] == "market" + assert order["side"] == "buy" + + # step 3: transfer uses dependency::action_trade_1::created_orders::0::esov::address_from + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in next_actions[0].dsl_script + job3 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job3.run() + result = job3.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in processed_actions[0].dsl_script + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CREATED_TRANSACTIONS_KEY]) == 1 + assert len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + transaction = get_deposit_and_withdrawal_details(processed_actions)[0] + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == "BTC" + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == decimal.Decimal("1") + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == BLOCKCHAIN + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "123_address_from" + + # step 4.A: wait_for_blockchain_balance — mocked balance 0 triggers wait (re-call); automation not finished + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None + assert next_actions[0].dsl_script.startswith("loop_until(") + assert "blockchain_wallet_balance" in next_actions[0].dsl_script + assert "123_balance_address" in next_actions[0].dsl_script + assert "3, timeout=10, max_attempts=4, return_remaining_time=True)" in next_actions[0].dsl_script + job4 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + zero_btc_portfolio = { + "BTC": { + octobot_trading.constants.CONFIG_PORTFOLIO_FREE: decimal.Decimal(0), + octobot_trading.constants.CONFIG_PORTFOLIO_USED: decimal.Decimal(0), + octobot_trading.constants.CONFIG_PORTFOLIO_TOTAL: decimal.Decimal(0), + } + } + with mock.patch.object( + blockchain_wallet_simulator.BlockchainWalletSimulator, + "get_balance", + mock.AsyncMock(return_value=zero_btc_portfolio), + ): + await job4.run() + result = job4.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + wait_dsl = processed_actions[0].dsl_script + assert wait_dsl is not None + assert wait_dsl.startswith("loop_until(") + assert "blockchain_wallet_balance" in wait_dsl + # action got reset + assert processed_actions[0].executed_at is None + assert processed_actions[0].result is None + assert dsl_interpreter.ReCallingOperatorResult.is_re_calling_operator_result( + processed_actions[0].previous_execution_result + ) + assert result.next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(result.next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("loop_until(") + assert next_actions[0].previous_execution_result + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + next_actions[0].previous_execution_result[dsl_interpreter.ReCallingOperatorResult.__name__] + ) + assert last_execution_result.last_execution_result is not None + assert last_execution_result.last_execution_result[ + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value + ] > 0 + + # step 4.B: real balance satisfies wait condition — action completes + next_actions_description = result.next_actions_description + assert result.has_next_actions is True + job4b = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job4b.run() + result = job4b.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("loop_until(value_if(") + assert processed_actions[0].error_status is None + assert processed_actions[0].result == 1.0 # return fetched balance + assert result.next_actions_description + assert result.has_next_actions is False + + + async def test_run_multiple_actions_bundle_with_wait(self, multiple_action_bundle_with_wait): + # step 1: configure the job + job = octobot_flow_client.OctoBotActionsJob(multiple_action_bundle_with_wait, [], [], octobot_flow_client.OctoBotActionsJobResult()) + # ensure wait keywords have been considered + automation = job.description.state["automation"] + dag = automation["actions_dag"] + assert len(dag["actions"]) == 6 # 6 actions: init, deposit, wait, trade, wait, withdraw + dsl_scripts = [action["dsl_script"] for action in dag["actions"][1:]] + assert all( + dsl_script.startswith(keyword) + for dsl_script, keyword in zip(dsl_scripts, ["blockchain_wallet_transfer", "wait", "market", "wait", "withdraw"]) + ) + # run the job + await job.run() + result = job.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.ConfiguredActionDetails) + assert processed_actions[0].action == octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value + assert processed_actions[0].config is not None + assert "automation" in processed_actions[0].config + assert isinstance(processed_actions[0].config["exchange_account_details"], dict) + pre_trade_portfolio = job.after_execution_state.automation.exchange_account_elements.portfolio.content + assert pre_trade_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 1, + common_constants.PORTFOLIO_TOTAL: 1, + } + + # step 2: run the deposit action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in next_actions[0].dsl_script + job2 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job2.run() + result = job2.result + next_actions_description = result.next_actions_description + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script is not None and "blockchain_wallet_transfer" in processed_actions[0].dsl_script + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CREATED_TRANSACTIONS_KEY]) == len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + transaction = processed_actions[0].result[DSL_operators.CREATED_TRANSACTIONS_KEY][0] + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == "BTC" + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == decimal.Decimal("1") + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == BLOCKCHAIN + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x123_simulated_deposit_address_BTC" + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("wait(") + + # step 3.A: run the wait action + job3 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job3.run() + result = job3.result + next_actions_description = result.next_actions_description + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("wait(") + # next action is wait again: waiting time has not been reached yet + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("wait(") + assert next_actions[0].previous_execution_result + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + next_actions[0].previous_execution_result[dsl_interpreter.ReCallingOperatorResult.__name__] + ) + waiting_time = last_execution_result.last_execution_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] + + # step 3.B: complete the wait action + with mock.patch.object(time, "time", mock.Mock(return_value=time.time() + waiting_time)): + job4 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job4.run() + result = job4.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("wait(") + assert processed_actions[0].executed_at is not None and processed_actions[0].executed_at > 0 + + next_actions_description = result.next_actions_description + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("market(") + post_deposit_portfolio = job2.after_execution_state.automation.exchange_account_elements.portfolio.content + assert post_deposit_portfolio["BTC"] == { + common_constants.PORTFOLIO_AVAILABLE: 2, + common_constants.PORTFOLIO_TOTAL: 2, + } + + # step 4: run the trade action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("market(") + job5 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job5.run() + result = job5.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("market(") + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CREATED_ORDERS_KEY]) == len(get_created_orders(processed_actions)) == 1 + post_trade_portfolio = job5.after_execution_state.automation.exchange_account_elements.portfolio.content + assert post_trade_portfolio["BTC"][common_constants.PORTFOLIO_AVAILABLE] < post_deposit_portfolio["BTC"][common_constants.PORTFOLIO_AVAILABLE] + assert 0.990 < post_trade_portfolio["ETH"][common_constants.PORTFOLIO_AVAILABLE] <= 0.999 + assert 0.990 < post_trade_portfolio["ETH"][common_constants.PORTFOLIO_TOTAL] <= 0.999 + # step 5.A: run the wait action + next_actions_description = result.next_actions_description + job6 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job6.run() + result = job6.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("wait(") + assert processed_actions[0].previous_execution_result + last_execution_result = dsl_interpreter.ReCallingOperatorResult.from_dict( + processed_actions[0].previous_execution_result[dsl_interpreter.ReCallingOperatorResult.__name__] + ) + waiting_time = last_execution_result.last_execution_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] + + # step 5.B: complete the wait action + next_actions_description = result.next_actions_description + with mock.patch.object(time, "time", mock.Mock(return_value=time.time() + waiting_time)): + job7 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job7.run() + result = job7.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("wait(") + assert processed_actions[0].executed_at is not None and processed_actions[0].executed_at > 0 + + + + # step 6: run the withdraw action + next_actions_description = result.next_actions_description + assert next_actions_description is not None + parsed_state = octobot_flow.AutomationState.from_dict(next_actions_description.state) + next_actions = parsed_state.automation.actions_dag.get_executable_actions() + assert len(next_actions) == 1 + assert isinstance(next_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert next_actions[0].dsl_script.startswith("withdraw(") + job8 = octobot_flow_client.OctoBotActionsJob( + next_actions_description.to_dict(include_default_values=False), [], [], + octobot_flow_client.OctoBotActionsJobResult(), + ) + await job8.run() + result = job8.result + assert len(result.processed_actions) == 1 + processed_actions = result.processed_actions + assert len(processed_actions) == 1 + assert isinstance(processed_actions[0], octobot_flow.entities.DSLScriptActionDetails) + assert processed_actions[0].dsl_script.startswith("withdraw(") + assert processed_actions[0].result is not None + assert len(processed_actions[0].result[DSL_operators.CREATED_WITHDRAWALS_KEY]) == len(get_deposit_and_withdrawal_details(processed_actions)) == 1 + transaction = processed_actions[0].result[DSL_operators.CREATED_WITHDRAWALS_KEY][0] + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == "ETH" + assert 0.990 < transaction[trading_enums.ExchangeConstantsTransactionColumns.AMOUNT.value] <= 0.999 + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.NETWORK.value] == "ethereum" + assert transaction[trading_enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0x1234567890123456789012345678901234567890" + post_withdraw_portfolio = job8.after_execution_state.automation.exchange_account_elements.portfolio.content + assert post_withdraw_portfolio["BTC"] == post_trade_portfolio["BTC"] + assert "ETH" not in post_withdraw_portfolio + assert result.has_next_actions is False # no more actions to execute diff --git a/packages/node/tests/scheduler/test_scheduler.py b/packages/node/tests/scheduler/test_scheduler.py new file mode 100644 index 0000000000..29b014a9d9 --- /dev/null +++ b/packages/node/tests/scheduler/test_scheduler.py @@ -0,0 +1,151 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import json +import mock +import pytest +import dbos + +import octobot_commons.cryptography +import octobot_node.config +import octobot_node.models +import octobot_node.scheduler.encryption as encryption +import octobot_node.scheduler.encryption.task_inputs as task_inputs_encryption +import octobot_node.scheduler.workflows.params as params +import octobot_node.scheduler.scheduler as scheduler_module + + +def _build_mock_workflow_status(task: octobot_node.models.Task, encrypted_state: str, state_metadata: str, workflow_id: str = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa") -> mock.Mock: + output = params.AutomationWorkflowOutput(state=encrypted_state, state_metadata=state_metadata) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0) + ws = mock.Mock(spec=dbos.WorkflowStatus) + ws.workflow_id = workflow_id + ws.name = "test-task" + ws.status = dbos.WorkflowStatusString.SUCCESS.value + ws.output = json.dumps(output.to_dict()) + ws.input = {"args": [inputs.to_dict()], "kwargs": {}} + ws.created_at = None + ws.updated_at = None + return ws + + +def _derive_ecdsa_public_key(ecdsa_private_key: bytes) -> bytes: + from cryptography.hazmat.primitives.serialization import load_pem_private_key, Encoding, PublicFormat + private = load_pem_private_key(ecdsa_private_key, password=None) + return private.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) + + +def _make_scheduler_with_mock_instance() -> tuple[scheduler_module.Scheduler, mock.AsyncMock]: + sched = scheduler_module.Scheduler() + sched.INSTANCE = mock.AsyncMock() + return sched, sched.INSTANCE + + +class TestSchedulerGetResults: + + @pytest.mark.asyncio + async def test_get_results_returns_plaintext_when_no_user_pub_key(self): + """When node-side encryption is on but no user RSA public key is set, get_results must + return the decrypted plaintext state (not the server-encrypted blob).""" + rsa_private_key, _ = octobot_commons.cryptography.generate_rsa_key_pair(2048) + ecdsa_private_key, _ = octobot_commons.cryptography.generate_ecdsa_key_pair() + + plaintext_state = json.dumps({"result": "my_output", "value": 42}) + + task_inputs_encryption._server_rsa_public_key_bytes.cache_clear() + task_inputs_encryption._server_ecdsa_public_key_bytes.cache_clear() + + encryption_patches = ( + mock.patch.object(octobot_node.config.settings, "TASKS_SERVER_RSA_PRIVATE_KEY", rsa_private_key), + mock.patch.object(octobot_node.config.settings, "TASKS_SERVER_ECDSA_PRIVATE_KEY", ecdsa_private_key), + ) + with encryption_patches[0], encryption_patches[1]: + assert octobot_node.config.settings.is_node_side_encryption_enabled is True + encrypted_state, state_metadata = task_inputs_encryption.encrypt_task_content(plaintext_state) + + task = octobot_node.models.Task( + id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + name="test-task", + content=encrypted_state, + content_metadata=state_metadata, + type="execute_actions", + user_rsa_public_key=None, + ) + ws = _build_mock_workflow_status(task, encrypted_state, state_metadata) + + sched, mock_instance = _make_scheduler_with_mock_instance() + mock_instance.list_workflows_async = mock.AsyncMock(return_value=[ws]) + + with encryption_patches[0], encryption_patches[1], \ + mock.patch.object(octobot_node.config.settings, "TASKS_USER_RSA_PUBLIC_KEY", None): + executions = await sched.get_results() + + assert len(executions) == 1 + execution = executions[0] + assert execution.result == plaintext_state + assert execution.result_metadata == "" + + @pytest.mark.asyncio + async def test_get_results_encrypts_result_with_user_pub_key(self): + """When node-side encryption is on and a user RSA public key is provided on the task, + get_results must re-encrypt the result with that key and return non-empty metadata.""" + rsa_private_key, _ = octobot_commons.cryptography.generate_rsa_key_pair(2048) + ecdsa_private_key, _ = octobot_commons.cryptography.generate_ecdsa_key_pair() + user_rsa_private_key, user_rsa_public_key = octobot_commons.cryptography.generate_rsa_key_pair(2048) + + plaintext_state = json.dumps({"result": "encrypted_output", "value": 99}) + + task_inputs_encryption._server_rsa_public_key_bytes.cache_clear() + task_inputs_encryption._server_ecdsa_public_key_bytes.cache_clear() + + encryption_patches = ( + mock.patch.object(octobot_node.config.settings, "TASKS_SERVER_RSA_PRIVATE_KEY", rsa_private_key), + mock.patch.object(octobot_node.config.settings, "TASKS_SERVER_ECDSA_PRIVATE_KEY", ecdsa_private_key), + ) + with encryption_patches[0], encryption_patches[1]: + assert octobot_node.config.settings.is_node_side_encryption_enabled is True + encrypted_state, state_metadata = task_inputs_encryption.encrypt_task_content(plaintext_state) + + task = octobot_node.models.Task( + id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", + name="test-task", + content=encrypted_state, + content_metadata=state_metadata, + type="execute_actions", + user_rsa_public_key=user_rsa_public_key.decode("utf-8"), + ) + ws = _build_mock_workflow_status(task, encrypted_state, state_metadata, workflow_id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb") + + sched, mock_instance = _make_scheduler_with_mock_instance() + mock_instance.list_workflows_async = mock.AsyncMock(return_value=[ws]) + + with encryption_patches[0], encryption_patches[1], \ + mock.patch.object(octobot_node.config.settings, "TASKS_USER_RSA_PUBLIC_KEY", None): + executions = await sched.get_results() + + assert len(executions) == 1 + execution = executions[0] + assert execution.result != plaintext_state + assert execution.result_metadata + + server_ecdsa_public_key = _derive_ecdsa_public_key(ecdsa_private_key) + decrypted = encryption.decrypt_task_result( + execution.result, + rsa_private_key=user_rsa_private_key, + ecdsa_public_key=server_ecdsa_public_key, + metadata=execution.result_metadata, + ) + assert decrypted == plaintext_state diff --git a/packages/node/tests/scheduler/test_task_context.py b/packages/node/tests/scheduler/test_task_context.py new file mode 100644 index 0000000000..cb08e5f9ef --- /dev/null +++ b/packages/node/tests/scheduler/test_task_context.py @@ -0,0 +1,213 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import octobot_node.scheduler.octobot_flow_client as octobot_flow_client +from octobot_node.scheduler.task_context import encrypted_task +from octobot_node.models import Task + + +class TestEncryptedTask: + def test_encrypted_task_no_encryption_keys(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = None + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = None + + with mock.patch("octobot_node.config.settings", mock_settings): + task = Task( + name="test_task", + content="plain content", + ) + original_content = task.content + + with encrypted_task(task): + # Content should remain unchanged + assert task.content == original_content + + # Content should still be unchanged after context + assert task.content == original_content + + def test_encrypted_task_decryption_error(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = b"private_key" + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = b"public_key" + + task = Task( + name="test_task", + content="encrypted_content", + content_metadata="metadata", + ) + original_content = task.content + + mock_decrypt = mock.Mock(side_effect=ValueError("Decryption failed")) + mock_logger = mock.Mock() + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch("octobot_node.scheduler.encryption.decrypt_task_content", mock_decrypt), \ + mock.patch("octobot_node.scheduler.task_context.logger", mock_logger): + with encrypted_task(task): + # Content should remain unchanged on error + assert task.content == original_content + + # Content should still be original + assert task.content == original_content + mock_logger.error.assert_called_once() + + def test_encrypted_task_exception_during_context(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = b"private_key" + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = b"public_key" + + task = Task( + name="test_task", + content="encrypted_content", + content_metadata="metadata" + ) + original_content = task.content + decrypted_content = "decrypted_content" + + mock_decrypt = mock.Mock(return_value=decrypted_content) + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch("octobot_node.scheduler.encryption.decrypt_task_content", mock_decrypt): + # Exception should propagate, but content should be restored + with pytest.raises(ValueError, match="Test exception"): + with encrypted_task(task): + # Content should be decrypted + assert task.content == decrypted_content + # Raise exception + raise ValueError("Test exception") + + # Content should be restored even after exception + assert task.content == original_content + + def test_encrypted_task_to_update_result_with_description_encrypts_and_clears(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = None + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = None + + job_description = octobot_flow_client.OctoBotActionsJobDescription( + state={}, auth_details={}, params={} + ) + processed_action = mock.Mock() + actions_dag = mock.Mock() + to_update_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[processed_action], + next_actions_description=job_description, + actions_dag=actions_dag, + ) + mock_encrypt = mock.Mock(return_value=("encrypted_payload", "encryption_meta")) + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch( + "octobot_node.scheduler.task_context.encryption.get_next_encrypted_if_needed_content_and_metadata", + mock_encrypt, + ): + task = Task(name="test_task", content="plain") + with encrypted_task(task, to_update_result=to_update_result): + pass + + mock_encrypt.assert_called_once_with( + job_description.to_dict(include_default_values=False) + ) + assert to_update_result.maybe_encrypted_next_actions_description == "encrypted_payload" + assert to_update_result.next_actions_description_encryption_metadata == "encryption_meta" + assert to_update_result.next_actions_description is None + assert to_update_result.processed_actions == [] + assert to_update_result.actions_dag is None + + def test_encrypted_task_to_update_result_without_description_clears_sensitive_fields(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = None + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = None + + processed_action = mock.Mock() + actions_dag = mock.Mock() + to_update_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[processed_action], + next_actions_description=None, + actions_dag=actions_dag, + ) + mock_encrypt = mock.Mock() + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch( + "octobot_node.scheduler.task_context.encryption.get_next_encrypted_if_needed_content_and_metadata", + mock_encrypt, + ): + task = Task(name="test_task", content="plain") + with encrypted_task(task, to_update_result=to_update_result): + pass + + mock_encrypt.assert_not_called() + assert to_update_result.maybe_encrypted_next_actions_description is None + assert to_update_result.next_actions_description_encryption_metadata is None + assert to_update_result.next_actions_description is None + assert to_update_result.processed_actions == [] + assert to_update_result.actions_dag is None + + def test_encrypted_task_to_update_result_runs_finally_after_exception(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = None + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = None + + job_description = octobot_flow_client.OctoBotActionsJobDescription( + state={}, auth_details={}, params={} + ) + to_update_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[mock.Mock()], + next_actions_description=job_description, + actions_dag=mock.Mock(), + ) + mock_encrypt = mock.Mock(return_value=("enc", "meta")) + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch( + "octobot_node.scheduler.task_context.encryption.get_next_encrypted_if_needed_content_and_metadata", + mock_encrypt, + ): + task = Task(name="test_task", content="plain") + with pytest.raises(RuntimeError, match="inner"): + with encrypted_task(task, to_update_result=to_update_result): + raise RuntimeError("inner") + + assert to_update_result.maybe_encrypted_next_actions_description == "enc" + assert to_update_result.next_actions_description_encryption_metadata == "meta" + assert to_update_result.processed_actions == [] + + def test_encrypted_task_per_task_ecdsa_key_takes_precedence(self) -> None: + mock_settings = mock.Mock() + mock_settings.TASKS_SERVER_RSA_PRIVATE_KEY = b"server_rsa_priv" + mock_settings.TASKS_USER_ECDSA_PUBLIC_KEY = b"env_global_key" + + task = Task( + name="test_task", + content="encrypted_content", + content_metadata="metadata", + user_ecdsa_public_key="per_task_key", + ) + mock_decrypt = mock.Mock(return_value="decrypted_content") + + with mock.patch("octobot_node.config.settings", mock_settings), \ + mock.patch("octobot_node.scheduler.encryption.decrypt_task_content", mock_decrypt): + with encrypted_task(task): + assert task.content == "decrypted_content" + + mock_decrypt.assert_called_once_with( + "encrypted_content", "metadata", user_ecdsa_public_key=b"per_task_key" + ) diff --git a/packages/node/tests/scheduler/test_tasks.py b/packages/node/tests/scheduler/test_tasks.py new file mode 100644 index 0000000000..45632c1f7c --- /dev/null +++ b/packages/node/tests/scheduler/test_tasks.py @@ -0,0 +1,77 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import json +import mock +import pytest + +import octobot_node.models +import octobot_node.scheduler.tasks + +from tests.scheduler import temp_dbos_scheduler + +@pytest.fixture +def schedule_task(): + return octobot_node.models.Task( + name="test_task", + content=json.dumps( + { + "ACTIONS": "trade", + "EXCHANGE_FROM": "binance", + "ORDER_SYMBOL": "ETH/BTC", + "ORDER_AMOUNT": 1, + "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", + "SIMULATED_PORTFOLIO": { + "BTC": 1, + }, + } + ), + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + + +class TestTriggerTask: + """Tests for trigger_task function.""" + + @pytest.mark.asyncio + async def test_trigger_all_task_types(self, schedule_task, temp_dbos_scheduler): + """Test trigger_task for START_OCTOBOT type.""" + for task_type in octobot_node.models.TaskType: + schedule_task.type = task_type.value + with mock.patch.object( + temp_dbos_scheduler.AUTOMATION_WORKFLOW_QUEUE, "enqueue_async", mock.AsyncMock() + ) as mock_enqueue_async: + result = await octobot_node.scheduler.tasks.trigger_task(schedule_task) + assert result is True + mock_enqueue_async.assert_called_once() + call_kwargs = mock_enqueue_async.call_args[1] + assert "inputs" in call_kwargs + assert len(call_kwargs["inputs"]) == 1 + inputs = call_kwargs["inputs"] + assert inputs["task"] == schedule_task.model_dump(exclude_defaults=True) + with pytest.raises(ValueError, match="Unsupported task type"): + with mock.patch.object( + temp_dbos_scheduler.AUTOMATION_WORKFLOW_QUEUE, "enqueue_async", mock.AsyncMock() + ) as mock_enqueue_async: + schedule_task.type = "invalid_type" + await octobot_node.scheduler.tasks.trigger_task(schedule_task) + mock_enqueue_async.assert_not_called() diff --git a/packages/node/tests/scheduler/test_tasks_recovery.py b/packages/node/tests/scheduler/test_tasks_recovery.py new file mode 100644 index 0000000000..ff09119598 --- /dev/null +++ b/packages/node/tests/scheduler/test_tasks_recovery.py @@ -0,0 +1,158 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import tempfile +import dbos +import logging +import time + +import octobot_node.scheduler + +QUEUE = dbos.Queue(name="test_queue") + +WF_TO_CREATE = 10 +WF_SLEEP_TIME = 1.5 # note: reducing this value wont speed up the test + +async def _init_dbos_scheduler(db_file_name: str, reset_database: bool = False): + config: dbos.DBOSConfig = { + "name": "scheduler_test", + "system_database_url": f"sqlite:///{db_file_name}", + "max_executor_threads": 2, # 2 is the minimum number of threads to let dbos recover properly with pending workflows + } + dbos.DBOS(config=config) + if reset_database: + dbos.DBOS.reset_system_database() + octobot_node.scheduler.SCHEDULER.INSTANCE = dbos.DBOS + + +class TestSchedulerRecovery: + + @pytest.mark.asyncio + async def test_recover_after_shutdown(self): + with tempfile.NamedTemporaryFile() as temp_file: + await _init_dbos_scheduler(temp_file.name, reset_database=True) + + @octobot_node.scheduler.SCHEDULER.INSTANCE.dbos_class() + class Sleeper(): + @staticmethod + @octobot_node.scheduler.SCHEDULER.INSTANCE.workflow() + async def sleeper_workflow(identifier: float) -> float: + logging.info(f"sleeper_workflow {identifier} started") + await dbos.DBOS.sleep_async(WF_SLEEP_TIME) + logging.info(f"sleeper_workflow {identifier} done") + return identifier + + logging.info(f"Launching DBOS instance 1 ...") + octobot_node.scheduler.SCHEDULER.INSTANCE.launch() + logging.info(f"DBOS instance 1 launched") + + # 1. simple execution + t0 = time.time() + for i in range(WF_TO_CREATE): + await QUEUE.enqueue_async(Sleeper.sleeper_workflow, i) + wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=["ENQUEUED", "PENDING"] + ) + assert len(wfs) == WF_TO_CREATE + results_part1: list[int] = [] + for wf_status in wfs: + handle = await octobot_node.scheduler.SCHEDULER.INSTANCE.retrieve_workflow_async(wf_status.workflow_id) + r = await handle.get_result() + assert 0 <= r < WF_TO_CREATE + results_part1.append(int(r)) + duration = time.time() - t0 + logging.info(f"Workflow batch completed in {duration} seconds") + max_duration = WF_TO_CREATE * WF_SLEEP_TIME * 0.9 # 90% of the 1 by 1 time to ensure asynchronous execution. usually 3 to 4 seconds on a normal machine + assert duration <= max_duration, f"Workflow batch part 1 completed in {duration} seconds, expected <= {max_duration}" + assert sorted(results_part1) == list(range(WF_TO_CREATE)) + success_wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=[dbos.WorkflowStatusString.SUCCESS.value] + ) + assert len(success_wfs) == WF_TO_CREATE + + # 2. enqueue 10 more and restart + for i in range(WF_TO_CREATE): + await QUEUE.enqueue_async(Sleeper.sleeper_workflow, i) + logging.info(f"Destroying DBOS instance 1 ...") + octobot_node.scheduler.SCHEDULER.INSTANCE.destroy() + logging.info(f"DBOS instance 1 destroyed") + + # 3. restart and check completed workflows + logging.info(f"Launching DBOS instance 2 ...") + await _init_dbos_scheduler(temp_file.name) + octobot_node.scheduler.SCHEDULER.INSTANCE.launch() + logging.info(f"DBOS instance 2 launched") + all_wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async() + assert len(all_wfs) == WF_TO_CREATE * 2 + pending_wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=["ENQUEUED", "PENDING"] + ) + assert len(pending_wfs) == WF_TO_CREATE + # enqueue a second batch of workflows + for i in range(WF_TO_CREATE, WF_TO_CREATE*2): + await QUEUE.enqueue_async(Sleeper.sleeper_workflow, i) + # Only ENQUEUED/PENDING: part 1 workflows are already SUCCESS (same inputs 0..9) + # and must not be awaited again or get_result would duplicate those ids here. + wfs_to_finish = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=[dbos.WorkflowStatusString.ENQUEUED.value, dbos.WorkflowStatusString.PENDING.value] + ) + assert len(wfs_to_finish) == WF_TO_CREATE * 2 + t0 = time.time() + results_part2: list[int] = [] + for wf_status in wfs_to_finish: + handle = await octobot_node.scheduler.SCHEDULER.INSTANCE.retrieve_workflow_async(wf_status.workflow_id) + r = await handle.get_result() + assert 0 <= r < WF_TO_CREATE*2 + results_part2.append(int(r)) + duration = time.time() - t0 + logging.info(f"2 parallel workflow batches completed in {duration} seconds") + max_duration = WF_TO_CREATE * WF_SLEEP_TIME * 2 * 0.9 # 90% of the 1 by 1 time to ensure asynchronous execution. usually 3 to 4 seconds on a normal machine + assert duration < max_duration, f"Workflow batch part 2 completed in {duration} seconds, expected <= {max_duration}" + assert sorted(results_part2) == list(range(WF_TO_CREATE*2)) + success_wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=[dbos.WorkflowStatusString.SUCCESS.value] + ) + assert len(success_wfs) == WF_TO_CREATE * 3 + no_pending = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=[dbos.WorkflowStatusString.ENQUEUED.value, dbos.WorkflowStatusString.PENDING.value] + ) + assert no_pending == [] + logging.info(f"Destroying DBOS instance 2 ...") + octobot_node.scheduler.SCHEDULER.INSTANCE.destroy() + logging.info(f"DBOS instance 2 destroyed") + + # 4. restart and check completed workflows + logging.info(f"Launching DBOS instance 3 ...") + await _init_dbos_scheduler(temp_file.name) + octobot_node.scheduler.SCHEDULER.INSTANCE.launch() + logging.info(f"DBOS instance 3 launched") + # all 30 worflows are now historized + pending_wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async( + status=["ENQUEUED", "PENDING"] + ) + assert pending_wfs == [] + all_wfs = await octobot_node.scheduler.SCHEDULER.INSTANCE.list_workflows_async() + assert len(all_wfs) == WF_TO_CREATE * 3 + logging.info(f"Destroying DBOS instance 3 ...") + octobot_node.scheduler.SCHEDULER.INSTANCE.destroy() + logging.info(f"DBOS instance 3 destroyed") + + \ No newline at end of file diff --git a/packages/node/tests/scheduler/workflows/__init__.py b/packages/node/tests/scheduler/workflows/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/node/tests/scheduler/workflows/test_automation_workflow.py b/packages/node/tests/scheduler/workflows/test_automation_workflow.py new file mode 100644 index 0000000000..e645d8caf5 --- /dev/null +++ b/packages/node/tests/scheduler/workflows/test_automation_workflow.py @@ -0,0 +1,1407 @@ +# Drakkar-Software OctoBot-Node +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import asyncio +import json +import functools +import mock +import pytest +import time +import typing +import tempfile +import dbos + +import octobot_trading.constants +import octobot_commons.cryptography + +import octobot_copy.entities +import octobot_node.config +import octobot_node.constants +import octobot_node.enums +import octobot_node.scheduler +import octobot_node.scheduler.workflows +import octobot_node.errors as errors +import octobot_node.models +import octobot_node.scheduler.workflows.params as params +import octobot_node.scheduler.octobot_flow_client as octobot_flow_client +import octobot_node.scheduler.encryption.task_inputs as task_inputs_encryption +import octobot_node.scheduler.task_context as task_context + + +from tests.scheduler import temp_dbos_scheduler, init_and_destroy_scheduler + + +IMPORTED_OCTOBOT_FLOW = True +AUTOMATION_WORKFLOW_IMPORTED = False +try: + import octobot_flow.entities + import octobot_flow.enums + import octobot_flow.errors + +except ImportError: + IMPORTED_OCTOBOT_FLOW = False + + +@pytest.fixture +def import_automation_workflow(): + global AUTOMATION_WORKFLOW_IMPORTED + if not AUTOMATION_WORKFLOW_IMPORTED: + with tempfile.NamedTemporaryFile() as temp_file: + init_and_destroy_scheduler(temp_file.name) + import octobot_node.scheduler.workflows.automation_workflow + AUTOMATION_WORKFLOW_IMPORTED = True + + +def _automation_state_dict(actions: list[dict[str, typing.Any]]) -> dict[str, typing.Any]: + """Build automation state dict with raw action dicts (JSON-serializable).""" + return { + "automation": { + "metadata": {"automation_id": "automation_1"}, + "actions_dag": {"actions": actions}, + } + } + + +def _parse_automation_workflow_output( + workflow_output: str, +) -> params.AutomationWorkflowOutput: + """ + Parse a completed automation workflow result (``get_result()`` string or dict-shaped DBOS output) + into ``AutomationWorkflowOutput``. ``AutomationWorkflowOutput.state`` is always a str (JSON + document text); use ``json.loads(parsed.state)`` for a dict tree. + """ + payload = json.loads(workflow_output) + return params.AutomationWorkflowOutput.from_dict(payload) + + +def _expected_automation_workflow_envelope_json(state_document: str, error: str | None = None) -> str: + """Mirror ``execute_automation`` return: ``json.dumps(AutomationWorkflowOutput.to_dict(...))``.""" + return json.dumps( + params.AutomationWorkflowOutput(state=state_document, error=error).to_dict( + include_default_values=False + ) + ) + + +def _job_description_dict_from_output(parsed: params.AutomationWorkflowOutput) -> dict[str, typing.Any]: + """Decode ``parsed.state`` (OctoBotActionsJobDescription JSON text).""" + assert isinstance(parsed.state, str) + return json.loads(parsed.state) + + +def _apply_octobot_actions_job_result_template( + target: octobot_flow_client.OctoBotActionsJobResult, + template: octobot_flow_client.OctoBotActionsJobResult, +) -> None: + """Copy fields from ``template`` onto ``target`` (real ``run()`` mutates ``OctoBotActionsJob.result`` in place).""" + target.processed_actions = template.processed_actions + target.next_actions_description = template.next_actions_description + target.has_next_actions = template.has_next_actions + target.actions_dag = template.actions_dag + target.should_stop = template.should_stop + + +def _octobot_actions_job_mock_class( + *, + run_on_result: typing.Callable[[octobot_flow_client.OctoBotActionsJobResult], typing.Any] | None = None, + run_side_effect: typing.Any = None, + latest_result_ref: list[octobot_flow_client.OctoBotActionsJobResult | None] | None = None, +) -> tuple[mock.Mock, mock.AsyncMock | None]: + """ + Patch target for ``OctoBotActionsJob``: each constructor call receives ``result`` at index 3. + + Use ``run_on_result`` to mutate that object like production ``run()`` (one ``run`` mock per job instance). + + Use ``run_side_effect`` with a **shared** ``run`` mock so ``await_count`` aggregates across job instances + (exceptions, retries, or repeated failures). When ``run_side_effect`` needs the current ``OctoBotActionsJobResult``, + pass ``latest_result_ref=[None]`` and assign ``latest_result_ref[0] = args[3]`` on each construction. + """ + if (run_on_result is None) == (run_side_effect is None): + raise ValueError("Pass exactly one of run_on_result or run_side_effect") + + if run_side_effect is not None: + run_mock = mock.AsyncMock(side_effect=run_side_effect) + + def mock_job_factory(*args, **kwargs): + if latest_result_ref is not None: + latest_result_ref[0] = args[3] + return mock.Mock(run=run_mock) + + return mock.Mock(side_effect=mock_job_factory), run_mock + + def mock_job_factory(*args, **kwargs): + result_ref = args[3] + + async def assign_result(*args, **kwargs): + outcome = run_on_result(result_ref) + if asyncio.iscoroutine(outcome): + await outcome + + return mock.Mock(run=mock.AsyncMock(side_effect=assign_result)) + + return mock.Mock(side_effect=mock_job_factory), None + + +def _user_actions_update_envelope(user_action_dicts: list[dict]) -> dict[str, typing.Any]: + return params.AutomationWorkflowActionUpdate( + actions_type=octobot_node.enums.AutomationWorkflowActionTypes.USER_ACTIONS.value, + actions_details=user_action_dicts, + ).to_dict(include_default_values=False) + + +def _trading_signal_update_envelope(signal_dicts: list[dict]) -> dict[str, typing.Any]: + return params.AutomationWorkflowActionUpdate( + actions_type=octobot_node.enums.AutomationWorkflowActionTypes.TRADING_SIGNAL.value, + actions_details=signal_dicts, + ).to_dict(include_default_values=False) + + +@pytest.fixture +def parsed_inputs(): + task = octobot_node.models.Task( + name="test_task", + content="{}", + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + return params.AutomationWorkflowInputs(task=task, execution_time=0) + + +@pytest.fixture +def task(): + return octobot_node.models.Task( + name="test_task", + content="{}", + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + + +@pytest.fixture +def iteration_result(): + return params.AutomationWorkflowIterationResult( + progress_status=params.ProgressStatus( + latest_step="action_1", + next_step="action_2", + next_step_at=0.0, + remaining_steps=1, + error=None, + should_stop=False, + ), + next_iteration_description='{"state": {"automation": {}}}', + has_next_actions=True, + ) + +def required_imports(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + if not IMPORTED_OCTOBOT_FLOW: + pytest.skip(reason="octobot_flow is not installed") + return await func(*args, **kwargs) + return wrapper + + +class TestExecuteAutomation: + # use a minimal amount of tests to avoid wasting time initializing the scheduler + @pytest.mark.asyncio + @required_imports + async def test_execute_automation( + self, temp_dbos_scheduler, parsed_inputs, iteration_result + ): + # 1. No delay: calls iteration and stops when _should_continue returns False + inputs = parsed_inputs.to_dict(include_default_values=False) + iter_result = params.AutomationWorkflowIterationResult( + progress_status=iteration_result.progress_status, + next_iteration_description=None, + has_next_actions=False, + ) + mock_iteration = mock.AsyncMock(return_value=iter_result.to_dict(include_default_values=False)) + mock_should_continue = mock.Mock(return_value=False) + mock_process = mock.AsyncMock() + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "execute_iteration", + mock_iteration, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock_should_continue, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_process_pending_priority_actions_and_reschedule", + mock_process, + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + assert await handle.get_result() is None # next_iteration_description.next_actions_description is None + mock_iteration.assert_called_once_with(inputs, None) + mock_should_continue.assert_called_once() + mock_process.assert_not_called() + + # 2. With delay: waits, calls iteration, _process_pending not called + parsed_inputs.execution_time = time.time() + 100 + inputs = parsed_inputs.to_dict(include_default_values=False) + mock_wait = mock.AsyncMock(return_value=None) + iteration_result.next_iteration_description = json.dumps({"state": {"automation": {}}}) + mock_iteration = mock.AsyncMock(return_value=iteration_result.to_dict(include_default_values=False)) + mock_should_continue = mock.Mock(return_value=False) + mock_process = mock.AsyncMock() + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "execute_iteration", + mock_iteration, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock_should_continue, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_process_pending_priority_actions_and_reschedule", + mock_process, + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + assert await handle.get_result() == _expected_automation_workflow_envelope_json( + r'{"state": {"automation": {}}}' + ) # next_iteration_description.next_actions_description is not None + mock_wait.assert_called_once() + mock_iteration.assert_called_once_with(inputs, None) + mock_process.assert_not_called() + + # 3. With delay, _should_continue True: _process_pending called + inputs = parsed_inputs.to_dict(include_default_values=False) + mock_wait = mock.AsyncMock(return_value=None) + mock_iteration = mock.AsyncMock(return_value=iteration_result.to_dict(include_default_values=False)) + mock_should_continue = mock.Mock(return_value=True) + mock_process = mock.AsyncMock(return_value=(True, iteration_result)) + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "execute_iteration", + mock_iteration, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock_should_continue, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_process_pending_priority_actions_and_reschedule", + mock_process, + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + assert await handle.get_result() is None # _should_continue_workflow is True + mock_wait.assert_called_once() + mock_iteration.assert_called_once_with(inputs, None) + mock_should_continue.assert_called_once() + mock_process.assert_awaited_once_with(parsed_inputs, iteration_result) + + # 4. Priority actions passed to iteration (raw actions_update envelope dict) + inputs = parsed_inputs.to_dict(include_default_values=False) + actions_update = _user_actions_update_envelope([{"action": "stop"}]) + mock_wait = mock.AsyncMock(return_value=actions_update) + mock_iteration = mock.AsyncMock(return_value=iteration_result.to_dict(include_default_values=False)) + mock_should_continue = mock.Mock(return_value=False) + mock_process = mock.AsyncMock() + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "execute_iteration", + mock_iteration, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock_should_continue, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_process_pending_priority_actions_and_reschedule", + mock_process, + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + assert await handle.get_result() == _expected_automation_workflow_envelope_json( + r'{"state": {"automation": {}}}' + ) + mock_iteration.assert_called_once_with(inputs, actions_update) + mock_process.assert_not_called() + + # 5. Exceptions are caught and mapped to workflow error statuses + parsed_inputs.execution_time = 0 + inputs = parsed_inputs.to_dict(include_default_values=False) + failure_cases = [ + ( + ValueError("test error"), + octobot_flow.enums.AutomationWorkflowErrorStatus.EXCEPTION_DURING_ITERATION.value, + ), + ( + octobot_flow.errors.InvalidAutomationActionError("invalid action config"), + octobot_flow.enums.AutomationWorkflowErrorStatus.INVALID_ACTION_CONFIGURATION.value, + ), + ] + for raised_exception, expected_error_status in failure_cases: + mock_logger = mock.Mock() + mock_process = mock.AsyncMock() + mock_octobot_actions_job_class, run_mock = _octobot_actions_job_mock_class( + run_side_effect=raised_exception + ) + with mock.patch( + "asyncio.sleep", mock.AsyncMock() + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock.Mock(return_value=False), + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "get_logger", + mock.Mock(return_value=mock_logger), + ), mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_process_pending_priority_actions_and_reschedule", + mock_process, + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + workflow_result = await handle.get_result() + assert workflow_result == json.dumps( + params.AutomationWorkflowOutput( + error=expected_error_status + ).to_dict(include_default_values=False) + ) + parsed_output = _parse_automation_workflow_output(workflow_result) + assert parsed_output.state is None + assert parsed_output.error == expected_error_status + assert ( + run_mock.await_count + == octobot_node.constants.AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES + ) + mock_logger.exception.assert_called_once() + mock_process.assert_not_called() + + +class TestExecuteIteration: + def setup_method(self): + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True + + def teardown_method(self): + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = False + + @pytest.mark.asyncio + @required_imports + async def test_execute_iteration_returns_iteration_result(self, import_automation_workflow, task): + task.content = json.dumps({"params": {"ACTIONS": "trade", "EXCHANGE_FROM": "binance", + "ORDER_SYMBOL": "ETH/BTC", "ORDER_AMOUNT": 1, "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", "SIMULATED_PORTFOLIO": {"BTC": 1}}}) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict(include_default_values=False) + + action = octobot_flow.entities.ConfiguredActionDetails( + id="action_1", + action="trade", + ) + + mock_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[action], + next_actions_description=None, + actions_dag=None, + should_stop=False, + ) + mock_octobot_actions_job_class, _ = _octobot_actions_job_mock_class( + run_on_result=lambda result_ref: _apply_octobot_actions_job_result_template(result_ref, mock_result), + ) + + with mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ): + result = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_iteration(inputs, None) + + assert "progress_status" in result + assert "next_iteration_description" in result + parsed_progress_status = params.ProgressStatus.model_validate(result["progress_status"]) + assert parsed_progress_status.latest_step == "trade" + assert parsed_progress_status.error is None + assert parsed_progress_status.should_stop is False + + @pytest.mark.asyncio + async def test_execute_iteration_invalid_task_type_raises_workflow_input_error(self, import_automation_workflow, task): + task.type = "invalid_type" + task.content = "{}" + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict(include_default_values=False) + + with mock.patch.object(task_context, "encrypted_task", mock.MagicMock()) as mock_encrypted: + mock_encrypted.return_value.__enter__ = mock.Mock(return_value=None) + mock_encrypted.return_value.__exit__ = mock.Mock(return_value=None) + with pytest.raises(errors.WorkflowInputError, match="Invalid task type"): + await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_iteration(inputs, None) + + @pytest.mark.asyncio + @required_imports + async def test_execute_iteration_execution_error_sets_progress_error(self, import_automation_workflow, task): + task.content = json.dumps({"params": {"ACTIONS": "trade", "EXCHANGE_FROM": "binance", + "ORDER_SYMBOL": "ETH/BTC", "ORDER_AMOUNT": 1, "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", "SIMULATED_PORTFOLIO": {"BTC": 1}}}) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict(include_default_values=False) + + action = octobot_flow.entities.ConfiguredActionDetails( + id="action_1", + action="trade", + error_status="some_error", + ) + template_result = octobot_flow_client.OctoBotActionsJobResult(processed_actions=[action]) + mock_octobot_actions_job_class, _ = _octobot_actions_job_mock_class( + run_on_result=lambda result_ref: _apply_octobot_actions_job_result_template(result_ref, template_result), + ) + + with mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ): + result = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_iteration(inputs, None) + + parsed_progress_status = params.ProgressStatus.model_validate(result["progress_status"]) + assert parsed_progress_status.error == "some_error" + + @pytest.mark.asyncio + @required_imports + async def test_execute_iteration_passes_trading_signals_to_octobot_actions_job( + self, import_automation_workflow, task + ): + task.content = json.dumps({"params": {"ACTIONS": "trade", "EXCHANGE_FROM": "binance", + "ORDER_SYMBOL": "ETH/BTC", "ORDER_AMOUNT": 1, "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", "SIMULATED_PORTFOLIO": {"BTC": 1}}}) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict(include_default_values=False) + + signal = octobot_flow.entities.TradingSignal( + account=octobot_copy.entities.Account(), + strategy_id="test-strategy-id", + ) + signal_dict = signal.to_dict(include_default_values=False) + actions_update = _trading_signal_update_envelope([signal_dict]) + + action = octobot_flow.entities.ConfiguredActionDetails(id="action_1", action="trade") + mock_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[action], + next_actions_description=None, + actions_dag=None, + should_stop=False, + ) + mock_octobot_actions_job_class, _ = _octobot_actions_job_mock_class( + run_on_result=lambda result_ref: _apply_octobot_actions_job_result_template(result_ref, mock_result), + ) + + with mock.patch.object(task_context, "encrypted_task", mock.MagicMock()) as mock_encrypted: + mock_encrypted.return_value.__enter__ = mock.Mock(return_value=None) + mock_encrypted.return_value.__exit__ = mock.Mock(return_value=None) + with mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ) as mock_job_factory: + await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_iteration( + inputs, actions_update + ) + + assert mock_job_factory.call_args[0][2] == [signal_dict] + + +class TestWaitAndTriggerOnActionsUpdate: + @pytest.mark.asyncio + async def test_wait_and_trigger_returns_empty_when_no_actions(self, import_automation_workflow, parsed_inputs): + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE, + "recv_async", + mock.AsyncMock(return_value=[]), + ): + result = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._wait_and_trigger_on_actions_update( + parsed_inputs, 0 + ) + assert result is None + + @pytest.mark.asyncio + async def test_wait_and_trigger_returns_envelopes_when_received(self, import_automation_workflow, parsed_inputs): + envelope = _user_actions_update_envelope([{"action": "stop"}]) + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE, + "recv_async", + mock.AsyncMock(return_value=envelope), + ): + result = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._wait_and_trigger_on_actions_update( + parsed_inputs, 0 + ) + assert result == envelope + + +class TestProcessPendingPriorityActionsAndReschedule: + @pytest.mark.asyncio + async def test_process_pending_returns_false_when_no_next_iteration(self, import_automation_workflow, parsed_inputs, iteration_result): + iteration_result.has_next_actions = False + should_continue, updated_result = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._process_pending_priority_actions_and_reschedule( + parsed_inputs, iteration_result + ) + assert should_continue is False + assert updated_result is iteration_result + + @pytest.mark.asyncio + async def test_process_pending_schedules_next_when_no_priority_actions( + self, import_automation_workflow, parsed_inputs, iteration_result + ): + mock_wait = mock.AsyncMock(return_value=None) + mock_schedule = mock.AsyncMock() + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_schedule_next_iteration", + mock_schedule, + ): + should_continue, _ = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._process_pending_priority_actions_and_reschedule( + parsed_inputs, iteration_result + ) + assert should_continue is True + mock_wait.assert_awaited_once_with(parsed_inputs, 0) + mock_schedule.assert_called_once() + + @pytest.mark.asyncio + async def test_process_pending_returns_false_when_should_stop(self, import_automation_workflow, parsed_inputs, iteration_result): + iteration_result.progress_status.should_stop = True + mock_wait = mock.AsyncMock(return_value=None) + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ): + should_continue, _ = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._process_pending_priority_actions_and_reschedule( + parsed_inputs, iteration_result + ) + assert should_continue is True + + @pytest.mark.asyncio + async def test_process_pending_raises_when_no_next_iteration_after_priority_actions( + self, import_automation_workflow, parsed_inputs, iteration_result + ): + result_without_next = params.AutomationWorkflowIterationResult( + progress_status=params.ProgressStatus( + latest_step="done", + next_step=None, + next_step_at=None, + remaining_steps=0, + error=None, + should_stop=False, + ), + next_iteration_description=json.dumps({"state": {"automation": {}}}), + has_next_actions=False, + ) + mock_wait = mock.AsyncMock( + side_effect=[ + _user_actions_update_envelope([{"action": "stop"}]), + None, + ] + ) + mock_iteration = mock.AsyncMock( + return_value=result_without_next.to_dict(include_default_values=False) + ) + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "execute_iteration", + mock_iteration, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock.Mock(return_value=True), + ): + with pytest.raises( + errors.WorkflowPriorityActionExecutionError, + match="no next iteration description after processing priority actions", + ): + await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._process_pending_priority_actions_and_reschedule( + parsed_inputs, iteration_result + ) + + @pytest.mark.asyncio + async def test_process_pending_with_priority_actions_schedules_next_when_iteration_has_next( + self, import_automation_workflow, parsed_inputs, iteration_result + ): + result_with_next = params.AutomationWorkflowIterationResult( + progress_status=params.ProgressStatus( + latest_step="step_1", + next_step="step_2", + next_step_at=0.0, + remaining_steps=1, + error=None, + should_stop=False, + ), + next_iteration_description='{"state": {"automation": {}}}', + has_next_actions=True, + ) + mock_wait = mock.AsyncMock( + side_effect=[ + _user_actions_update_envelope([{"action": "stop"}]), + None, + ] + ) + mock_iteration = mock.AsyncMock( + return_value=result_with_next.to_dict(include_default_values=False) + ) + mock_schedule = mock.AsyncMock() + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_wait_and_trigger_on_actions_update", + mock_wait, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "execute_iteration", + mock_iteration, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_should_continue_workflow", + mock.Mock(return_value=True), + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "_schedule_next_iteration", + mock_schedule, + ): + should_continue, _ = await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._process_pending_priority_actions_and_reschedule( + parsed_inputs, iteration_result + ) + + assert should_continue is True + mock_wait.assert_awaited() + mock_iteration.assert_called_once() + mock_schedule.assert_called_once() + + +class TestScheduleNextIteration: + @pytest.mark.asyncio + async def test_schedule_next_iteration_enqueues_workflow(self, import_automation_workflow, parsed_inputs, iteration_result): + mock_enqueue = mock.AsyncMock() + next_desc = iteration_result.next_iteration_description + + with mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.AUTOMATION_WORKFLOW_QUEUE, + "enqueue_async", + mock_enqueue, + ): + await octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._schedule_next_iteration( + parsed_inputs, next_desc, iteration_result.progress_status + ) + mock_enqueue.assert_called_once() + call_args = mock_enqueue.call_args + assert call_args[0][0] == octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation + assert "inputs" in call_args[1] + + +class TestCreateNextIterationInputs: + def test_create_next_iteration_inputs_returns_correct_dict(self, import_automation_workflow, task): + parsed_inputs = params.AutomationWorkflowInputs(task=task, execution_time=0) + next_iteration_description = '{"state": {}}' + next_execution_time = 123.0 + + result = octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._create_next_iteration_inputs( + parsed_inputs, next_iteration_description, next_execution_time + ) + assert "task" in result + parsed_result = params.AutomationWorkflowInputs.from_dict(result) + task = parsed_result.task + content = task.get("content") if isinstance(task, dict) else task.content + assert content == next_iteration_description + assert parsed_result.execution_time == 123.0 + + def test_create_next_iteration_inputs_uses_zero_when_execution_time_none(self, import_automation_workflow, task): #todo + parsed_inputs = params.AutomationWorkflowInputs(task=task, execution_time=0) + result = octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._create_next_iteration_inputs( + parsed_inputs, "{}", None + ) + result = params.AutomationWorkflowInputs.from_dict(result) + assert result.execution_time == 0 + + +class TestShouldContinueWorkflow: + def test_should_continue_returns_stop_on_error_when_error(self, import_automation_workflow, parsed_inputs): + progress = params.ProgressStatus(error="some_error", should_stop=False) + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._should_continue_workflow( + parsed_inputs, progress, True + ) is True + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._should_continue_workflow( + parsed_inputs, progress, False + ) is False + + def test_should_continue_returns_false_when_should_stop(self, import_automation_workflow, parsed_inputs): + progress = params.ProgressStatus(error=None, should_stop=True) + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._should_continue_workflow( + parsed_inputs, progress, True + ) is False + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._should_continue_workflow( + parsed_inputs, progress, False + ) is False + + def test_should_continue_returns_true_by_no_reason_to_stop(self, import_automation_workflow, parsed_inputs): + progress = params.ProgressStatus(error=None, should_stop=False) + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._should_continue_workflow( + parsed_inputs, progress, True + ) is True + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._should_continue_workflow( + parsed_inputs, progress, False + ) is True + + +class TestGetActionsSummary: + def test_get_actions_summary_empty_returns_empty_string(self, import_automation_workflow): + assert octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._get_actions_summary([]) == "" + + @pytest.mark.asyncio + @required_imports + async def test_get_actions_summary_joins_action_summaries(self, import_automation_workflow): + action1 = octobot_flow.entities.ConfiguredActionDetails(id="action_1", action="action_1") + action2 = octobot_flow.entities.DSLScriptActionDetails(id="action_2", dsl_script="action_2('plop')") + result = octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._get_actions_summary([action1, action2]) + assert result == "action_1, action_2('plop')" + + # with minimal=True, only the first operator name is returned + result = octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._get_actions_summary([action1, action2], minimal=True) + assert result == "action_1, action_2" + + def test_get_actions_summary_minimal_calls_get_summary_with_minimal(self, import_automation_workflow): + mock_action = mock.Mock() + mock_action.get_summary = mock.Mock(return_value="sum") + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow._get_actions_summary([mock_action], minimal=True) + mock_action.get_summary.assert_called_once_with(minimal=True) + + +class TestGetLogger: + def test_get_logger_uses_task_name(self, import_automation_workflow, parsed_inputs): + with mock.patch("octobot_commons.logging.get_logger", mock.Mock()) as mock_get_logger: + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.get_logger(parsed_inputs) + mock_get_logger.assert_called_once_with("test_task") + + def test_get_logger_uses_class_name_when_task_name_none(self, import_automation_workflow): + task = octobot_node.models.Task(name=None, content="{}") + parsed_inputs = params.AutomationWorkflowInputs(task=task, execution_time=0) + with mock.patch("octobot_commons.logging.get_logger", mock.Mock()) as mock_get_logger: + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.get_logger(parsed_inputs) + mock_get_logger.assert_called_once_with(octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.__name__) + + +class TestExecuteAutomationIntegration: + def setup_method(self): + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True + + def teardown_method(self): + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = False + + @pytest.mark.asyncio + @required_imports + async def test_execute_automation_full_workflow_three_iterations( + self, + import_automation_workflow, + temp_dbos_scheduler, + ): + init_action = { + "id": "action_init", + "action": octobot_flow.enums.ActionType.APPLY_CONFIGURATION.value, + "config": { + "automation": {"metadata": {"automation_id": "automation_1"}}, + "exchange_account_elements": { + "portfolio": { + "content": { + "ETH": {"total": 1, "available": 1}, + }, + }, + }, + }, + } + dsl_action_1 = { + "id": "action_dsl_1", + "dsl_script": "1 if True else 2", + "dependencies": [{"action_id": "action_init"}], + } + dsl_action_2 = { + "id": "action_dsl_2", + "dsl_script": "1 if True else 2", + "dependencies": [{"action_id": "action_dsl_1"}], + } + all_actions = [init_action, dsl_action_1, dsl_action_2] + state_dict = _automation_state_dict(all_actions) + state_dict["automation"]["exchange_account_elements"] = { + "portfolio": {"content": {"ETH": {"total": 1, "available": 1}}}, + } + state_dict["automation"]["execution"] = { + "previous_execution": { + "trigger_time": time.time() - 600, + "trigger_reason": "scheduled", + "strategy_execution_time": time.time() - 590, + }, + "current_execution": {"trigger_reason": "scheduled"}, + } + task_content = json.dumps({"state": state_dict}) + task = octobot_node.models.Task( + name="test_automation", + content=task_content, + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict( + include_default_values=False + ) + inputs["task"] = task.model_dump(exclude_defaults=True) + + recv_path = "octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE.recv_async" + with mock.patch(recv_path, mock.AsyncMock(return_value=[])): + await temp_dbos_scheduler.AUTOMATION_WORKFLOW_QUEUE.enqueue_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + + max_wait = 30 + poll_interval = 0.5 + elapsed = 0 + while elapsed < max_wait: + workflows = await temp_dbos_scheduler.INSTANCE.list_workflows_async() + pending = [w for w in workflows if w.status in ( + dbos.WorkflowStatusString.PENDING.value, dbos.WorkflowStatusString.ENQUEUED.value + )] + if not pending and len(workflows) >= 3: + break + await asyncio.sleep(poll_interval) + elapsed += poll_interval + + assert len(workflows) >= 3, f"Expected at least 3 workflows, got {len(workflows)}" + assert not pending, f"Expected no pending workflows, got {pending}" + + + completed = [w for w in workflows if w.status == dbos.WorkflowStatusString.SUCCESS.value] + assert len(completed) >= 3, f"Expected at least 3 completed workflows, got {len(completed)}" + + workflow_outputs: list[typing.Optional[str]] = [] + for wf_status in completed: + handle = await temp_dbos_scheduler.INSTANCE.retrieve_workflow_async(wf_status.workflow_id) + result = await handle.get_result() + workflow_outputs.append(result) + db_status = await handle.get_status() + assert db_status.status == dbos.WorkflowStatusString.SUCCESS.value + assert db_status.output == result + non_none_outputs = [output for output in workflow_outputs if output is not None] + assert len(non_none_outputs) == 1, ( + f"Expected exactly one completed workflow to expose a final state payload; " + f"got {len(non_none_outputs)} non-null outputs among {workflow_outputs}" + ) + parsed_final = _parse_automation_workflow_output(non_none_outputs[0]) + assert parsed_final.error is None + assert isinstance(parsed_final.state, str) + state_tree = _job_description_dict_from_output(parsed_final) + automation_state = state_tree["state"]["automation"] + assert automation_state["metadata"]["automation_id"] == "automation_1" + assert "actions_dag" in automation_state + assert "exchange_account_elements" in automation_state + + @pytest.mark.asyncio + @required_imports + async def test_execute_automation_priority_stop_action_stops_workflow( + self, + import_automation_workflow, + temp_dbos_scheduler, + ): + """ + After a normal DAG iteration with ``has_next_actions`` set, ``recv_async`` delivers a stop + priority action; the follow-up iteration must set ``should_stop`` and complete with that + state as workflow output (no child workflow enqueued). + """ + task = octobot_node.models.Task( + name="priority_stop_integration", + content="{}", + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict( + include_default_values=False + ) + inputs["task"] = task.model_dump(exclude_defaults=True) + + action = octobot_flow.entities.ConfiguredActionDetails(id="action_dsl", action="trade") + dag_state = { + "automation": { + "metadata": {"automation_id": "priority_stop_auto"}, + "execution": {"current_execution": {"scheduled_to": 0.0}}, + } + } + stop_state = {"automation": {"stopped": True, "by_priority_action": True}} + dag_iteration_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[action], + next_actions_description=octobot_flow_client.OctoBotActionsJobDescription(state=dag_state), + has_next_actions=True, + actions_dag=None, + should_stop=False, + ) + stop_iteration_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[action], + next_actions_description=octobot_flow_client.OctoBotActionsJobDescription(state=stop_state), + has_next_actions=False, + actions_dag=None, + should_stop=True, + ) + iteration_templates = [dag_iteration_result, stop_iteration_result] + iteration_index = [0] + + def run_on_iteration_result(result_ref: octobot_flow_client.OctoBotActionsJobResult) -> None: + _apply_octobot_actions_job_result_template(result_ref, iteration_templates[iteration_index[0]]) + iteration_index[0] += 1 + + mock_octobot_actions_job_class, _ = _octobot_actions_job_mock_class(run_on_result=run_on_iteration_result) + stop_envelope = _user_actions_update_envelope([{"action": "stop"}]) + mock_recv = mock.AsyncMock(side_effect=[stop_envelope, []]) + + recv_path = "octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE.recv_async" + with mock.patch(recv_path, mock_recv), mock.patch( + "asyncio.sleep", mock.AsyncMock() + ), mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ) as mock_job_factory: + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + workflow_result = await handle.get_result() + expected_state_str = json.dumps( + stop_iteration_result.next_actions_description.to_dict(include_default_values=False) + ) + assert workflow_result == _expected_automation_workflow_envelope_json(expected_state_str) + assert iteration_index[0] == 2 + assert mock_recv.await_count == 1 + stop_user_actions = mock_job_factory.call_args_list[1][0][1] + assert stop_user_actions == [{"action": "stop"}] + parsed_output = _parse_automation_workflow_output(workflow_result) + job_description = _job_description_dict_from_output(parsed_output) + assert job_description["state"]["automation"]["stopped"] is True # the state of the stop action is returned + assert job_description["state"]["automation"]["by_priority_action"] is True + + workflows = await temp_dbos_scheduler.INSTANCE.list_workflows_async() + assert len(workflows) == 1, "Stop must not enqueue a follow-up automation workflow" + + @pytest.mark.asyncio + @required_imports + async def test_execute_automation_execute_iteration_retries_octobot_actions_job_then_succeeds_and_returns_action_error( + self, + import_automation_workflow, + temp_dbos_scheduler, + ): + """ + DBOS execute_iteration is configured with max_attempts=AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES. + When OctoBotActionsJob.run() fails on early attempts then succeeds, the step should + retry and eventually complete without failing the workflow. A processed action may still + report an error_status; that value is copied to AutomationWorkflowOutput.error on completion. + """ + max_attempts = octobot_node.constants.AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES + task = octobot_node.models.Task( + name="retry_policy_test", + content="{}", + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict( + include_default_values=False + ) + inputs["task"] = task.model_dump(exclude_defaults=True) + + dag_action_error = octobot_flow.enums.ActionErrorStatus.INVALID_ORDER.value + action = octobot_flow.entities.ConfiguredActionDetails( + id="action_1", + action="trade", + error_status=dag_action_error, + ) + success_result = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[action], + next_actions_description=octobot_flow_client.OctoBotActionsJobDescription(state={"automation": {}}), + has_next_actions=False, + actions_dag=None, + should_stop=False, + ) + latest_result: list[octobot_flow_client.OctoBotActionsJobResult | None] = [None] + attempt = [0] + + async def run_with_retries_then_apply_success(*args, **kwargs) -> None: + attempt[0] += 1 + if attempt[0] < max_attempts: + raise RuntimeError("simulated transient failure") + assert latest_result[0] is not None + _apply_octobot_actions_job_result_template(latest_result[0], success_result) + + mock_octobot_actions_job_class, run_mock = _octobot_actions_job_mock_class( + run_side_effect=run_with_retries_then_apply_success, + latest_result_ref=latest_result, + ) + mock_logger = mock.Mock() + + recv_path = "octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE.recv_async" + with mock.patch(recv_path, mock.AsyncMock(return_value=[])), mock.patch( + "asyncio.sleep", mock.AsyncMock() + ), mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "get_logger", + mock.Mock(return_value=mock_logger), + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + workflow_result = await handle.get_result() + expected_inner_state = r'{"state": {"automation": {}}}' + assert workflow_result == _expected_automation_workflow_envelope_json( + expected_inner_state, + error=dag_action_error, + ) + parsed_output = _parse_automation_workflow_output(workflow_result) + assert parsed_output.error == dag_action_error + assert isinstance(parsed_output.state, str) + job_description = _job_description_dict_from_output(parsed_output) + assert job_description["state"]["automation"] == {} + wf_status = await handle.get_status() + assert wf_status.status == dbos.WorkflowStatusString.SUCCESS.value + assert wf_status.output == workflow_result + assert parsed_output == _parse_automation_workflow_output(wf_status.output) + + assert run_mock.await_count == max_attempts + mock_logger.exception.assert_not_called() + + @pytest.mark.asyncio + @required_imports + async def test_execute_automation_execute_iteration_exhausts_retries_when_octobot_actions_job_always_fails( + self, + import_automation_workflow, + temp_dbos_scheduler, + ): + """After AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES failed OctoBotActionsJob.run() calls, the step must stop retrying.""" + max_attempts = octobot_node.constants.AUTOMATION_WORKFLOW_MAX_ITERATION_RETRIES + task = octobot_node.models.Task( + name="retry_exhausted_test", + content="{}", + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + inputs = params.AutomationWorkflowInputs(task=task, execution_time=0).to_dict( + include_default_values=False + ) + inputs["task"] = task.model_dump(exclude_defaults=True) + + mock_octobot_actions_job_class, run_mock = _octobot_actions_job_mock_class( + run_side_effect=RuntimeError("persistent failure") + ) + mock_logger = mock.Mock() + + recv_path = "octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE.recv_async" + with mock.patch(recv_path, mock.AsyncMock(return_value=[])), mock.patch( + "asyncio.sleep", mock.AsyncMock() + ), mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class, + ), mock.patch.object( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow, + "get_logger", + mock.Mock(return_value=mock_logger), + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow.execute_automation, + inputs=inputs, + ) + workflow_result = await handle.get_result() + assert workflow_result == json.dumps( + params.AutomationWorkflowOutput( + error=octobot_flow.enums.AutomationWorkflowErrorStatus.EXCEPTION_DURING_ITERATION.value + ).to_dict(include_default_values=False) + ) + parsed_output = _parse_automation_workflow_output(workflow_result) + assert parsed_output.state is None + assert ( + parsed_output.error + == octobot_flow.enums.AutomationWorkflowErrorStatus.EXCEPTION_DURING_ITERATION.value + ) + wf_status = await handle.get_status() + assert wf_status.status == dbos.WorkflowStatusString.SUCCESS.value + assert wf_status.output == workflow_result + + assert run_mock.await_count == max_attempts + mock_logger.exception.assert_called_once() + assert "Interrupted workflow: unexpected critical error: " in str(mock_logger.exception.call_args[0][2]) + + @pytest.mark.asyncio + @required_imports + async def test_encrypted_task_decrypts_for_octobot_actions_job_and_encrypts_iteration_and_workflow_outputs( + self, + import_automation_workflow, + temp_dbos_scheduler, + ): + """ + With node-side encryption keys set: + + - ``execute_iteration`` receives tasks whose ``content`` is ciphertext; ``encrypted_task`` decrypts + before ``OctoBotActionsJob`` is constructed, so the mock must see plaintext ``task.content``. + - Iteration outputs expose ``next_iteration_description`` / metadata as encrypted when enabled. + - ``execute_automation`` returns encrypted ``state`` / ``state_metadata`` when the run stops with + a next-state payload; rescheduling passes encrypted ``task.content`` / ``content_metadata`` to + ``enqueue_async``. + """ + # --- Keys: real RSA/ECDSA material so encrypt_task_content / decrypt_task_content and + # encrypted_task use the same crypto path as production (settings.is_node_side_encryption_enabled). + rsa_private_key, rsa_public_key = octobot_commons.cryptography.generate_rsa_key_pair(2048) + ecdsa_private_key, ecdsa_public_key = octobot_commons.cryptography.generate_ecdsa_key_pair() + + # Plaintext task.content as stored client-side before upload (what decrypt must reproduce). + plain_task_content = json.dumps({"params": {"ACTIONS": "trade", "EXCHANGE_FROM": "binance", + "ORDER_SYMBOL": "ETH/BTC", "ORDER_AMOUNT": 1, "ORDER_TYPE": "market", + "ORDER_SIDE": "BUY", "SIMULATED_PORTFOLIO": {"BTC": 1}}}) + + # Clear derived-key caches so patched settings take effect immediately. + task_inputs_encryption._server_rsa_public_key_bytes.cache_clear() + task_inputs_encryption._server_ecdsa_public_key_bytes.cache_clear() + + # Patch all four encryption keys on settings for the duration of each block below. + encryption_patches = ( + mock.patch.object( + octobot_node.config.settings, "TASKS_SERVER_RSA_PRIVATE_KEY", rsa_private_key + ), + mock.patch.object( + octobot_node.config.settings, "TASKS_SERVER_ECDSA_PRIVATE_KEY", ecdsa_private_key + ), + mock.patch.object( + octobot_node.config.settings, "TASKS_USER_RSA_PUBLIC_KEY", rsa_public_key + ), + mock.patch.object( + octobot_node.config.settings, "TASKS_USER_ECDSA_PUBLIC_KEY", ecdsa_public_key + ), + ) + + with encryption_patches[0], encryption_patches[1], encryption_patches[2], encryption_patches[3]: + assert octobot_node.config.settings.is_node_side_encryption_enabled is True + + # Simulate API/CSV: task content is ciphertext + metadata (inputs remain encrypted at rest). + encrypted_task_content, task_content_metadata = task_inputs_encryption.encrypt_task_content( + plain_task_content + ) + assert encrypted_task_content != plain_task_content + + # OctoBotActionsJob result templates applied by mocks (mutate job.result like real run()). + next_state_for_stop = { + "automation": { + "metadata": {"automation_id": "encryption_integration"}, + "stopped": True, + "by_encryption_test": True, + } + } + next_state_for_schedule = { + "automation": { + "metadata": {"automation_id": "encryption_integration"}, + "execution": {"current_execution": {"scheduled_to": 0.0}}, + } + } + + stop_result_template = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[], + next_actions_description=octobot_flow_client.OctoBotActionsJobDescription( + state=next_state_for_stop + ), + has_next_actions=False, + actions_dag=None, + should_stop=True, + ) + + schedule_result_template = octobot_flow_client.OctoBotActionsJobResult( + processed_actions=[], + next_actions_description=octobot_flow_client.OctoBotActionsJobDescription( + state=next_state_for_schedule + ), + has_next_actions=True, + actions_dag=None, + should_stop=False, + ) + + # Stop path: one iteration then workflow completes with AutomationWorkflowOutput. + mock_octobot_actions_job_class_stop, _ = _octobot_actions_job_mock_class( + run_on_result=lambda result_ref: _apply_octobot_actions_job_result_template( + result_ref, stop_result_template + ), + ) + + # Schedule path: one iteration with has_next_actions True so _schedule_next_iteration enqueues child workflow. + mock_octobot_actions_job_class_schedule, _ = _octobot_actions_job_mock_class( + run_on_result=lambda result_ref: _apply_octobot_actions_job_result_template( + result_ref, schedule_result_template + ), + ) + + def _encrypted_description_raw_json(template: octobot_flow_client.OctoBotActionsJobResult) -> str: + assert template.next_actions_description is not None + return json.dumps( + template.next_actions_description.to_dict(include_default_values=False) + ) + + recv_path = "octobot_node.scheduler.workflows.automation_workflow.SCHEDULER.INSTANCE.recv_async" + automation_wf = octobot_node.scheduler.workflows.automation_workflow.AutomationWorkflow + + # Step 1 — execute_iteration: encrypted_task decrypts before OctoBotActionsJob; exit encrypts next state. + # Assert mock sees plaintext description arg; iteration dict carries ciphertext + metadata. + with encryption_patches[0], encryption_patches[1], encryption_patches[2], encryption_patches[3]: + enc_task = octobot_node.models.Task( + name="encryption_integration", + content=encrypted_task_content, + content_metadata=task_content_metadata, + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + inputs_dict = params.AutomationWorkflowInputs(task=enc_task, execution_time=0).to_dict( + include_default_values=False + ) + inputs_dict["task"] = enc_task.model_dump(exclude_defaults=True) + + with mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class_stop, + ) as mock_job_cls: + iteration_payload = await automation_wf.execute_iteration(inputs_dict, None) + + mock_job_cls.assert_called_once() + job_ctor_content_arg = mock_job_cls.call_args[0][0] + assert job_ctor_content_arg == plain_task_content + + iteration_model = params.AutomationWorkflowIterationResult.from_dict(iteration_payload) + raw_stop_description_json = _encrypted_description_raw_json(stop_result_template) + assert iteration_model.next_iteration_description != raw_stop_description_json + assert isinstance(iteration_model.next_iteration_description, str) + assert isinstance(iteration_model.next_iteration_description_metadata, str) + decrypted_iteration_state = task_inputs_encryption.decrypt_task_content( + iteration_model.next_iteration_description, + iteration_model.next_iteration_description_metadata, + ) + assert json.loads(decrypted_iteration_state) == json.loads(raw_stop_description_json) + + # Step 2 — execute_automation (should_stop): final workflow JSON uses encrypted state/metadata; + # decrypt rounds back to the same next-actions description JSON as the mock template. + with encryption_patches[0], encryption_patches[1], encryption_patches[2], encryption_patches[3]: + enc_task_wf = octobot_node.models.Task( + name="encryption_integration_wf_stop", + content=encrypted_task_content, + content_metadata=task_content_metadata, + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + wf_inputs = params.AutomationWorkflowInputs(task=enc_task_wf, execution_time=0).to_dict( + include_default_values=False + ) + wf_inputs["task"] = enc_task_wf.model_dump(exclude_defaults=True) + + with mock.patch(recv_path, mock.AsyncMock(return_value=[])), mock.patch( + "asyncio.sleep", mock.AsyncMock() + ), mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class_stop, + ): + handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + automation_wf.execute_automation, + inputs=wf_inputs, + ) + workflow_result = await handle.get_result() + + assert isinstance(workflow_result, str) + parsed_final = _parse_automation_workflow_output(workflow_result) + assert isinstance(parsed_final.state, str) + assert isinstance(parsed_final.state_metadata, str) + raw_final_json = _encrypted_description_raw_json(stop_result_template) + assert parsed_final.state != raw_final_json + decrypted_final = task_inputs_encryption.decrypt_task_content( + parsed_final.state, parsed_final.state_metadata + ) + assert json.loads(decrypted_final) == json.loads(raw_final_json) + + # Step 3 — execute_automation (reschedule): _schedule_next_iteration calls enqueue_async with + # next_iteration_description as task.content (and metadata) as encrypted values + enqueue_mock = mock.AsyncMock(return_value=None) + with encryption_patches[0], encryption_patches[1], encryption_patches[2], encryption_patches[3]: + enc_task_sched = octobot_node.models.Task( + name="encryption_integration_enqueue", + content=encrypted_task_content, + content_metadata=task_content_metadata, + type=octobot_node.models.TaskType.EXECUTE_ACTIONS.value, + ) + sched_inputs = params.AutomationWorkflowInputs(task=enc_task_sched, execution_time=0).to_dict( + include_default_values=False + ) + sched_inputs["task"] = enc_task_sched.model_dump(exclude_defaults=True) + + with mock.patch(recv_path, mock.AsyncMock(return_value=[])), mock.patch( + "asyncio.sleep", mock.AsyncMock() + ), mock.patch.object( + octobot_flow_client, + "OctoBotActionsJob", + mock_octobot_actions_job_class_schedule, + ), mock.patch.object( + octobot_node.scheduler.SCHEDULER.AUTOMATION_WORKFLOW_QUEUE, + "enqueue_async", + enqueue_mock, + ): + schedule_handle = await temp_dbos_scheduler.INSTANCE.start_workflow_async( + automation_wf.execute_automation, + inputs=sched_inputs, + ) + await schedule_handle.get_result() + + enqueue_mock.assert_called_once() + assert len(enqueue_mock.call_args.args) == 1 # function to call + assert len(enqueue_mock.call_args.kwargs) == 1 # inputs + enqueued_inputs = enqueue_mock.call_args.kwargs["inputs"] + schedule_plaintext_state = _encrypted_description_raw_json(schedule_result_template) + assert enqueued_inputs["task"]["content"] != schedule_plaintext_state + assert isinstance(enqueued_inputs["task"]["content_metadata"], str) + decrypted_enqueued = task_inputs_encryption.decrypt_task_content( + enqueued_inputs["task"]["content"], enqueued_inputs["task"]["content_metadata"] + ) + assert json.loads(decrypted_enqueued) == json.loads(schedule_plaintext_state) diff --git a/packages/services/.gitignore b/packages/services/.gitignore new file mode 100644 index 0000000000..2cb0a6f791 --- /dev/null +++ b/packages/services/.gitignore @@ -0,0 +1,107 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +.idea/ +tentacles diff --git a/packages/services/BUILD b/packages/services/BUILD new file mode 100644 index 0000000000..d39147f844 --- /dev/null +++ b/packages/services/BUILD @@ -0,0 +1,19 @@ +python_requirements(name="full_reqs", source="full_requirements.txt") + +python_sources(name="octobot_services", sources=["octobot_services/**/*.py"]) + +python_tests( + name="tests", + sources=["tests/**/test_*.py"], + dependencies=[ + ":octobot_services", + ":full_reqs", + "//:dev_reqs", + "packages/commons:octobot_commons", + "packages/commons:reqs", + "packages/commons:full_reqs", + "packages/trading:octobot_trading", + "packages/trading:reqs", + "packages/trading:full_reqs", + ], +) \ No newline at end of file diff --git a/packages/services/CHANGELOG.md b/packages/services/CHANGELOG.md new file mode 100644 index 0000000000..b3efdf2c97 --- /dev/null +++ b/packages/services/CHANGELOG.md @@ -0,0 +1,543 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.8.0] - 2026-02-02 +### Added +- pydantic to requirements +- add AbstractAIService +- add AbstractWebSearchService +- MCP server interface +### Updated +- OpenAI dependency + +## [1.7.3] - 2026-01-29 +### Added +- Tavily service constants +- Bird service constants + +## [1.7.2] - 2026-01-25 +### Added +- Coingecko service constants + +## [1.7.1] - 2026-01-25 +### Added +- Add `get_data_cache` to `AbstractServiceFeed` + +## [1.7.0] - 2026-01-23 +### Updated +- Dependencies + +## [1.6.31] - 2026-01-22 +### Added +- Add exchange service constants + +## [1.6.30] - 2024-11-27 +### Updated +[Requirements] moved openai to the [full] requirements + +## [1.6.29] - 2025-11-26 +### Added +[Requirements] [full] requirements installation + +## [1.6.28] - 2025-10-28 +### Added +- Add `data_cache` to `AbstractServiceFeed` + +## [1.6.27] - 2025-10-28 +### Added +- Coindesk, Lunarcrush and Alternative.me services constants + +## [1.6.26] - 2025-08-24 +### Updated +- Dependencies + +## [1.6.25] - 2025-08-17 +### Updated +- Dependencies + +## [1.6.24] - 2025-05-14 +### Updated +- Dependencies + +## [1.6.23] - 2025-01-09 +### Updated +- Dependencies + +## [1.6.22] - 2025-01-04 +### Updated +- Dependencies + +## [1.6.21] - 2023-10-21 +### Added +[Constant] add CONFIG_LLM_CUSTOM_BASE_URL + +## [1.6.20] - 2023-10-07 +### Added +[ReadOnlyInfo] add CTA type + +## [1.6.19] - 2023-10-03 +### Added +- ReadOnlyInfo +### Updated +- Dependencies + +## [1.6.18] - 2023-09-03 +### Updated +- Requirements: bump flask-cors + +## [1.6.17] - 2023-08-19 +### Updated +- Requirements: bump + +## [1.6.16] - 2023-07-23 +### Updated +- Orders notifications: Use PNL + +## [1.6.15] - 2023-06-25 +### Added +- AbstractService: is_improved_by_extensions + +## [1.6.14] - 2023-06-12 +### Added +- Constants: webhook constants + +## [1.6.13] - 2023-03-31 +### Updated +- Requirements: bump + +## [1.6.12] - 2023-03-28 +### Updated +- Interfaces: improve wildcard trading mode display + +## [1.6.11] - 2023-02-28 +### Updated +- Services: add help url to creation_error_message + +## [1.6.10] - 2023-01-10 +### Added +- Services: creation_error_message + +## [1.6.9] - 2023-01-09 +### Updated +- fix openai patches by patch_openai_proxies + +## [1.6.8] - 2023-01-09 +### Added +- is_openai_proxy + +## [1.6.7] - 2023-01-09 +### Updated +- dependencies + +## [1.6.6] - 2023-12-18 +### Added +- [interfaces] async api + +## [1.6.5] - 2023-10-27 +### Added +- [get_service] config param + +## [1.6.4] - 2023-10-11 +### Added +- [Webhook] Add CONFIG_NGROK_DOMAIN key + +## [1.6.3] - 2023-10-01 +### Updated +- [Requirements] update dependencies + +## [1.6.2] - 2023-08-18 +### Updated +- [Requirements] update dependencies + +## [1.6.1] - 2023-07-23 +### Updated +- [ReturningStartable] add threaded_start + +## [1.6.0] - 2023-05-02 +### Updated +- Supported python versions + +## [1.5.6] - 2023-05-02 +### Updated +- [Dependencies] flask, ngrok and openai + +## [1.5.5] - 2023-04-23 +### Updated +- [BotInterface] set_risk command now updated edited config + +## [1.5.4] - 2023-03-30 +### Updated +- [Orders] Order channel callback + +## [1.5.3] - 2023-03-29 +### Added +- [Services] Add GPT requirements +### Updated +- [Services] Dependencies + +## [1.5.2] - 2023-03-24 +### Updated +- [Services] Improve portfolio output + +## [1.5.1] - 2023-03-22 +### Updated +- [Services] Add reference market value in portfolio pretty print + +## [1.5.0] - 2023-03-15 +### Updated +- [Services] stop is now async +- [Telegram] migrate to async version of the lib + +## [1.4.4] - 2023-03-01 +### Updated +- [API] trading apis + +## [1.4.3] - 2023-02-04 +### Updated +- [NotificationLevel] replace DANGER by ERROR + +## [1.4.2] - 2023-02-03 +### Removed +- [Requirements] Python-Twitter as Twitter API will become paid only + +## [1.4.1] - 2022-12-29 +### Added +- [Requirements] flask_cors + +## [1.4.0] - 2022-12-23 +### Updated +- [Requirements] Bump + +## [1.3.10] - 2022-12-13 +### Updated +- [Requirements] Restore gevent==22.10.2 + +## [1.3.9] - 2022-12-11 +### Updated +- [Requirements] Restore gevent==21.12.0 due to glibc incompatibility (https://github.com/gevent/gevent/blob/master/CHANGES.rst#22102-2022-10-31) + +## [1.3.8] - 2022-12-09 +### Updated +- [Requirements] bump requirements + +## [1.3.7] - 2022-10-17 +### Updated +- [Positions] close position + +## [1.3.6] - 2022-09-08 +### Updated +- [AsyncTools] add timeout param + +## [1.3.5] - 2022-08-25 +### Updated +- [Dependencies] update to latest reddit, telegram, ngrok and flask versions + +## [1.3.4] - 2022-08-11 +### Updated +- [AsyncTools] add log_exceptions param + +## [1.3.3] - 2022-07-29 +### Updated +- [Requirements] bump web interface requirements + +## [1.3.2] - 2022-07-02 +### Updated +- [Requirements] bump requirements + +## [1.3.1] - 2022-06-06 +### Updated +- [Notifications] always create notification channel + +## [1.3.0] - 2022-05-04 +### Added +- Notification sounds +### Updated +- Flask requirement + +## [1.2.32] - 2022-02-18 +### Updated +- Flask requirement + +## [1.2.31] - 2022-01-20 +### Updated +- requirements + +## [1.2.30] - 2022-01-16 +### Updated +- requirements + +### Fixed +- [Telegram] RPC login error + +## [1.2.29] - 2021-12-19 +### Updated +- [Util][Portfolio] Migrate to assets + +## [1.2.28] - 2021-11-24 +### Added +- [Constants] CONFIG_ENABLE_NGROK + +## [1.2.27] - 2021-10-28 +### Added +- flask-compress requirements +- flask-cache requirements + +## [1.2.26] - 2021-09-21 +### Updated +- requirements + +## [1.2.25] - 2021-09-13 +### Added +- AbstractBotInterface set_command_restart method + +## [1.2.24] - 2021-09-03 +### Updated +- requirements + +## [1.2.23] - 2021-07-28 +### Updated +- requirements + +## [1.2.22] - 2021-07-17 +### Updated +- changed missing configuration warning into info +- requirements + +## [1.2.21] - 2021-07-09 +### Updated +- requirements + +## [1.2.20] - 2021-07-03 +### Added +- CONFIG_ENABLE_NGROK constants +- CONFIG_WEBHOOK_SERVER_IP +- CONFIG_WEBHOOK_SERVER_PORT + +## [1.2.19] - 2021-05-03 +### Added +- async reddit api via asyncpraw +### Updated +- gevent and python-telegram-bot versions + +## [1.2.18] - 2021-04-22 +### Updated +- simplifiedpytrends version + +## [1.2.17] - 2021-04-14 +### Added +- CONFIG_MEDIA_PATH constant + +## [1.2.16] - 2021-04-09 +### Added +- telethon +- telegram api constants + +## [1.2.15] - 2021-04-08 +### Updated +- pyngrok version + +## [1.2.14] - 2021-03-26 +### Updated +- Requirements + +## [1.2.13] - 2021-03-15 +### Added +- User commands channel + +## [1.2.12] - 2021-03-03 +### Added +- Python 3.9 support + +## [1.2.11] - 2020-01-04 +### Updated +- requirements + +## [1.2.10] - 2020-12-23 +### Fixed +- has_trader exception + +## [1.2.9] - 2020-12-23 +### Added +- Profiles handling +### Fixed +- No activated trader situations + +## [1.2.8] - 2020-12-16 +### Updated +- Push notifications using async executor +- flask-socketio to 5.0.0 + +## [1.2.7] - 2020-12-06 +### Fixed +- Notifiers when no config data + +## [1.2.6] - 2020-11-26 +### Added +- Services logo and url + +## [1.2.5] - 2020-11-14 +### Added +- Services logo and url + +## [1.2.4] - 2020-11-07 +### Updated +- Requirements + +## [1.2.3] - 2020-10-27 +### Updated +- Services warnings and errors on config issues + +## [1.2.2] - 2020-10-26 +### Updated +- Requirements + +### Fixed +- Service init + +## [1.2.1] - 2020-10-23 +### Updated +- Python 3.8 support + +## [1.2.0] - 2020-10-06 +### Updated +- Migrate imports + +## [1.1.22] - 2020-09-02 +### Updated +- Order notifications for new order states management + +## [1.1.21] - 2020-08-31 +### Updated +- Order notifications for new order states management + +## [1.1.20] - 2020-08-23 +### Updated +- Requirements + +## [1.1.19] - 2020-08-15 +### Updated +- Requirements + +## [1.1.18] - 2020-07-19 +### Updated +- Refresh real trader changed into refresh portfolio +- Requirements + +## [1.1.17] - 2020-06-21 +### Updated +- Requirements + +## [1.1.16] - 2020-06-20 +### Fixed +- Services config update error + +## [1.1.15] - 2020-06-07 +### Updated +- Handle non trading exchanges + +## [1.1.14] - 2020-06-02 +### Added +- Web login + +## [1.1.13] - 2020-05-27 +### Update +- Cython version + +## [1.1.12] - 2020-05-26 +### Updated +- Requirements + +## [1.1.11] - 2020-05-21 +### Updated +- Remove advanced manager from commons + +## [1.1.10] - 2020-05-19 +### Added +- Config constants + +## [1.1.9] - 2020-05-19 +### Added +- OctoBot channels initialization + +## [1.1.8] - 2020-05-18 +### Added +- run_in_bot_async_executor util function + +## [1.1.7] - 2020-05-17 +### Fixed +- Bot interface config command + +## [1.1.6] - 2020-05-16 +### Updated +- Requirements + +## [1.1.5] - 2020-05-15 +### Updated +- OctoBot requirements + +## [1.1.4] - 2020-05-10 +### Updated +- Stop interface +- Telegram requirement + +## [1.1.3] - 2020-05-10 +### Updated +- Channel requirement +- Commons requirement +- Trading requirement + +## [1.1.2] - 2020-05-06 +### Added +- [Service] Webhook + +## [1.1.1] - 2020-05-03 +### Added +- Can now edit user config in services + +## [1.1.0] - 2020-05-02 +### Updated +- Octobot backtesting import paths + +## [1.0.8] - 2020-05-01 +### Added +- Include interfaces and notifications + +## [1.0.7] - 2020-05-01 +### Updated +- Handle multiple services for service feeds and interfaces + +## [1.0.6] - 2020-04-17 +### Updated +- python-telegram-bot requirement + +## [1.0.5] - 2020-04-13 +### Added +- ENV_WEB_ADDRESS environment constant + +## [1.0.4] - 2020-04-13 +### Added +- WEB_PORT environment constant + +## [1.0.3] - 2020-04-10 +### Added +- get_backtesting_service_feed api +- Service feed handling + +## [1.0.2] - 2020-04-04 +### Update +- Requirements version + +### Fixed +- Travis CI file + +## [1.0.1] - 2020-11-02 +### Added +- Version update + +## [1.0.0] - 2020-01-02 +### Added +- Services +- Service-feeds diff --git a/packages/services/LICENSE b/packages/services/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/services/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/services/MANIFEST.in b/packages/services/MANIFEST.in new file mode 100644 index 0000000000..bc2ec0117f --- /dev/null +++ b/packages/services/MANIFEST.in @@ -0,0 +1,7 @@ +recursive-include octobot_services *.pxd + +include README.md +include LICENSE +include CHANGELOG.md +include requirements.txt +include full_requirements.txt diff --git a/packages/services/README.md b/packages/services/README.md new file mode 100644 index 0000000000..34e0111fb6 --- /dev/null +++ b/packages/services/README.md @@ -0,0 +1,5 @@ +# OctoBot-Services +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/31a1caa6e5384d80bf890dba5c9b5e4b)](https://app.codacy.com/gh/Drakkar-Software/OctoBot-Services?utm_source=github.com&utm_medium=referral&utm_content=Drakkar-Software/OctoBot-Services&utm_campaign=Badge_Grade_Dashboard) +[![Github-Action-CI](https://github.com/Drakkar-Software/OctoBot-Services/workflows/OctoBot-Services-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Services/actions) + +OctoBot services package. diff --git a/packages/services/full_requirements.txt b/packages/services/full_requirements.txt new file mode 100644 index 0000000000..66cbf5bace --- /dev/null +++ b/packages/services/full_requirements.txt @@ -0,0 +1,43 @@ +# Services +# Reddit +asyncpraw==7.8.1 +# Telegram +python-telegram-bot==22.6 # update alongside supabase for httpx requirement +telethon==1.42.0 +# Twitter (associated tentacles are disabled as starting from feb 9 2023, API is now paid only). +# see https://twitter.com/TwitterDev/status/1621026986784337922 +# Python-Twitter==3.5 +# Google +simplifiedpytrends>=1.1.2 +# Ngrok +pyngrok==7.5.0 +# Web +## http server +flask==3.1.2 +werkzeug == 3.1.5 +# Flask templates +jinja2==3.1.6 +## flask minification +flask-compress==1.23 +## flask cache +flask-caching==2.3.1 +## flask user authentication management +flask-login==0.6.3 +## flask CORS management +flask-cors==6.0.2 +## user form validators +WTForms==3.2.1 +Flask-WTF==1.2.2 +## websockets +### used by the webhook service and flask-socketio for the web interface +gevent==25.9.1 +### used by flask-socketio with gevent (listed here because multiple libs are usable, force this one) +gevent-websocket==0.10.1 +flask-socketio==5.6.0 +# AI +openai==2.20.0 +mcp==1.26.0 +# Coingecko +coingecko-openapi-client==1.4.0 +# Analysis tools +vaderSentiment==3.3.2 diff --git a/packages/services/octobot_services/__init__.py b/packages/services/octobot_services/__init__.py new file mode 100644 index 0000000000..8ac429925b --- /dev/null +++ b/packages/services/octobot_services/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +PROJECT_NAME = "OctoBot-Services" +VERSION = "1.8.0" # major.minor.revision diff --git a/packages/services/octobot_services/abstract_service_user.py b/packages/services/octobot_services/abstract_service_user.py new file mode 100644 index 0000000000..b8fc57265e --- /dev/null +++ b/packages/services/octobot_services/abstract_service_user.py @@ -0,0 +1,95 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import typing + +import octobot_commons.logging as logging + +import octobot_services.services as services +import octobot_services.util as util + + +class AbstractServiceUser(util.InitializableWithPostAction): + __metaclass__ = abc.ABCMeta + + # The service required to run this user + REQUIRED_SERVICES: typing.Optional[list[services.AbstractService]] = None + + def __init__(self, config): + super().__init__() + self.config = config + self.paused = False + + async def _initialize_impl(self, backtesting_enabled: bool, edited_config) -> bool: + # init associated service if not already init + service_list = services.ServiceFactory.get_available_services() + if self.REQUIRED_SERVICES: + for service in self.REQUIRED_SERVICES: + if service in service_list: + if not await self._create_or_get_service_instance( + service, backtesting_enabled, edited_config + ): + return False + else: + self.get_logger().error( + f"Required service {self.REQUIRED_SERVICES} is not an available service" + ) + return True + elif self.REQUIRED_SERVICES is False: + return True # When no services are required + elif self.REQUIRED_SERVICES is None: + self.get_logger().error( + f"Required service is not set, set it at False if no service is required" + ) + return False + + async def _create_or_get_service_instance( + self, service, backtesting_enabled: bool, edited_config + ): + service_factory = services.ServiceFactory(self.config) + created, error_message = await service_factory.create_or_get_service( + service, backtesting_enabled, edited_config + ) + if created: + return True + else: + log_func = self.get_logger().debug + # log error when the issue is not related to configuration + if service.instance().has_required_configuration(): + log_func = self.get_logger().warning + log_func( + f"Impossible to start {self.get_name()}: required service {service.get_name()} " + f"is not available ({error_message})." + ) + return False + + def has_required_services_configuration(self) -> bool: + if not self.REQUIRED_SERVICES: + return True + if isinstance(self.REQUIRED_SERVICES, bool): + return self.REQUIRED_SERVICES + return all( + service.instance().has_required_configuration() + for service in self.REQUIRED_SERVICES + ) + + @classmethod + def get_name(cls) -> str: + return cls.__name__ + + @classmethod + def get_logger(cls): + return logging.get_logger(cls.get_name()) diff --git a/packages/services/octobot_services/api/__init__.py b/packages/services/octobot_services/api/__init__.py new file mode 100644 index 0000000000..a5099b53d2 --- /dev/null +++ b/packages/services/octobot_services/api/__init__.py @@ -0,0 +1,105 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.api import services +from octobot_services.api import interfaces +from octobot_services.api import service_feeds +from octobot_services.api import notification + +from octobot_services.api.services import ( + get_available_services, + get_available_backtestable_services, + get_available_ai_services, + get_available_web_search_services, + get_ai_service, + get_web_search_service, + is_service_class, + is_service_available_in_backtesting, + get_service, + create_service_factory, + stop_services, +) +from octobot_services.api.interfaces import ( + initialize_global_project_data, + create_interface_factory, + is_enabled, + is_enabled_in_backtesting, + is_interface_relevant, + disable_interfaces, + send_user_command, + start_interfaces, + stop_interfaces, +) +from octobot_services.api.service_feeds import ( + create_service_feed_factory, + get_service_feed, + get_available_backtestable_feeds, + is_service_used_by_backtestable_feed, + start_service_feed, + stop_service_feed, + clear_bot_id_feeds, +) +from octobot_services.api.notification import ( + create_notifier_factory, + create_notification, + is_enabled_in_config, + get_enable_notifier, + set_enable_notifier, + is_notifier_relevant, + send_notification, + process_pending_notifications, +) + + +LOGGER_TAG = "ServicesApi" + +__all__ = [ + "get_available_services", + "get_available_backtestable_services", + "get_available_ai_services", + "get_available_web_search_services", + "get_ai_service", + "get_web_search_service", + "is_service_class", + "is_service_available_in_backtesting", + "get_service", + "create_service_factory", + "stop_services", + "initialize_global_project_data", + "create_interface_factory", + "is_enabled", + "is_enabled_in_backtesting", + "is_interface_relevant", + "disable_interfaces", + "send_user_command", + "start_interfaces", + "stop_interfaces", + "create_service_feed_factory", + "get_service_feed", + "get_available_backtestable_feeds", + "is_service_used_by_backtestable_feed", + "start_service_feed", + "stop_service_feed", + "clear_bot_id_feeds", + "create_notifier_factory", + "create_notification", + "is_enabled_in_config", + "get_enable_notifier", + "set_enable_notifier", + "is_notifier_relevant", + "send_notification", + "process_pending_notifications", +] diff --git a/packages/services/octobot_services/api/interfaces.py b/packages/services/octobot_services/api/interfaces.py new file mode 100644 index 0000000000..1986fe60ed --- /dev/null +++ b/packages/services/octobot_services/api/interfaces.py @@ -0,0 +1,87 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio + +import octobot_commons.channels_name as channels_names +import async_channel.channels as channels +import octobot_services.interfaces as interfaces +import octobot_services.managers as managers +import octobot_services.api.service_feeds as service_feeds_api + + +def initialize_global_project_data(bot_id: str, project_name: str, project_version: str) -> None: + interfaces.AbstractInterface.initialize_global_project_data(bot_id, project_name, project_version) + + +def create_interface_factory(config: dict) -> interfaces.InterfaceFactory: + return interfaces.InterfaceFactory(config) + + +def is_enabled(interface_class: interfaces.AbstractInterface) -> bool: + return interface_class.enabled + + +async def send_user_command(bot_id, subject, action, data, wait_for_processing=False) -> bool: + try: + channel = channels.get_chan(channels_names.OctoBotUserChannelsName.USER_COMMANDS_CHANNEL.value) + await channel.get_internal_producer().send( + bot_id=bot_id, + subject=subject, + action=action, + data=data + ) + if wait_for_processing: + producers = channel.producers + if channel.internal_producer is not None: + producers.append(channel.internal_producer) + await asyncio.gather(*(producer.wait_for_processing() for producer in producers)) + return True + except KeyError: + return False + + +def is_enabled_in_backtesting(interface_class) -> bool: + if not interface_class.REQUIRED_SERVICES: + return True + return all( + service_feeds_api.is_service_used_by_backtestable_feed(service) + for service in interface_class.REQUIRED_SERVICES + ) + + +def is_interface_relevant(config, interface_class, backtesting_enabled): + return is_enabled(interface_class) and \ + all(service.get_is_enabled(config) for service in interface_class.REQUIRED_SERVICES) and \ + (not backtesting_enabled or (backtesting_enabled and is_enabled_in_backtesting(interface_class))) + + +def disable_interfaces(interface_identifier: str) -> int: + disabled_interfaces = 0 + normalized_identifier = interface_identifier.lower() + for interface_class in interfaces.InterfaceFactory.get_available_interfaces(): + if normalized_identifier in interface_class.__name__.lower(): + interface_class.enabled = False + disabled_interfaces += 1 + return disabled_interfaces + + +# Return the list of started interfaces +async def start_interfaces(interfaces: list) -> list: + return await managers.start_interfaces(interfaces) + + +async def stop_interfaces(interfaces: list) -> None: + await managers.stop_interfaces(interfaces) diff --git a/packages/services/octobot_services/api/notification.py b/packages/services/octobot_services/api/notification.py new file mode 100644 index 0000000000..76315694be --- /dev/null +++ b/packages/services/octobot_services/api/notification.py @@ -0,0 +1,82 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import async_channel.channels as channel + +import octobot_commons.enums as common_enums + +import octobot_services.channel as channels +import octobot_services.notification as notifications +import octobot_services.notifier as notifiers +import octobot_services.enums as enums + +MAX_PENDING_NOTIFICATION = 10 +pending_notifications = [] + + +def create_notifier_factory(config) -> notifiers.NotifierFactory: + return notifiers.NotifierFactory(config) + + +def create_notification(text: str, title="", markdown_text="", sound=enums.NotificationSound.NO_SOUND, + markdown_format: common_enums.MarkdownFormat = common_enums.MarkdownFormat.IGNORE, + level: enums.NotificationLevel = enums.NotificationLevel.INFO, + category: enums.NotificationCategory = enums.NotificationCategory.GLOBAL_INFO, + linked_notification=None) -> notifications.Notification: + return notifications.Notification(text, title, markdown_text, sound, markdown_format, level, category, + linked_notification) + + +async def send_notification(notification: notifications.Notification) -> None: + try: + # send notification only if is a notification channel is running + channel.get_chan(channels.NotificationChannel.get_name()) + await channels.NotificationChannelProducer.instance().send( + { + "notification": notification + } + ) + except KeyError: + if len(pending_notifications) < MAX_PENDING_NOTIFICATION: + pending_notifications.append(notification) + + +async def process_pending_notifications(): + for notification in pending_notifications: + await channels.NotificationChannelProducer.instance().send( + { + "notification": notification + } + ) + pending_notifications.clear() + + +def is_enabled_in_config(notifier_class, config) -> bool: + return notifier_class.is_enabled(config) + + +def get_enable_notifier(notifier) -> bool: + return notifier.enabled + + +def set_enable_notifier(notifier, enabled) -> None: + notifier.enabled = enabled + + +def is_notifier_relevant(config, notifier_class, backtesting_enabled): + return is_enabled_in_config(notifier_class, config) and \ + all(service.get_is_enabled(config) + for service in notifier_class.REQUIRED_SERVICES) and \ + not backtesting_enabled diff --git a/packages/services/octobot_services/api/service_feeds.py b/packages/services/octobot_services/api/service_feeds.py new file mode 100644 index 0000000000..68af89e396 --- /dev/null +++ b/packages/services/octobot_services/api/service_feeds.py @@ -0,0 +1,57 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.tentacles_management as tentacles_management + +import octobot_services.managers as managers +import octobot_services.service_feeds as service_feeds + + +def get_available_backtestable_feeds() -> list: + feeds = tentacles_management.get_all_classes_from_parent(service_feeds.AbstractServiceFeed) + return [feed for feed in feeds if feed.BACKTESTING_ENABLED] + + +def is_service_used_by_backtestable_feed(service_class) -> bool: + backtestable = get_available_backtestable_feeds() + for feed in backtestable: + if feed.REQUIRED_SERVICES and service_class in feed.REQUIRED_SERVICES: + return True + return False + + +def create_service_feed_factory(config, main_async_loop, bot_id, backtesting=None, importer=None) -> service_feeds.ServiceFeedFactory: + return service_feeds.ServiceFeedFactory(config, main_async_loop, bot_id, backtesting, importer=importer) + + +def get_service_feed(service_feed_class, bot_id) -> service_feeds.AbstractServiceFeed: + try: + return service_feeds.ServiceFeeds.instance().get_service_feed(bot_id, service_feed_class.get_name()) + except TypeError: + raise RuntimeError(f"can't get {service_feed_class} instance: service feed has not been properly created yet") + + +async def start_service_feed(service_feed: service_feeds.AbstractServiceFeed, + backtesting_enabled: bool, + edited_config: dict) -> bool: + return await managers.ServiceFeedManager.start_service_feed(service_feed, backtesting_enabled, edited_config) + + +async def stop_service_feed(service_feed: service_feeds.AbstractServiceFeed) -> None: + await managers.ServiceFeedManager.stop_service_feed(service_feed) + + +async def clear_bot_id_feeds(bot_id: str) -> None: + service_feeds.ServiceFeeds.instance().clear_bot_id_feeds(bot_id) diff --git a/packages/services/octobot_services/api/services.py b/packages/services/octobot_services/api/services.py new file mode 100644 index 0000000000..0773c1a512 --- /dev/null +++ b/packages/services/octobot_services/api/services.py @@ -0,0 +1,120 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_services.api.service_feeds as service_feeds_api +import octobot_services.managers as managers +import octobot_services.services as services +import octobot_services.interfaces as interfaces +import octobot_services.errors as errors +import octobot_commons.asyncio_tools as asyncio_tools + + +_SERVICE_ASYNC_LOCKS = {} + + +def _service_async_lock(service_class): + try: + return _SERVICE_ASYNC_LOCKS[service_class.__name__] + except KeyError: + _SERVICE_ASYNC_LOCKS[service_class.__name__] = asyncio_tools.RLock() + return _SERVICE_ASYNC_LOCKS[service_class.__name__] + + +def get_available_services() -> list[type[services.AbstractService]]: + return services.ServiceFactory.get_available_services() + +def get_available_ai_services() -> list[type[services.AbstractAIService]]: + return services.ServiceFactory.get_available_ai_services() + +def get_available_web_search_services() -> list[type[services.AbstractWebSearchService]]: + return services.ServiceFactory.get_available_web_search_services() + +def is_service_class(klass) -> bool: + return klass in get_available_services() + get_available_ai_services() + get_available_web_search_services() + +def get_available_backtestable_services() -> list: + return [ + service_class for service_class in services.ServiceFactory.get_available_services() + if service_class.BACKTESTING_ENABLED + ] + +async def _get_available_service_instance( + get_available_services_func, + service_type_name: str, + is_backtesting: bool = False, + config = None +): + available_services = get_available_services_func() + for service_class in available_services: + try: + return await get_service(service_class, is_backtesting, config) + except errors.CreationError: + # Service is not running/initialized, skip it + continue + raise errors.CreationError(f"No {service_type_name} is currently running or available.") + +async def get_ai_service(is_backtesting=False, config = None) -> services.AbstractAIService: + return await _get_available_service_instance( + get_available_ai_services, + "AI service", + is_backtesting, + config + ) + +async def get_web_search_service(is_backtesting=False, config = None) -> services.AbstractWebSearchService: + return await _get_available_service_instance( + get_available_web_search_services, + "web search service", + is_backtesting, + config + ) + + +def is_service_available_in_backtesting(service_class) -> bool: + return ( + service_class.BACKTESTING_ENABLED + or service_feeds_api.is_service_used_by_backtestable_feed(service_class) + ) + + +async def get_service(service_class, is_backtesting, config=None): + # prevent concurrent access when creating a service + async with _service_async_lock(service_class): + # Use provided config, or fall back to interface config + # when not backtesting as startup config shouldn't be used + if config is None and not is_backtesting: + config = interfaces.get_startup_config(dict_only=True) + + created, error_message = await create_service_factory(config).create_or_get_service( + service_class, + is_backtesting, + config + ) + if created: + service = service_class.instance() + if is_backtesting and not is_service_available_in_backtesting(service_class): + raise errors.UnavailableInBacktestingError( + f"{service_class.__name__} service is not available in backtesting" + ) + return service + raise errors.CreationError(f"{service_class.__name__} service is not initialized: {error_message}") + + +def create_service_factory(config) -> services.ServiceFactory: + return services.ServiceFactory(config) + + +async def stop_services() -> None: + await managers.stop_services() diff --git a/packages/services/octobot_services/channel/__init__.py b/packages/services/octobot_services/channel/__init__.py new file mode 100644 index 0000000000..9c2dddc861 --- /dev/null +++ b/packages/services/octobot_services/channel/__init__.py @@ -0,0 +1,47 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.channel import abstract_service_feed +from octobot_services.channel import notifications + +from octobot_services.channel.abstract_service_feed import ( + AbstractServiceFeedChannelConsumer, + AbstractServiceFeedChannelProducer, + AbstractServiceFeedChannel, +) +from octobot_services.channel.notifications import ( + NotificationChannelConsumer, + NotificationChannelProducer, + NotificationChannel, +) +from octobot_services.channel.user_commands import ( + UserCommandsChannelConsumer, + UserCommandsChannelProducer, + UserCommandsChannel, +) + +__all__ = [ + "AbstractServiceFeedChannelConsumer", + "AbstractServiceFeedChannelProducer", + "AbstractServiceFeedChannel", + "NotificationChannelConsumer", + "NotificationChannelProducer", + "NotificationChannel", + "UserCommandsChannelConsumer", + "UserCommandsChannelProducer", + "UserCommandsChannel", +] + diff --git a/packages/services/octobot_services/channel/abstract_service_feed.py b/packages/services/octobot_services/channel/abstract_service_feed.py new file mode 100644 index 0000000000..e1849f69ed --- /dev/null +++ b/packages/services/octobot_services/channel/abstract_service_feed.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc + +import async_channel.channels as channels +import async_channel.producer as producer +import async_channel.consumer as consumer + + +class AbstractServiceFeedChannelConsumer(consumer.Consumer): + __metaclass__ = abc.ABCMeta + + +class AbstractServiceFeedChannelProducer(producer.Producer): + __metaclass__ = abc.ABCMeta + + +class AbstractServiceFeedChannel(channels.Channel): + __metaclass__ = abc.ABCMeta + + PRODUCER_CLASS = AbstractServiceFeedChannelProducer + CONSUMER_CLASS = AbstractServiceFeedChannelConsumer diff --git a/packages/services/octobot_services/channel/notifications.py b/packages/services/octobot_services/channel/notifications.py new file mode 100644 index 0000000000..bf5374c022 --- /dev/null +++ b/packages/services/octobot_services/channel/notifications.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.singleton as singleton + +import async_channel.channels as channels +import async_channel.producer as producer +import async_channel.consumer as consumer + + +class NotificationChannelConsumer(consumer.Consumer): + pass + + +class NotificationChannelProducer(producer.Producer, singleton.Singleton): + pass + + +class NotificationChannel(channels.Channel): + PRODUCER_CLASS = NotificationChannelProducer + CONSUMER_CLASS = NotificationChannelConsumer diff --git a/packages/services/octobot_services/channel/user_commands.py b/packages/services/octobot_services/channel/user_commands.py new file mode 100644 index 0000000000..6951659553 --- /dev/null +++ b/packages/services/octobot_services/channel/user_commands.py @@ -0,0 +1,38 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import async_channel.channels as channels +import async_channel.producer as producer +import async_channel.consumer as consumer + + +class UserCommandsChannelConsumer(consumer.SupervisedConsumer): + pass + + +class UserCommandsChannelProducer(producer.Producer): + async def send(self, bot_id, subject, action, data=None): + for consumer in self.channel.get_consumer_from_filters({"bot_id": bot_id, "subject": subject}): + await consumer.queue.put({ + "bot_id": bot_id, + "subject": subject, + "action": action, + "data": data + }) + + +class UserCommandsChannel(channels.Channel): + PRODUCER_CLASS = UserCommandsChannelProducer + CONSUMER_CLASS = UserCommandsChannelConsumer diff --git a/packages/services/octobot_services/constants.py b/packages/services/octobot_services/constants.py new file mode 100644 index 0000000000..6e47450e3c --- /dev/null +++ b/packages/services/octobot_services/constants.py @@ -0,0 +1,254 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +# Config +CONFIG_CATEGORY_SERVICES = "services" +CONFIG_SERVICE_INSTANCE = "service_instance" + +# Interfaces +CONFIG_INTERFACES = "interfaces" +CONFIG_INTERFACES_WEB = "web" +CONFIG_INTERFACES_NODE_WEB = "node_web" +CONFIG_INTERFACES_TELEGRAM = "telegram" + +# Service feeds +FEED_METADATA = "metadata" + +# Telegram +CONFIG_TELEGRAM = "telegram" +CONFIG_TOKEN = "token" +CONFIG_TELEGRAM_CHANNEL = "telegram-channels" +MESSAGE_PARSE_MODE = 'parse_mode' +CONFIG_TELEGRAM_ALL_CHANNEL = "*" +CONFIG_GROUP_MESSAGE = "group-message" +CONFIG_GROUP_MESSAGE_DESCRIPTION = "group-message-description" +CONFIG_USERNAMES_WHITELIST = "usernames-whitelist" +CONFIG_CHAT_ID = "chat-id" + +CONFIG_TELEGRAM_API = "telegram-api" +CONFIG_API = "telegram-api" +CONFIG_API_HASH = "telegram-api-hash" +CONFIG_TELEGRAM_PHONE = "telegram-phone" +CONFIG_TELEGRAM_PASSWORD = "telegram-password" +CONFIG_MESSAGE_CONTENT = "message-content" +CONFIG_MESSAGE_SENDER = "message-sender" +CONFIG_IS_GROUP_MESSAGE = "is-group-message" +CONFIG_IS_CHANNEL_MESSAGE = "is-channel-message" +CONFIG_IS_PRIVATE_MESSAGE = "is-private-message" +CONFIG_MEDIA_PATH = "media-path" + +# Web +CONFIG_WEB = "web" +CONFIG_WEB_IP = "ip" +CONFIG_WEB_PORT = "port" +CONFIG_WEB_REQUIRES_PASSWORD = "require-password" +CONFIG_WEB_PASSWORD = "password" +CONFIG_AUTO_OPEN_IN_WEB_BROWSER = "auto-open-in-web-browser" +ENV_WEB_PORT = "WEB_PORT" +ENV_WEB_ADDRESS = "WEB_ADDRESS" +ENV_CORS_ALLOWED_ORIGINS = "CORS_ALLOWED_ORIGINS" +ENV_BACKEND_CORS_ALLOWED_ORIGINS = "BACKEND_CORS_ALLOWED_ORIGINS" +ENV_AUTO_OPEN_IN_WEB_BROWSER = "AUTO_OPEN_IN_WEB_BROWSER" +DEFAULT_SERVER_IP = '0.0.0.0' +DEFAULT_SERVER_PORT = 5001 +DEFAULT_BACKEND_CORS_ALLOWED_ORIGINS = "http://localhost,http://localhost:5173,http://localhost:8000,https://localhost,https://localhost:5173" + +# Node API service +CONFIG_NODE_API = "node-api" +CONFIG_NODE_API_IP = "ip" +CONFIG_NODE_API_PORT = "port" +ENV_NODE_API_PORT = "NODE_API_PORT" +ENV_NODE_API_ADDRESS = "NODE_API_ADDRESS" +ENV_ENABLE_NODE_API = "ENABLE_NODE_API" +DEFAULT_NODE_API_IP = DEFAULT_SERVER_IP +DEFAULT_NODE_API_PORT = 8000 + +# Node Interface configuration +ADMIN_USERNAME = "admin-username" +ADMIN_PASSWORD = "admin-password" +ENV_ADMIN_USERNAME = "ENV_ADMIN_USERNAME" +ENV_ADMIN_PASSWORD = "ENV_ADMIN_PASSWORD" +NODE_API_URL = "node-api-url" +NODE_SQLITE_FILE = "node-sqlite-file" +NODE_REDIS_URL = "node-redis-url" +BACKEND_CORS_ALLOWED_ORIGINS = "backend-cors-allowed-origins" +ENV_NODE_SQLITE_FILE = "ENV_NODE_SQLITE_FILE" +ENV_NODE_POSTGRES_URL = "ENV_NODE_POSTGRES_URL" + +# Webhook +CONFIG_WEBHOOK = "webhook" +CONFIG_ENABLE_NGROK = "enable-ngrok" +CONFIG_ENABLE_OCTOBOT_WEBHOOK = "enable-octobot-webhook" +CONFIG_NGROK_TOKEN = "ngrok-token" +CONFIG_NGROK_DOMAIN = "ngrok-domain" +CONFIG_WEBHOOK_SERVER_IP = "webhook-bind-ip" +CONFIG_WEBHOOK_SERVER_PORT = "webhook-bind-port" +ENV_WEBHOOK_PORT = "WEBHOOK_PORT" +ENV_WEBHOOK_ADDRESS = "WEBHOOK_ADDRESS" +DEFAULT_WEBHOOK_SERVER_IP = '127.0.0.1' +DEFAULT_WEBHOOK_SERVER_PORT = 9000 +TRADINGVIEW_WEBHOOK_SERVICE_NAME = "trading_view" + +# GPT +CONFIG_GPT = "GPT" +DEPRECATED_CONFIG_OPENAI_SECRET_KEY = "openai-secret-key" +CONFIG_LLM_API_KEY = "api-key" +CONFIG_LLM_CUSTOM_BASE_URL = "llm-custom-base-url" +CONFIG_LLM_MODEL = "model" +CONFIG_LLM_MODEL_FAST = "model-fast" +CONFIG_LLM_MODEL_REASONING = "model-reasoning" +CONFIG_LLM_DAILY_TOKENS_LIMIT = "daily-tokens-limit" +CONFIG_LLM_SHOW_REASONING = "show-reasoning" +CONFIG_LLM_REASONING_EFFORT = "reasoning-effort" +CONFIG_LLM_MCP_SERVERS = "mcp-servers" +CONFIG_LLM_AUTO_INJECT_MCP_TOOLS = "auto-inject-mcp-tools" +CONFIG_LLM_TOOL_CALL_JSON_OUTPUT = "tool-call-json-output" +CONFIG_LLM_AI_PROVIDER = "ai-provider" +ENV_OPENAI_SECRET_KEY = "OPENAI_SECRET_KEY" +ENV_GPT_MODEL = "GPT_MODEL" +ENV_LLM_CUSTOM_BASE_URL = "LLM_CUSTOM_BASE_URL" +ENV_LLM_MODEL = "LLM_MODEL" +ENV_GPT_DAILY_TOKENS_LIMIT = "GPT_DAILY_TOKEN_LIMIT" + +# LangChain +CONFIG_LANGCHAIN = "langchain" +CONFIG_LANGCHAIN_AI_PROVIDER = "ai-provider" +CONFIG_LANGCHAIN_API_KEY = "api-key" +CONFIG_LANGCHAIN_CUSTOM_BASE_URL = "base-url" +CONFIG_LANGCHAIN_MODEL = "model" +CONFIG_LANGCHAIN_MODEL_FAST = "model-fast" +CONFIG_LANGCHAIN_MODEL_REASONING = "model-reasoning" +CONFIG_LANGCHAIN_DAILY_TOKENS_LIMIT = "daily-tokens-limit" +ENV_LANGCHAIN_API_KEY = "LANGCHAIN_API_KEY" +ENV_LANGCHAIN_MODEL = "LANGCHAIN_MODEL" +ENV_LANGCHAIN_DAILY_TOKENS_LIMIT = "LANGCHAIN_DAILY_TOKEN_LIMIT" + +# MCP +CONFIG_MCP = "mcp" +CONFIG_MCP_IP = "ip" +CONFIG_MCP_PORT = "port" +ENV_MCP_PORT = "MCP_PORT" +ENV_MCP_ADDRESS = "MCP_ADDRESS" +DEFAULT_MCP_IP = '127.0.0.1' +DEFAULT_MCP_PORT = 3001 + +# Google +CONFIG_GOOGLE = "google" +CONFIG_TREND_TOPICS = "trends" +CONFIG_TREND = "trend" +CONFIG_TREND_DESCRIPTION = "trend_description" +CONFIG_TREND_HISTORY_TIME = "relevant_history_months" + +# TradingView +CONFIG_TRADING_VIEW = "trading-view" +CONFIG_REQUIRE_TRADING_VIEW_TOKEN = "require-token" +CONFIG_TRADING_VIEW_TOKEN = "token" +CONFIG_TRADING_VIEW_USE_EMAIL_ALERTS = "use-email-alerts" +TRADING_VIEW_USING_EMAIL_INSTEAD_OF_WEBHOOK = "Using email alerts instead of webhook" + +# Twitter +CONFIG_TWITTERS_ACCOUNTS = "accounts" +CONFIG_TWITTERS_HASHTAGS = "hashtags" +CONFIG_TWITTER = "twitter" +CONFIG_TWITTER_API_INSTANCE = "twitter_api_instance" +CONFIG_TWEET = "tweet" +CONFIG_TWEET_DESCRIPTION = "tweet_description" +CONFIG_TW_API_KEY = "api-key" +CONFIG_TW_API_SECRET = "api-secret" +CONFIG_TW_ACCESS_TOKEN = "access-token" +CONFIG_TW_ACCESS_TOKEN_SECRET = "access-token-secret" + +# Bird (Bird CLI - read-only Twitter/X) +CONFIG_BIRD = "bird" +CONFIG_BIRD_CLI_PATH = "cli-path" +CONFIG_BIRD_ACCOUNT = "account" + +# Tavily (Tavily API - web search) +CONFIG_TAVILY = "tavily" +CONFIG_TAVILY_API_KEY = "api-key" +CONFIG_TAVILY_PROJECT_ID = "project-id" + +# SearXNG (self-hosted web search) +CONFIG_SEARXNG = "searxng" +CONFIG_SEARXNG_URL = "url" +CONFIG_SEARXNG_PORT = "port" +CONFIG_SEARXNG_CATEGORIES = "categories" +CONFIG_SEARXNG_LANGUAGE = "language" +CONFIG_SEARXNG_TIME_RANGE = "time_range" +CONFIG_SEARXNG_SAFE_SEARCH = "safe_search" +CONFIG_SEARXNG_ENGINES = "engines" + +# Reddit +CONFIG_REDDIT = "reddit" +CONFIG_REDDIT_SUBREDDITS = "subreddits" +CONFIG_REDDIT_ENTRY = "entry" +CONFIG_REDDIT_ENTRY_WEIGHT = "entry_weight" +CONFIG_REDDIT_CLIENT_ID = "client-id" +CONFIG_REDDIT_CLIENT_SECRET = "client-secret" +CONFIG_REDDIT_PASSWORD = "password" +CONFIG_REDDIT_USERNAME = "username" + +# Coindesk +CONFIG_COINDESK = "coindesk" +CONFIG_COINDESK_API_KEY = "api-key" +CONFIG_COINDESK_LANGUAGE = "lang" +CONFIG_COINDESK_REFRESH_TIME_FRAME = "refresh_time_frame" +CONFIG_COINDESK_TOPICS = "topics" +COINDESK_TOPIC_MARKETCAP = "topic_marketcap" +COINDESK_TOPIC_NEWS = "topic_news" +COINDESK_DATA_KEY = "data" + +# Lunarcrush +CONFIG_LUNARCRUSH = "lunarcrush" +CONFIG_LUNARCRUSH_API_KEY = "api-key" +CONFIG_LUNARCRUSH_REFRESH_TIME_FRAME = "refresh_time_frame" +CONFIG_LUNARCRUSH_COINS = "coins" +LUNARCRUSH_COIN_METRICS = "coin_metrics" +LUNARCRUSH_DATA_KEY = "data" + +# Alternative.me +CONFIG_ALTERNATIVE_ME = "alternative_me" +CONFIG_ALTERNATIVE_ME_TOPICS = "topics" +CONFIG_ALTERNATIVE_ME_REFRESH_TIME_FRAME = "refresh_time_frame" +ALTERNATIVE_ME_TOPIC_FEAR_AND_GREED = "topic_fear_and_greed" +ALTERNATIVE_ME_DATA_KEY = "data" + +# Coingecko +CONFIG_COINGECKO = "coingecko" +CONFIG_COINGECKO_API_KEY = "api-key" +CONFIG_COINGECKO_TOPICS = "topics" +CONFIG_COINGECKO_REFRESH_TIME_FRAME = "refresh_time_frame" +CONFIG_COINGECKO_COINS = "coins" +COINGECKO_TOPIC_MARKETS = "topic_markets" +COINGECKO_TOPIC_TRENDING = "topic_trending" +COINGECKO_TOPIC_GLOBAL = "topic_global" +COINGECKO_DATA_KEY = "data" + +# Exchange +CONFIG_EXCHANGE = "exchange" +CONFIG_EXCHANGE_PROFILES = "profiles" +CONFIG_EXCHANGE_PROFILE_ID = "id" + +# Notifications +CONFIG_CATEGORY_NOTIFICATION = "notification" +CONFIG_NOTIFICATION_TYPE = "notification-type" + +# Interfaces +PAID_FEES_STR = "Paid fees" + +# external resources +EXTERNAL_RESOURCE_CURRENT_USER_FORM = "current-user-feedback-form" +EXTERNAL_RESOURCE_PUBLIC_ANNOUNCEMENTS = "public-announcements" diff --git a/packages/services/octobot_services/enums.py b/packages/services/octobot_services/enums.py new file mode 100644 index 0000000000..792969eeeb --- /dev/null +++ b/packages/services/octobot_services/enums.py @@ -0,0 +1,58 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + + +class NotificationLevel(enum.Enum): + CRITICAL = "critical" + ERROR = "error" + WARNING = "warning" + INFO = "info" + SUCCESS = "success" + + +class NotificationCategory(enum.Enum): + GLOBAL_INFO = "global-info" + PRICE_ALERTS = "price-alerts" + TRADES = "trades" + TRADING_SCRIPT_ALERTS = "trading-script-alerts" + OTHER = "other" + + +class NotificationSound(enum.Enum): + NO_SOUND = None + FINISHED_PROCESSING = "finished_processing.mp3" + + +class ReadOnlyInfoType(enum.Enum): + COPYABLE = "copyable" + CLICKABLE = "clickable" + CTA = "cta" + READONLY = "readonly" + + +class AIModelPolicy(enum.Enum): + FAST = "fast" + REASONING = "reasoning" + +class AIProvider(enum.Enum): + OPENAI = "openai" + ANTHROPIC = "anthropic" + OLLAMA = "ollama" + GOOGLE = "google" + MICROSOFT = "microsoft" + AMAZON = "amazon" + OTHER = "other" diff --git a/packages/services/octobot_services/errors.py b/packages/services/octobot_services/errors.py new file mode 100644 index 0000000000..45df2e011d --- /dev/null +++ b/packages/services/octobot_services/errors.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class UnavailableInBacktestingError(Exception): + """ + Raised when accessing a service that is not available in backtesting + """ + + +class CreationError(Exception): + """ + Raised when accessing a service that failed to be successfully created + """ + + +class InvalidRequestError(Exception): + """ + Raised when an invalid request is submitted to a service + """ + + +class RateLimitError(Exception): + """ + Raised when an the rate limit has been reached for the given request + """ diff --git a/packages/services/octobot_services/interfaces/__init__.py b/packages/services/octobot_services/interfaces/__init__.py new file mode 100644 index 0000000000..f4d75a7477 --- /dev/null +++ b/packages/services/octobot_services/interfaces/__init__.py @@ -0,0 +1,130 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.interfaces import abstract_interface +from octobot_services.interfaces.abstract_interface import ( + AbstractInterface, +) + +from octobot_services.interfaces import interface_factory +from octobot_services.interfaces import bots +from octobot_services.interfaces import util +from octobot_services.interfaces import web + +from octobot_services.interfaces.interface_factory import ( + InterfaceFactory, +) +from octobot_services.interfaces.bots import ( + AbstractBotInterface, + LOGGER, + EOL, + NO_TRADER_MESSAGE, + NO_CURRENCIES_MESSAGE, +) +from octobot_services.interfaces.util import ( + get_bot_api, + get_exchange_manager_ids, + get_global_config, + get_startup_config, + get_edited_config, + get_startup_tentacles_config, + get_edited_tentacles_config, + get_exchange_managers, + run_in_bot_main_loop, + run_in_bot_async_executor, + get_all_open_orders, + cancel_orders, + cancel_all_open_orders, + async_cancel_orders, + async_cancel_all_open_orders, + has_trader, + has_real_and_or_simulated_traders, + sell_all_currencies, + sell_all, + async_sell_all_currencies, + async_sell_all, + set_enable_trading, + get_total_paid_fees, + get_trades_history, + set_risk, + get_risk, + get_currencies_with_status, + get_matrix_list, + get_portfolio_holdings, + get_portfolio_current_value, + get_global_portfolio_currencies_amounts, + get_global_portfolio_currencies_values, + trigger_portfolios_refresh, + async_trigger_portfolios_refresh, + get_global_profitability, + get_reference_market, + get_all_positions, + close_positions, + async_close_positions, +) +from octobot_services.interfaces.web import ( + AbstractWebInterface, +) + +__all__ = [ + "AbstractInterface", + "InterfaceFactory", + "AbstractWebInterface", + "get_bot_api", + "get_exchange_manager_ids", + "get_global_config", + "get_startup_config", + "get_edited_config", + "get_startup_tentacles_config", + "get_edited_tentacles_config", + "get_exchange_managers", + "run_in_bot_main_loop", + "run_in_bot_async_executor", + "get_all_open_orders", + "cancel_orders", + "cancel_all_open_orders", + "async_cancel_orders", + "async_cancel_all_open_orders", + "has_trader", + "has_real_and_or_simulated_traders", + "sell_all_currencies", + "sell_all", + "async_sell_all_currencies", + "async_sell_all", + "set_enable_trading", + "get_total_paid_fees", + "get_trades_history", + "set_risk", + "get_risk", + "get_currencies_with_status", + "get_matrix_list", + "get_portfolio_holdings", + "get_portfolio_current_value", + "get_global_portfolio_currencies_amounts", + "get_global_portfolio_currencies_values", + "trigger_portfolios_refresh", + "async_trigger_portfolios_refresh", + "get_global_profitability", + "get_reference_market", + "AbstractBotInterface", + "LOGGER", + "EOL", + "NO_TRADER_MESSAGE", + "NO_CURRENCIES_MESSAGE", + "get_all_positions", + "close_positions", + "async_close_positions", +] diff --git a/packages/services/octobot_services/interfaces/abstract_interface.py b/packages/services/octobot_services/interfaces/abstract_interface.py new file mode 100644 index 0000000000..d6e0e795a3 --- /dev/null +++ b/packages/services/octobot_services/interfaces/abstract_interface.py @@ -0,0 +1,89 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import typing + +import octobot_commons.channels_name as channels_names +import async_channel.util as channel_creator +import async_channel.channels as channels +import octobot_services.channel as service_channels +import octobot_services.abstract_service_user as abstract_service_user +import octobot_services.util as util + +if typing.TYPE_CHECKING: + import octobot.octobot_api as octobot_api + +class AbstractInterface(abstract_service_user.AbstractServiceUser, util.ReturningStartable, util.ExchangeWatcher): + __metaclass__ = abc.ABCMeta + # The service required to run this interface + REQUIRED_SERVICES = None + + # Constants that will be used by all interfaces + bot_id: str = None # type: ignore + project_name: str = None # type: ignore + project_version: str = None # type: ignore + enabled: bool = True + + def __init__(self, config): + abstract_service_user.AbstractServiceUser.__init__(self, config) + util.ExchangeWatcher.__init__(self) + + async def _initialize_impl(self, backtesting_enabled, edited_config) -> bool: + if await abstract_service_user.AbstractServiceUser._initialize_impl(self, backtesting_enabled, edited_config): + await self._create_user_commands_channel_if_not_existing() + return True + return False + + @staticmethod + def initialize_global_project_data( + bot_id: str, project_name: str, project_version: str + ): + AbstractInterface.bot_id = bot_id + AbstractInterface.project_name = project_name + AbstractInterface.project_version = project_version + + @staticmethod + def get_bot_api() -> "octobot_api.OctoBotAPI": + try: + import octobot.octobot_api as octobot_api + return octobot_api.OctoBotAPIProvider.instance().get_api(AbstractInterface.bot_id) + except ImportError as err: + raise ImportError("The OctoBot package is not installed") from err + + @staticmethod + def get_exchange_managers(): + try: + import octobot_trading.api + return octobot_trading.api.get_exchange_managers_from_exchange_ids( + AbstractInterface.get_bot_api().get_exchange_manager_ids() + ) + except ImportError: + AbstractInterface.get_logger().error("AbstractInterface requires OctoBot-Trading package installed") + + @staticmethod + def is_bot_ready(): + return AbstractInterface.get_bot_api().is_initialized() + + @abc.abstractmethod + async def stop(self): + raise NotImplementedError(f"stop is not implemented for {self.get_name()}") + + @staticmethod + async def _create_user_commands_channel_if_not_existing() -> None: + try: + channels.get_chan(channels_names.OctoBotUserChannelsName.USER_COMMANDS_CHANNEL.value) + except KeyError: + await channel_creator.create_channel_instance(service_channels.UserCommandsChannel, channels.set_chan) diff --git a/packages/services/octobot_services/interfaces/bots/__init__.py b/packages/services/octobot_services/interfaces/bots/__init__.py new file mode 100644 index 0000000000..fb357169a7 --- /dev/null +++ b/packages/services/octobot_services/interfaces/bots/__init__.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.logging.logging_util import get_logger + +from octobot_services.interfaces.bots import abstract_bot_interface + +from octobot_services.interfaces.bots.abstract_bot_interface import ( + AbstractBotInterface, +) + +LOGGER = get_logger(__name__) +EOL = "\n" +NO_TRADER_MESSAGE = """OctoBot is either starting or there is no trader is activated in my config/config.json file. +See https://github.com/Drakkar-Software/OctoBot/wiki if you need help with my configuration.""" +NO_CURRENCIES_MESSAGE = "No cryptocurrencies are in my activated profile.\n" \ + "See https://github.com/Drakkar-Software/OctoBot/wiki/Configuration#cryptocurrencies " \ + "if you need help with my cryptocurrencies configuration.""" +UNAUTHORIZED_USER_MESSAGE = "Hello, I dont talk to strangers." + +__all__ = [ + "AbstractBotInterface", + "LOGGER", + "EOL", + "NO_TRADER_MESSAGE", + "NO_CURRENCIES_MESSAGE" +] diff --git a/packages/services/octobot_services/interfaces/bots/abstract_bot_interface.py b/packages/services/octobot_services/interfaces/bots/abstract_bot_interface.py new file mode 100644 index 0000000000..2d58073467 --- /dev/null +++ b/packages/services/octobot_services/interfaces/bots/abstract_bot_interface.py @@ -0,0 +1,398 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc + +import octobot_commons.constants as common_constants +import octobot_commons.pretty_printer as pretty_printer +import octobot_commons.timestamp_util as timestamp_util +import octobot_commons.dict_util as dict_util + +import octobot_trading.api as trading_api +import octobot_trading.constants as trading_constants + +import octobot_services.interfaces as interfaces +import octobot_services.constants as constants +import octobot_commons.constants as commons_constants + + +class AbstractBotInterface(interfaces.AbstractInterface): + __metaclass__ = abc.ABCMeta + + @staticmethod + def enable(config, is_enabled, associated_config=None): + if constants.CONFIG_INTERFACES not in config: + config[constants.CONFIG_INTERFACES] = {} + if associated_config not in config[constants.CONFIG_INTERFACES]: + config[constants.CONFIG_INTERFACES][associated_config] = {} + config[constants.CONFIG_INTERFACES][associated_config][common_constants.CONFIG_ENABLED_OPTION] = is_enabled + + @staticmethod + def is_enabled(config, associated_config=None): + return constants.CONFIG_INTERFACES in config \ + and associated_config in config[constants.CONFIG_INTERFACES] \ + and common_constants.CONFIG_ENABLED_OPTION in config[constants.CONFIG_INTERFACES][associated_config] \ + and config[constants.CONFIG_INTERFACES][associated_config][common_constants.CONFIG_ENABLED_OPTION] + + @staticmethod + def _is_valid_user(user_name, associated_config=None): + config_interface = interfaces.get_global_config()[constants.CONFIG_CATEGORY_SERVICES][associated_config] + + white_list = config_interface[constants.CONFIG_USERNAMES_WHITELIST] \ + if constants.CONFIG_USERNAMES_WHITELIST in config_interface else None + + is_valid = not white_list or user_name in white_list or f"@{user_name}" in white_list + + return is_valid, white_list + + @staticmethod + def get_command_configuration(markdown=False): + _, bold, code = pretty_printer.get_markers(markdown) + message = f"{bold}My configuration:{bold}{interfaces.EOL}{interfaces.EOL}" + + message += f"{bold}Traders: {bold}{interfaces.EOL}" + if interfaces.has_trader(): + has_real_trader, has_simulated_trader = interfaces.has_real_and_or_simulated_traders() + if has_real_trader: + message += f"{code}- Real trader{code}{interfaces.EOL}" + if has_simulated_trader: + message += f"{code}- Simulated trader{code}{interfaces.EOL}" + else: + message += f"{code}- No activated trader{code}{interfaces.EOL}" + + message += f"{interfaces.EOL}{bold}Exchanges:{bold}{interfaces.EOL}" + for exchange_name in trading_api.get_exchange_names(): + message += f"{code}- {exchange_name.capitalize()}{code}{interfaces.EOL}" + + try: + import octobot_evaluators.api as evaluators_api + import octobot_evaluators.enums as evaluators_enums + tentacle_setup_config = interfaces.get_bot_api().get_tentacles_setup_config() + message += f"{interfaces.EOL}{bold}Evaluators:{bold}{interfaces.EOL}" + evaluators = evaluators_api.get_evaluator_classes_from_type( + evaluators_enums.EvaluatorMatrixTypes.TA.value, tentacle_setup_config) + evaluators += evaluators_api.get_evaluator_classes_from_type( + evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, tentacle_setup_config) + evaluators += evaluators_api.get_evaluator_classes_from_type( + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value, tentacle_setup_config) + for evaluator in evaluators: + message += f"{code}- {evaluator.get_name()}{code}{interfaces.EOL}" + + message += f"{interfaces.EOL}{bold}Strategies:{bold}{interfaces.EOL}" + for strategy in evaluators_api.get_evaluator_classes_from_type( + evaluators_enums.EvaluatorMatrixTypes.STRATEGIES.value, tentacle_setup_config): + message += f"{code}- {strategy.get_name()}{code}{interfaces.EOL}" + except ImportError: + message += f"{interfaces.EOL}{bold}Impossible to retrieve evaluation configuration: requires OctoBot-Evaluators " \ + f"package installed{bold}{interfaces.EOL}" + try: + trading_mode = interfaces.get_bot_api().get_trading_mode() + except IndexError: + # no activated trader + trading_mode = None + if trading_mode: + message += f"{interfaces.EOL}{bold}Trading mode:{bold}{interfaces.EOL}" + message += f"{code}- {trading_mode.get_name()}{code}" + + return message + + @staticmethod + def get_command_market_status(markdown=False): + _, bold, code = pretty_printer.get_markers(markdown) + message = f"{bold}My cryptocurrencies evaluations are:{bold} {interfaces.EOL}{interfaces.EOL}" + at_least_one_currency = False + for currency_pair, currency_info in interfaces.get_currencies_with_status().items(): + at_least_one_currency = True + message += f"{code}{currency_pair}:{code}{interfaces.EOL}" + for _, evaluation in currency_info.items(): + message += f"{code}- {evaluation[2].capitalize()}: {evaluation[0]}{code}{interfaces.EOL}" + if not at_least_one_currency: + message += f"{code}{interfaces.NO_CURRENCIES_MESSAGE}{code}{interfaces.EOL}" + risk = interfaces.get_risk() + if risk: + message += f"{interfaces.EOL}{code}My current risk is: {interfaces.get_risk()}{code}" + return message + + @staticmethod + def _print_trades(trades_history, trader_str, markdown=False): + _, bold, code = pretty_printer.get_markers(markdown) + trades_history_string = f"{bold}{trader_str}{bold}{code}Trades :{interfaces.EOL}{code}" + if trades_history: + for trade in trades_history: + exchange_name = trading_api.get_trade_exchange_name(trade) + trades_history_string += \ + f"{pretty_printer.trade_pretty_printer(exchange_name, trade, markdown=markdown)}{interfaces.EOL}" + else: + trades_history_string += f"{code}No trade yet.{code}" + return trades_history_string + + @staticmethod + def get_command_trades_history(markdown=False): + has_real_trader, has_simulated_trader = interfaces.has_real_and_or_simulated_traders() + real_trades_history, simulated_trades_history = interfaces.get_trades_history() + + trades_history_string = "" + if has_real_trader: + trades_history_string += AbstractBotInterface._print_trades(real_trades_history, + trading_constants.REAL_TRADER_STR, + markdown) + + if has_simulated_trader: + trades_history_string += f"{interfaces.EOL}" \ + f"{AbstractBotInterface._print_trades(simulated_trades_history, trading_constants.SIMULATOR_TRADER_STR, markdown)}" + + if not trades_history_string: + trades_history_string = interfaces.NO_TRADER_MESSAGE + + return trades_history_string + + @staticmethod + def _print_open_orders(open_orders, trader_str, markdown=False): + _, bold, code = pretty_printer.get_markers(markdown) + orders_string = f"{bold}{trader_str}{bold}{code}Open orders :{code}{interfaces.EOL}" + if open_orders: + for order in open_orders: + exchange_name = trading_api.get_order_exchange_name(order).capitalize() + orders_string += pretty_printer.open_order_pretty_printer(exchange_name, + trading_api.order_to_dict(order), + markdown=markdown) + interfaces.EOL + else: + orders_string += f"{code}No open order yet.{code}" + return orders_string + + @staticmethod + def get_command_open_orders(markdown=False): + has_real_trader, has_simulated_trader = interfaces.has_real_and_or_simulated_traders() + portfolio_real_open_orders, portfolio_simulated_open_orders = interfaces.get_all_open_orders() + + orders_string = "" + if has_real_trader: + orders_string += AbstractBotInterface._print_open_orders(portfolio_real_open_orders, + trading_constants.REAL_TRADER_STR, + markdown) + + if has_simulated_trader: + message = AbstractBotInterface._print_open_orders(portfolio_simulated_open_orders, + trading_constants.SIMULATOR_TRADER_STR, + markdown) + orders_string += f"{interfaces.EOL}{message}" + + if not orders_string: + orders_string = interfaces.NO_TRADER_MESSAGE + + return orders_string + + @staticmethod + def get_command_fees(markdown=False): + _, bold, _ = pretty_printer.get_markers(markdown) + real_trader_fees, simulated_trader_fees = interfaces.get_total_paid_fees() + result_str = "" + if real_trader_fees is not None: + result_str = f"{bold}{trading_constants.REAL_TRADER_STR}{bold}{constants.PAID_FEES_STR}: " \ + f"{pretty_printer.pretty_print_dict(real_trader_fees, markdown=markdown)}" + if simulated_trader_fees is not None: + result_str = f"{result_str}\n{bold}{trading_constants.SIMULATOR_TRADER_STR}{bold}" \ + f"{constants.PAID_FEES_STR}: " \ + f"{pretty_printer.pretty_print_dict(simulated_trader_fees, markdown=markdown)}" + if not result_str: + result_str = interfaces.NO_TRADER_MESSAGE + return result_str + + @staticmethod + async def get_command_sell_all_currencies(): + try: + await interfaces.async_cancel_all_open_orders() + nb_created_orders = len(await interfaces.async_sell_all_currencies()) + if nb_created_orders: + return f"Currencies sold in {nb_created_orders} order{'s' if nb_created_orders > 1 else ''}." + else: + return "Nothing to sell." + except Exception as e: + return f"An error occurred: {e.__class__.__name__}" + + @staticmethod + async def get_command_sell_all(currency): + try: + await interfaces.async_cancel_all_open_orders(currency) + nb_created_orders = len(await interfaces.async_sell_all(currency)) + if nb_created_orders: + return f"{currency} sold in {nb_created_orders} order{'s' if nb_created_orders > 1 else ''}." + else: + return f"Nothing to sell for {currency}." + except Exception as e: + return f"An error occurred: {e.__class__.__name__}" + + @staticmethod + def _print_portfolio( + current_val, ref_market, portfolio, currency_values, trader_str, markdown=False + ): + _, bold, code = pretty_printer.get_markers(markdown) + portfolios_string = ( + f"{bold}{trader_str}{bold}Portfolio value : " + f"{bold}{pretty_printer.get_min_string_from_number(current_val)} {ref_market}{bold}" + f"{interfaces.EOL}" + ) + portfolio_str = pretty_printer.global_portfolio_pretty_print( + global_portfolio=portfolio, currency_values=currency_values, + ref_market_name=ref_market, markdown=markdown) + + if not portfolio_str: + portfolio_str = "Nothing there." + portfolios_string += f"{interfaces.EOL}{code}{portfolio_str}{code}" + return portfolios_string + + @staticmethod + def get_command_portfolio(markdown=False): + has_real_trader, has_simulated_trader, \ + portfolio_real_current_value, portfolio_simulated_current_value = interfaces.get_portfolio_current_value() + reference_market = interfaces.get_reference_market() + real_global_portfolio, simulated_global_portfolio = interfaces.get_global_portfolio_currencies_amounts() + currency_values = interfaces.get_global_portfolio_currencies_values() + + portfolios_string = "" + if has_real_trader: + portfolios_string += AbstractBotInterface._print_portfolio( + portfolio_real_current_value, + reference_market, + real_global_portfolio, + currency_values, + trading_constants.REAL_TRADER_STR, + markdown, + ) + + if has_simulated_trader: + portfolio_str = AbstractBotInterface._print_portfolio( + portfolio_simulated_current_value, + reference_market, + simulated_global_portfolio, + currency_values, + trading_constants.SIMULATOR_TRADER_STR, + markdown, + ) + portfolios_string += f"{interfaces.EOL}{portfolio_str}" + + if not portfolios_string: + portfolios_string = interfaces.NO_TRADER_MESSAGE + + return portfolios_string + + @staticmethod + def get_command_profitability(markdown=False): + _, bold, code = pretty_printer.get_markers(markdown) + has_real_trader, has_simulated_trader, \ + real_global_profitability, simulated_global_profitability, \ + real_percent_profitability, simulated_percent_profitability, \ + real_no_trade_profitability, simulated_no_trade_profitability, \ + market_average_profitability = interfaces.get_global_profitability() + profitability_string = "" + if has_real_trader: + real_profitability_pretty = pretty_printer.portfolio_profitability_pretty_print( + real_global_profitability, None, interfaces.get_reference_market()) + profitability_string = \ + f"{bold}{trading_constants.REAL_TRADER_STR}{bold}Global profitability : {code}{real_profitability_pretty}" \ + f"({pretty_printer.get_min_string_from_number(real_percent_profitability, 2)}%){code}, market: {code}" \ + f"{pretty_printer.get_min_string_from_number(market_average_profitability, 2)}%{code}, initial portfolio:" \ + f" {code}{pretty_printer.get_min_string_from_number(real_no_trade_profitability, 2)}%{code}{interfaces.EOL}" + if has_simulated_trader: + simulated_profitability_pretty = \ + pretty_printer.portfolio_profitability_pretty_print( + simulated_global_profitability, None, interfaces.get_reference_market()) + profitability_string += \ + f"{bold}{trading_constants.SIMULATOR_TRADER_STR}{bold}Global profitability : {code}" \ + f"{simulated_profitability_pretty}" \ + f"({pretty_printer.get_min_string_from_number(simulated_percent_profitability, 2)}%){code}, " \ + f"market: {code}{pretty_printer.get_min_string_from_number(market_average_profitability, 2)}%{code}, " \ + f"initial portfolio: {code}" \ + f"{pretty_printer.get_min_string_from_number(simulated_no_trade_profitability, 2)}%{code}" + if not profitability_string: + profitability_string = interfaces.NO_TRADER_MESSAGE + + return profitability_string + + @staticmethod + def get_command_ping(): + return f"I'm alive since " \ + f"{timestamp_util.convert_timestamp_to_datetime(interfaces.get_bot_api().get_start_time(), '%Y-%m-%d %H:%M:%S', local_timezone=True)}." + + @staticmethod + def get_command_version(): + return f"{interfaces.AbstractInterface.project_name} {interfaces.AbstractInterface.project_version}" + + @staticmethod + def get_command_start(markdown=False): + if markdown: + return "Hello, I'm [OctoBot](https://github.com/Drakkar-Software/OctoBot), type /help to know my skills." + else: + return "Hello, I'm OctoBot, type /help to know my skills." + + @staticmethod + async def set_command_portfolios_refresh(): + return await interfaces.async_trigger_portfolios_refresh() + + @staticmethod + def set_command_risk(new_risk): + updated_risk = interfaces.set_risk(new_risk) + risk_config = { + commons_constants.CONFIG_TRADING: { + commons_constants.CONFIG_RISK: float(updated_risk) + } + } + AbstractBotInterface._update_edited_config(risk_config) + return updated_risk + + @staticmethod + def set_command_stop(): + interfaces.get_bot_api().stop_bot() + + async def set_command_pause(self): + await interfaces.async_cancel_all_open_orders() + interfaces.set_enable_trading(False) + self.paused = True + + def set_command_resume(self): + interfaces.set_enable_trading(True) + self.paused = False + + @staticmethod + def set_command_restart(): + interfaces.get_bot_api().restart_bot() + + @staticmethod + def _split_messages_if_too_long(message, max_length, preferred_separator): + if len(message) >= max_length: + # split message using preferred_separator as separator + messages_list = [] + first_part = message[:max_length] + end_index = first_part.rfind(preferred_separator) + if end_index != -1: + messages_list.append(message[:end_index]) + else: + messages_list.append(message[:max_length]) + end_index = len(first_part) - 1 + + if end_index < len(message) - 1: + remaining = message[end_index + 1:] + return messages_list + AbstractBotInterface._split_messages_if_too_long(remaining, max_length, + preferred_separator) + else: + return messages_list + else: + return [message] + + @staticmethod + def _update_edited_config(partial_config_update): + config = interfaces.get_edited_config(dict_only=False) + dict_util.nested_update_dict(config.config, partial_config_update) + config.save() diff --git a/packages/services/octobot_services/interfaces/interface_factory.py b/packages/services/octobot_services/interfaces/interface_factory.py new file mode 100644 index 0000000000..080c4a7e4a --- /dev/null +++ b/packages/services/octobot_services/interfaces/interface_factory.py @@ -0,0 +1,32 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.tentacles_management as tentacles_management + +import octobot_services.interfaces as interfaces + + +class InterfaceFactory: + def __init__(self, config): + self.config = config + + @staticmethod + def get_available_interfaces() -> list: + return [interface_class + for interface_class in tentacles_management.get_all_classes_from_parent(interfaces.AbstractInterface) + if not tentacles_management.is_abstract_using_inspection_and_class_naming(interface_class)] + + async def create_interface(self, interface_class) -> interfaces.AbstractInterface: + return interface_class(self.config) diff --git a/packages/services/octobot_services/interfaces/util/__init__.py b/packages/services/octobot_services/interfaces/util/__init__.py new file mode 100644 index 0000000000..41e6e1b90a --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/__init__.py @@ -0,0 +1,121 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.interfaces.util import bot +from octobot_services.interfaces.util import util +from octobot_services.interfaces.util import order +from octobot_services.interfaces.util import position +from octobot_services.interfaces.util import trader +from octobot_services.interfaces.util import portfolio +from octobot_services.interfaces.util import profitability + +from octobot_services.interfaces.util.bot import ( + get_bot_api, + get_exchange_manager_ids, + get_global_config, + get_startup_config, + get_edited_config, + get_startup_tentacles_config, + get_edited_tentacles_config, + set_edited_tentacles_config, +) +from octobot_services.interfaces.util.util import ( + get_exchange_managers, + run_in_bot_main_loop, + run_in_bot_async_executor, +) +from octobot_services.interfaces.util.order import ( + get_all_open_orders, + cancel_orders, + cancel_all_open_orders, + async_cancel_orders, + async_cancel_all_open_orders, +) +from octobot_services.interfaces.util.position import ( + get_all_positions, + close_positions, + async_close_positions, +) +from octobot_services.interfaces.util.trader import ( + has_trader, + has_real_and_or_simulated_traders, + sell_all_currencies, + sell_all, + async_sell_all_currencies, + async_sell_all, + set_enable_trading, + get_total_paid_fees, + get_trades_history, + set_risk, + get_risk, + get_currencies_with_status, + get_matrix_list, +) +from octobot_services.interfaces.util.portfolio import ( + get_portfolio_holdings, + get_portfolio_current_value, + get_global_portfolio_currencies_amounts, + get_global_portfolio_currencies_values, + trigger_portfolios_refresh, + async_trigger_portfolios_refresh, +) +from octobot_services.interfaces.util.profitability import ( + get_global_profitability, + get_reference_market, +) + +__all__ = [ + "get_bot_api", + "get_exchange_manager_ids", + "get_global_config", + "get_startup_config", + "get_edited_config", + "get_startup_tentacles_config", + "get_edited_tentacles_config", + "set_edited_tentacles_config", + "get_exchange_managers", + "run_in_bot_main_loop", + "run_in_bot_async_executor", + "get_all_open_orders", + "cancel_orders", + "cancel_all_open_orders", + "async_cancel_orders", + "async_cancel_all_open_orders", + "has_trader", + "has_real_and_or_simulated_traders", + "sell_all_currencies", + "sell_all", + "async_sell_all_currencies", + "async_sell_all", + "set_enable_trading", + "get_total_paid_fees", + "get_trades_history", + "set_risk", + "get_risk", + "get_currencies_with_status", + "get_matrix_list", + "get_portfolio_holdings", + "get_portfolio_current_value", + "get_global_portfolio_currencies_amounts", + "get_global_portfolio_currencies_values", + "trigger_portfolios_refresh", + "async_trigger_portfolios_refresh", + "get_global_profitability", + "get_reference_market", + "get_all_positions", + "close_positions", + "async_close_positions", +] diff --git a/packages/services/octobot_services/interfaces/util/bot.py b/packages/services/octobot_services/interfaces/util/bot.py new file mode 100644 index 0000000000..70cf40915c --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/bot.py @@ -0,0 +1,52 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import typing +import octobot_services.interfaces as interfaces + + +if typing.TYPE_CHECKING: + import octobot.octobot_api as octobot_api + +def get_bot_api() -> "octobot_api.OctoBotAPI": + return interfaces.AbstractInterface.get_bot_api() + +def get_exchange_manager_ids(): + return get_bot_api().get_exchange_manager_ids() + + +def get_global_config(): + return get_bot_api().get_global_config() + + +def get_startup_config(dict_only=True): + return get_bot_api().get_startup_config(dict_only=dict_only) + + +def get_edited_config(dict_only=True): + return get_bot_api().get_edited_config(dict_only=dict_only) + + +def get_startup_tentacles_config(): + return get_bot_api().get_startup_tentacles_config() + + +def get_edited_tentacles_config(): + return get_bot_api().get_edited_tentacles_config() + + +def set_edited_tentacles_config(config): + return get_bot_api().set_edited_tentacles_config(config) diff --git a/packages/services/octobot_services/interfaces/util/order.py b/packages/services/octobot_services/interfaces/util/order.py new file mode 100644 index 0000000000..f43cfae9f4 --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/order.py @@ -0,0 +1,70 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.api as trading_api +import octobot_trading.errors as trading_errors + +import octobot_commons.logging as logging + +import octobot_services.interfaces as interfaces + + +def get_all_open_orders(): + simulated_open_orders = [] + real_open_orders = [] + + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + if trading_api.is_trader_simulated(exchange_manager): + simulated_open_orders += trading_api.get_open_orders(exchange_manager) + else: + real_open_orders += trading_api.get_open_orders(exchange_manager) + + return real_open_orders, simulated_open_orders + + +def cancel_orders(order_ids): + return interfaces.run_in_bot_main_loop(async_cancel_orders(order_ids)) + + +async def async_cancel_orders(order_ids): + removed_count = 0 + if order_ids: + for order_id in order_ids: + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + try: + removed_count += 1 if ( + await trading_api.cancel_order_with_id( + exchange_manager, order_id, wait_for_cancelling=False + ) + )[0] else 0 + except (trading_errors.OrderCancelError, trading_errors.UnexpectedExchangeSideOrderStateError) \ + as err: + logging.get_logger("InterfaceOrderUtil").error(f"Skipping order cancel: {err}") + return removed_count + + +def cancel_all_open_orders(currency=None): + return interfaces.run_in_bot_main_loop(async_cancel_all_open_orders(currency=currency)) + + +async def async_cancel_all_open_orders(currency=None): + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + if currency is None: + await trading_api.cancel_all_open_orders(exchange_manager) + else: + await trading_api.cancel_all_open_orders_with_currency(exchange_manager, currency) diff --git a/packages/services/octobot_services/interfaces/util/portfolio.py b/packages/services/octobot_services/interfaces/util/portfolio.py new file mode 100644 index 0000000000..1cdf89ff92 --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/portfolio.py @@ -0,0 +1,131 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal + +import octobot_commons.constants as constants + +import octobot_services.interfaces as interfaces +import octobot_trading +import octobot_trading.api as trading_api + + +def _merge_portfolio_values(portfolio1, portfolio2): + for key, value in portfolio2.items(): + if key in portfolio1: + portfolio1[key] += value + else: + portfolio1[key] = value + return portfolio1 + + +def get_portfolio_holdings(): + real_currency_portfolio = {} + simulated_currency_portfolio = {} + + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + + trader_currencies_values = trading_api.get_current_holdings_values(exchange_manager) + if trading_api.is_trader_simulated(exchange_manager): + _merge_portfolio_values(simulated_currency_portfolio, trader_currencies_values) + else: + _merge_portfolio_values(real_currency_portfolio, trader_currencies_values) + return real_currency_portfolio, simulated_currency_portfolio + + +def get_portfolio_current_value(): + simulated_value = 0 + real_value = 0 + has_real_trader = False + has_simulated_trader = False + + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + + current_value = trading_api.get_current_portfolio_value(exchange_manager) + + # current_value might be 0 if no trades have been made / canceled => use origin value + if current_value == 0: + current_value = trading_api.get_origin_portfolio_value(exchange_manager) + + if trading_api.is_trader_simulated(exchange_manager): + simulated_value += current_value + has_simulated_trader = True + else: + real_value += current_value + has_real_trader = True + + return has_real_trader, has_simulated_trader, real_value, simulated_value + + +def _get_portfolios(): + simulated_portfolios = [] + real_portfolios = [] + + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + if trading_api.is_trader_simulated(exchange_manager): + simulated_portfolios.append(trading_api.get_portfolio(exchange_manager)) + else: + real_portfolios.append(trading_api.get_portfolio(exchange_manager)) + return real_portfolios, simulated_portfolios + + +def _merge_portfolios(base_portfolio, to_merge_portfolio): + for currency, asset in to_merge_portfolio.items(): + if currency not in base_portfolio: + base_portfolio[currency] = { + constants.PORTFOLIO_AVAILABLE: decimal.Decimal(0), + constants.PORTFOLIO_TOTAL: decimal.Decimal(0) + } + + base_portfolio[currency][constants.PORTFOLIO_AVAILABLE] += asset.available + base_portfolio[currency][constants.PORTFOLIO_TOTAL] += asset.total + + +def get_global_portfolio_currencies_amounts(): + real_portfolios, simulated_portfolios = _get_portfolios() + real_global_portfolio = {} + simulated_global_portfolio = {} + + for portfolio in simulated_portfolios: + _merge_portfolios(simulated_global_portfolio, portfolio) + + for portfolio in real_portfolios: + _merge_portfolios(real_global_portfolio, portfolio) + + return real_global_portfolio, simulated_global_portfolio + + +def get_global_portfolio_currencies_values() -> dict: + return trading_api.get_global_portfolio_currencies_values( + interfaces.get_exchange_managers() + ) + + +def trigger_portfolios_refresh(): + return interfaces.run_in_bot_main_loop(async_trigger_portfolios_refresh()) + + +async def async_trigger_portfolios_refresh(): + at_least_one = False + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + at_least_one = True + await trading_api.refresh_real_trader_portfolio(exchange_manager) + + if not at_least_one: + raise RuntimeError("no real trader to update.") diff --git a/packages/services/octobot_services/interfaces/util/position.py b/packages/services/octobot_services/interfaces/util/position.py new file mode 100644 index 0000000000..9c594f9b16 --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/position.py @@ -0,0 +1,53 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.api as trading_api +import octobot_trading.enums as trading_enums + +import octobot_services.interfaces as interfaces + + +def get_all_positions(): + simulated_positions = [] + real_positions = [] + + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + if trading_api.is_trader_simulated(exchange_manager): + simulated_positions += trading_api.get_positions(exchange_manager) + else: + real_positions += trading_api.get_positions(exchange_manager) + + return real_positions, simulated_positions + + +def close_positions(positions_descs): + return interfaces.run_in_bot_main_loop(async_close_positions(positions_descs)) + + +async def async_close_positions(positions_descs): + removed_count = 0 + if positions_descs: + for positions_desc in positions_descs: + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + removed_count += 1 if ( + await trading_api.close_position( + exchange_manager, + positions_desc["symbol"], + trading_enums.PositionSide(positions_desc["side"]), + ) + ) else 0 + return removed_count diff --git a/packages/services/octobot_services/interfaces/util/profitability.py b/packages/services/octobot_services/interfaces/util/profitability.py new file mode 100644 index 0000000000..72dabb8207 --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/profitability.py @@ -0,0 +1,63 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.api as trading_api + +import octobot_services.interfaces as interfaces + + +def get_global_profitability(): + simulated_global_profitability = 0 + real_global_profitability = 0 + simulated_no_trade_profitability = 0 + real_no_trade_profitability = 0 + simulated_full_origin_value = 0 + real_full_origin_value = 0 + market_average_profitability = None + has_real_trader = False + has_simulated_trader = False + + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + + current_value, _, _, market_average_profitability, initial_portfolio_current_profitability = \ + trading_api.get_profitability_stats(exchange_manager) + + if trading_api.is_trader_simulated(exchange_manager): + simulated_full_origin_value += trading_api.get_origin_portfolio_value(exchange_manager) + simulated_global_profitability += current_value + simulated_no_trade_profitability += initial_portfolio_current_profitability + has_simulated_trader = True + else: + real_full_origin_value += trading_api.get_origin_portfolio_value(exchange_manager) + real_global_profitability += current_value + real_no_trade_profitability += initial_portfolio_current_profitability + has_real_trader = True + + simulated_percent_profitability = simulated_global_profitability * 100 / simulated_full_origin_value \ + if simulated_full_origin_value > 0 else 0 + real_percent_profitability = real_global_profitability * 100 / real_full_origin_value \ + if real_full_origin_value > 0 else 0 + + return has_real_trader, has_simulated_trader, \ + real_global_profitability, simulated_global_profitability, \ + real_percent_profitability, simulated_percent_profitability, \ + real_no_trade_profitability, simulated_no_trade_profitability, \ + market_average_profitability + + +def get_reference_market() -> str: + # The reference market is the currency unit of the calculated quantity value + return trading_api.get_reference_market(interfaces.get_global_config()) diff --git a/packages/services/octobot_services/interfaces/util/trader.py b/packages/services/octobot_services/interfaces/util/trader.py new file mode 100644 index 0000000000..0c46eec233 --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/trader.py @@ -0,0 +1,189 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.logging as logging + +import octobot_trading.api as trading_api + +import octobot_services.interfaces as interfaces + + +def has_trader(): + try: + return trading_api.has_trader(_first_exchange_manager()) + except StopIteration: + return False + + +def has_real_and_or_simulated_traders(): + has_real_trader = False + has_simulated_trader = False + exchange_managers = interfaces.get_exchange_managers() + for exchange_manager in exchange_managers: + if trading_api.is_trader_simulated(exchange_manager): + has_simulated_trader = True + else: + has_real_trader = True + return has_real_trader, has_simulated_trader + + +def sell_all_currencies(): + return interfaces.run_in_bot_main_loop(async_sell_all_currencies()) + + +async def async_sell_all_currencies(): + orders = [] + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + orders += await trading_api.sell_all_everything_for_reference_market(exchange_manager) + return orders + + +def sell_all(currency): + return interfaces.run_in_bot_main_loop(async_sell_all(currency)) + + +async def async_sell_all(currency): + orders = [] + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + orders += await trading_api.sell_currency_for_reference_market(exchange_manager, currency) + return orders + + +def set_enable_trading(enable): + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.has_trader(exchange_manager): + if trading_api.is_trader_enabled_in_config_from_exchange_manager(exchange_manager): + trading_api.set_trading_enabled(exchange_manager, enable) + + +def _merge_trader_fees(current_fees, exchange_manager): + current_fees_dict = current_fees if current_fees else {} + for key, val in trading_api.get_total_paid_trading_fees(exchange_manager).items(): + if key in current_fees_dict: + current_fees_dict[key] += val + else: + current_fees_dict[key] = val + return current_fees_dict + + +def get_total_paid_fees(bot=None): + real_trader_fees = None + simulated_trader_fees = None + + for exchange_manager in interfaces.get_exchange_managers(bot): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + if trading_api.is_trader_simulated(exchange_manager): + simulated_trader_fees = _merge_trader_fees(simulated_trader_fees, exchange_manager) + else: + real_trader_fees = _merge_trader_fees(real_trader_fees, exchange_manager) + return real_trader_fees, simulated_trader_fees + + +def get_trades_history(bot_api=None, symbol=None, independent_backtesting=None, since=None, as_dict=False): + simulated_trades_history = [] + real_trades_history = [] + + for exchange_manager in interfaces.get_exchange_managers(bot_api=bot_api, + independent_backtesting=independent_backtesting): + if trading_api.is_trader_existing_and_enabled(exchange_manager): + if trading_api.is_trader_simulated(exchange_manager): + simulated_trades_history += trading_api.get_trade_history(exchange_manager, None, symbol, since, as_dict) + else: + real_trades_history += trading_api.get_trade_history(exchange_manager, None, symbol, since, as_dict) + return real_trades_history, simulated_trades_history + + +def set_risk(risk): + result_risk = None + for exchange_manager in interfaces.get_exchange_managers(): + if trading_api.has_trader(exchange_manager): + result_risk = trading_api.set_trader_risk(exchange_manager, risk) + return result_risk + + +def get_risk(): + try: + return trading_api.get_trader_risk(_first_exchange_manager()) \ + if trading_api.has_trader(_first_exchange_manager()) else None + except StopIteration: + return None + + +def get_currencies_with_status(): + evaluations_by_exchange_by_pair = {} + for exchange_manager in interfaces.get_exchange_managers(): + trading_modes = trading_api.get_trading_modes(exchange_manager) + for pair in trading_api.get_trading_pairs(exchange_manager): + if pair not in evaluations_by_exchange_by_pair: + evaluations_by_exchange_by_pair[pair] = {} + status_explanation = "N/A" + status = "N/A" + for trading_mode in trading_modes: + if trading_api.get_trading_mode_symbol(trading_mode) == pair \ + or trading_api.is_trading_mode_symbol_wildcard(trading_mode): + status_explanation, status = trading_api.get_trading_mode_current_state(trading_mode) + try: + status = round(status, 3) + except TypeError: + pass + break + evaluations_by_exchange_by_pair[pair][trading_api.get_exchange_manager_id(exchange_manager)] = \ + [status_explanation.replace("_", " "), status, + trading_api.get_exchange_name(exchange_manager).capitalize()] + return evaluations_by_exchange_by_pair + + +def _get_tentacles_values(evaluations, tentacle_type_node, exchange): + try: + import octobot_evaluators.api as evaluators_api + except ImportError: + logging.get_logger("InterfaceUtil").error("_get_tentacles_values requires OctoBot-Evaluators package installed") + return {} + for tentacle_name, tentacle_name_node in evaluators_api.get_children_list(tentacle_type_node).items(): + evaluations[exchange][tentacle_name] = {} + for cryptocurrency, cc_node in evaluators_api.get_children_list(tentacle_name_node).items(): + evaluations[exchange][tentacle_name][cryptocurrency] = {} + if evaluators_api.has_children(cc_node): + for symbol, symbol_node in evaluators_api.get_children_list(cc_node).items(): + if evaluators_api.has_children(symbol_node): + evaluations[exchange][tentacle_name][symbol] = {} + for time_frame, time_frame_node in evaluators_api.get_children_list(symbol_node).items(): + evaluations[exchange][tentacle_name][symbol][time_frame] = \ + evaluators_api.get_value(time_frame_node) + else: + evaluations[exchange][tentacle_name][symbol] = evaluators_api.get_value(symbol_node) + else: + evaluations[exchange][tentacle_name][cryptocurrency] = evaluators_api.get_value(cc_node) + + +def get_matrix_list(): + try: + import octobot_evaluators.api as evaluators_api + except ImportError: + logging.get_logger("InterfaceUtil").error("get_matrix_list requires OctoBot-Evaluators package installed") + return {} + evaluations = {} + matrix = evaluators_api.get_matrix(interfaces.get_bot_api().get_matrix_id()) + for exchange, exchange_node in evaluators_api.get_node_children_by_names(matrix).items(): + evaluations[exchange] = {} + for tentacle_type_node in evaluators_api.get_children_list(exchange_node).values(): + _get_tentacles_values(evaluations, tentacle_type_node, exchange) + return evaluations + + +def _first_exchange_manager(): + return next(iter(interfaces.get_exchange_managers())) diff --git a/packages/services/octobot_services/interfaces/util/util.py b/packages/services/octobot_services/interfaces/util/util.py new file mode 100644 index 0000000000..d4e2486a7b --- /dev/null +++ b/packages/services/octobot_services/interfaces/util/util.py @@ -0,0 +1,65 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import threading + +import octobot_commons.logging as logging +import octobot_commons.constants as commons_constants + +import octobot_trading.api as trading_api + +import octobot_services.interfaces as interfaces + + +def get_exchange_managers(bot_api=None, independent_backtesting=None, trading_exchanges_only=True): + if bot_api is not None: + return _filter_exchange_manager(trading_api.get_exchange_managers_from_exchange_ids( + bot_api.get_exchange_manager_ids()), trading_exchanges_only) + elif independent_backtesting is not None: + try: + import octobot.api as api + return _filter_exchange_manager( + trading_api.get_exchange_managers_from_exchange_ids( + api.get_independent_backtesting_exchange_manager_ids(independent_backtesting)), + trading_exchanges_only) + except ImportError: + logging.get_logger("octobot_services/interfaces/util/util.py").error( + "get_exchange_managers requires OctoBot package installed") + else: + return _filter_exchange_manager(interfaces.AbstractInterface.get_exchange_managers(), trading_exchanges_only) + + +def _filter_exchange_manager(exchange_managers, trading_exchanges_only): + if trading_exchanges_only: + return trading_api.get_trading_exchanges(exchange_managers) + return exchange_managers + + +def run_in_bot_main_loop(coroutine, blocking=True, log_exceptions=True, + timeout=commons_constants.DEFAULT_FUTURE_TIMEOUT): + if blocking: + return interfaces.get_bot_api().run_in_main_asyncio_loop(coroutine, log_exceptions=log_exceptions, + timeout=timeout) + else: + threading.Thread( + target=interfaces.get_bot_api().run_in_main_asyncio_loop, + name=f"run_in_bot_main_loop {coroutine.__name__}", + args=(coroutine,), + kwargs={"timeout": timeout} + ).start() + + +def run_in_bot_async_executor(coroutine): + return interfaces.get_bot_api().run_in_async_executor(coroutine) diff --git a/packages/services/octobot_services/interfaces/web/__init__.py b/packages/services/octobot_services/interfaces/web/__init__.py new file mode 100644 index 0000000000..265c50bcb9 --- /dev/null +++ b/packages/services/octobot_services/interfaces/web/__init__.py @@ -0,0 +1,25 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.interfaces.web import abstract_web_interface + +from octobot_services.interfaces.web.abstract_web_interface import ( + AbstractWebInterface, +) + +__all__ = [ + "AbstractWebInterface", +] diff --git a/packages/services/octobot_services/interfaces/web/abstract_web_interface.py b/packages/services/octobot_services/interfaces/web/abstract_web_interface.py new file mode 100644 index 0000000000..245373788c --- /dev/null +++ b/packages/services/octobot_services/interfaces/web/abstract_web_interface.py @@ -0,0 +1,22 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc + +import octobot_services.interfaces as interfaces + + +class AbstractWebInterface(interfaces.AbstractInterface): + __metaclass__ = abc.ABCMeta diff --git a/packages/services/octobot_services/managers/__init__.py b/packages/services/octobot_services/managers/__init__.py new file mode 100644 index 0000000000..6cd8126b62 --- /dev/null +++ b/packages/services/octobot_services/managers/__init__.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.managers import service_feed_manager +from octobot_services.managers import service_manager +from octobot_services.managers import interface_manager + +from octobot_services.managers.service_feed_manager import ( + ServiceFeedManager, +) +from octobot_services.managers.service_manager import ( + stop_services, +) +from octobot_services.managers.interface_manager import ( + start_interfaces, + start_interface, + stop_interfaces, +) + +__all__ = [ + "ServiceFeedManager", + "stop_services", + "start_interfaces", + "start_interface", + "stop_interfaces", +] diff --git a/packages/services/octobot_services/managers/interface_manager.py b/packages/services/octobot_services/managers/interface_manager.py new file mode 100644 index 0000000000..d8a50eec9e --- /dev/null +++ b/packages/services/octobot_services/managers/interface_manager.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.logging as logging + + +async def start_interfaces(interfaces: list): + started_interfaces = [] + for interface in interfaces: + if await interface.start(): + started_interfaces.append(interface) + return started_interfaces + + +async def start_interface(interface): + return await interface.start() + + +async def stop_interfaces(interfaces: list): + for interface in interfaces: + logging.get_logger(__name__).debug(f"Stopping {interface.get_name()} ...") + await interface.stop() + logging.get_logger(__name__).debug(f"Stopped {interface.get_name()}") diff --git a/packages/services/octobot_services/managers/service_feed_manager.py b/packages/services/octobot_services/managers/service_feed_manager.py new file mode 100644 index 0000000000..fffa6485d8 --- /dev/null +++ b/packages/services/octobot_services/managers/service_feed_manager.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.logging as logging +import octobot_services.service_feeds as service_feeds + + +class ServiceFeedManager: + + @staticmethod + async def start_service_feed(service_feed: service_feeds.AbstractServiceFeed, + backtesting_enabled: bool, + edited_config: dict): + if not service_feed.is_running and not service_feed.should_stop: + if await service_feed.initialize(backtesting_enabled, edited_config): + return await service_feed.start() + return False + + @staticmethod + async def stop_service_feed(service_feed: service_feeds.AbstractServiceFeed): + logging.get_logger(ServiceFeedManager.__name__).debug(f"Stopping {service_feed.get_name()} ...") + await service_feed.stop() + logging.get_logger(ServiceFeedManager.__name__).debug(f"Stopped {service_feed.get_name()}") diff --git a/packages/services/octobot_services/managers/service_manager.py b/packages/services/octobot_services/managers/service_manager.py new file mode 100644 index 0000000000..73b5fafe69 --- /dev/null +++ b/packages/services/octobot_services/managers/service_manager.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.logging as logging +import octobot_services.services as services + + +async def stop_services(): + for service_instance in _get_service_instances(): + try: + logging.get_logger(__name__).debug(f"Stopping {service_instance.get_name()} ...") + await service_instance.stop() + logging.get_logger(__name__).debug(f"Stopped {service_instance.get_name()}") + except Exception as e: + raise e + + +def _get_service_instances(): + return [service_class.instance() for service_class in services.ServiceFactory.get_available_services()] diff --git a/packages/services/octobot_services/notification/__init__.py b/packages/services/octobot_services/notification/__init__.py new file mode 100644 index 0000000000..a94d1e27f1 --- /dev/null +++ b/packages/services/octobot_services/notification/__init__.py @@ -0,0 +1,32 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.notification import notification +from octobot_services.notification.notification import ( + Notification, +) +from octobot_services.notification import formated_notifications + +from octobot_services.notification.formated_notifications import ( + OrderCreationNotification, + OrderEndNotification, +) + +__all__ = [ + "OrderCreationNotification", + "OrderEndNotification", + "Notification", +] diff --git a/packages/services/octobot_services/notification/formated_notifications.py b/packages/services/octobot_services/notification/formated_notifications.py new file mode 100644 index 0000000000..00811f218b --- /dev/null +++ b/packages/services/octobot_services/notification/formated_notifications.py @@ -0,0 +1,98 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc + +import octobot_commons.enums as common_enums +import octobot_commons.pretty_printer as pretty_printer + +import octobot_trading.constants as constants + +import octobot_services.notification as notifications +import octobot_services.enums as enums + + +class _OrderNotification(notifications.Notification): + def __init__(self, text, evaluator_notification: notifications.Notification): + super().__init__("", text, "", enums.NotificationSound.NO_SOUND, + common_enums.MarkdownFormat.IGNORE, + enums.NotificationLevel.INFO, + enums.NotificationCategory.TRADES, + evaluator_notification) + self._build_text() + + @abc.abstractmethod + def _build_text(self): + raise NotImplementedError("_build_text is not implemented") + + +class OrderCreationNotification(_OrderNotification): + def __init__(self, evaluator_notification: notifications.Notification, dict_order: dict, exchange_name: str): + self.dict_order = dict_order + self.exchange_name = exchange_name.capitalize() + super().__init__("Order created", evaluator_notification) + + def _build_text(self): + self.text = "" + self.markdown_text = "" + self.text += f"- {pretty_printer.open_order_pretty_printer(self.exchange_name, self.dict_order, markdown=False)}" + self.markdown_text += \ + f"- {pretty_printer.open_order_pretty_printer(self.exchange_name, self.dict_order, markdown=True)}" + + +class OrderEndNotification(_OrderNotification): + def __init__(self, order_previous_notification: notifications.Notification, + dict_order_filled: dict, exchange_name: str, + dict_orders_canceled: list, trade_profitability: float, portfolio_profitability: float, + portfolio_diff: float, add_profitability: bool, is_simulated: bool): + self.dict_order_filled = dict_order_filled + self.exchange_name = exchange_name.capitalize() + self.dict_orders_canceled = dict_orders_canceled + self.trade_profitability = trade_profitability + self.portfolio_profitability = portfolio_profitability + self.portfolio_diff = portfolio_diff + self.add_profitability = add_profitability + self.is_simulated = is_simulated + super().__init__("Order update", order_previous_notification) + + def _build_text(self): + self.text = "" + self.markdown_text = "" + trader_type = constants.SIMULATOR_TRADER_STR if self.is_simulated else constants.REAL_TRADER_STR + if self.dict_order_filled is not None: + self.text += f"{trader_type}Order(s) filled : " \ + f"\n- {pretty_printer.open_order_pretty_printer(self.exchange_name, self.dict_order_filled)}" + md_text = pretty_printer.open_order_pretty_printer(self.exchange_name, self.dict_order_filled, markdown=True) + self.markdown_text += f"*{trader_type}*Order(s) filled : \n-{md_text}" + + if self.dict_orders_canceled is not None and self.dict_orders_canceled: + self.text += f"{trader_type}Order(s) canceled :" + self.markdown_text += f"*{trader_type}*Order(s) canceled :" + for dict_order in self.dict_orders_canceled: + self.text += f"\n- {pretty_printer.open_order_pretty_printer(self.exchange_name, dict_order)}" + self.markdown_text += \ + f"\n- {pretty_printer.open_order_pretty_printer(self.exchange_name, dict_order, markdown=True)}" + + if self.trade_profitability is not None and self.add_profitability: + self.text += f"\nTrade profitability : {'+' if self.trade_profitability >= 0 else ''}" \ + f"{round(self.trade_profitability, 4)}%" + self.markdown_text += f"\nTrade profitability : *{'+' if self.trade_profitability >= 0 else ''}" \ + f"{round(self.trade_profitability, 4)}%*" + + if self.portfolio_profitability is not None and self.add_profitability: + self.text += f"\nPortfolio profitability : {round(self.portfolio_profitability, 4)}% " \ + f"{'+' if self.portfolio_diff >= 0 else ''}{round(self.portfolio_diff, 4)}%" + self.markdown_text += f"\nPortfolio profitability : `{round(self.portfolio_profitability, 4)}% " \ + f"{'+' if self.portfolio_diff >= 0 else ''}{round(self.portfolio_diff, 4)}%`" diff --git a/packages/services/octobot_services/notification/notification.py b/packages/services/octobot_services/notification/notification.py new file mode 100644 index 0000000000..c27673dc04 --- /dev/null +++ b/packages/services/octobot_services/notification/notification.py @@ -0,0 +1,41 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as common_enums + +import octobot_services.enums as enums + + +class Notification: + def __init__(self, text: str, title: str, markdown_text: str, sound: enums.NotificationSound, + markdown_format: common_enums.MarkdownFormat, + level: enums.NotificationLevel, category: enums.NotificationCategory, + linked_notification): + self.text = text + self.markdown_text = markdown_text + self.title = title + self.level = level + self.markdown_format = markdown_format + self.linked_notification = linked_notification + self.category = category + self.sound = sound + + # Used to identify previous notification related elements when necessary ex: a tweet to reply to + self.metadata = {} + + def __repr__(self): + return f"[Notification] title: {self.title}, text: {self.text}, level: {self.level}, " \ + f"markdown_format: {self.markdown_format.name}, category: {self.category.value}, " \ + f"linked_notification: {self.linked_notification}" diff --git a/packages/services/octobot_services/notifier/__init__.py b/packages/services/octobot_services/notifier/__init__.py new file mode 100644 index 0000000000..02ec1ffbeb --- /dev/null +++ b/packages/services/octobot_services/notifier/__init__.py @@ -0,0 +1,30 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.notifier import notifier_factory +from octobot_services.notifier import abstract_notifier + +from octobot_services.notifier.notifier_factory import ( + NotifierFactory, +) +from octobot_services.notifier.abstract_notifier import ( + AbstractNotifier, +) + +__all__ = [ + "NotifierFactory", + "AbstractNotifier", +] diff --git a/packages/services/octobot_services/notifier/abstract_notifier.py b/packages/services/octobot_services/notifier/abstract_notifier.py new file mode 100644 index 0000000000..a639d2f5b0 --- /dev/null +++ b/packages/services/octobot_services/notifier/abstract_notifier.py @@ -0,0 +1,167 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import asyncio + +import async_channel.util as channel_creator +import async_channel.channels as channels + +import octobot_trading.api as trading_api +import octobot_trading.enums as trading_enums + +import octobot_services.channel as service_channels +import octobot_services.constants as constants +import octobot_services.notification as notifications +import octobot_services.abstract_service_user as abstract_service_user +import octobot_services.util as util + + +class AbstractNotifier(abstract_service_user.AbstractServiceUser, util.ExchangeWatcher): + __metaclass__ = abc.ABCMeta + # Override this key with the identifier of the notifier (used to know if enabled) + NOTIFICATION_TYPE_KEY = None + # The service required to run this notifier + REQUIRED_SERVICES = None + USE_MAIN_LOOP = False + + def __init__(self, config): + abstract_service_user.AbstractServiceUser.__init__(self, config) + util.ExchangeWatcher.__init__(self) + self.executors = None + self.logger = self.get_logger() + self.enabled = self.is_enabled(config) + self.services = None + self.previous_notifications_by_identifier = {} + self.loop = asyncio.get_event_loop() + + async def register_new_exchange_impl(self, exchange_id): + # if self.is_initialized is False, this notifier has not been initialized and should not be used + if self.is_initialized and exchange_id not in self.registered_exchanges_ids: + await self._subscribe_to_order_channel(exchange_id) + + # Override this method to consume a notification when received + @abc.abstractmethod + async def _handle_notification(self, notification: notifications.Notification): + raise NotImplementedError(f"_handle_notification is not implemented") + + async def _send_notification(self, notification: notifications.Notification): + self.logger.debug(f"Publishing notification: {notification}") + await self._handle_notification(notification) + + def _send_notification_from_executor(self, notification: notifications.Notification): + asyncio.run(self._send_notification(notification)) + + async def _notification_callback(self, notification: notifications.Notification = None): + try: + if self._is_notification_category_enabled(notification): + if self.USE_MAIN_LOOP: + await self._send_notification(notification) + else: + await self.loop.run_in_executor(self.executors, self._send_notification_from_executor, notification) + except Exception as e: + self.logger.exception(e, True, f"Exception when handling notification: {e}") + + async def _initialize_impl(self, backtesting_enabled, edited_config) -> bool: + # make sure to always create the notification channel + channel = await self._create_notification_channel_if_not_existing() + if await abstract_service_user.AbstractServiceUser._initialize_impl(self, backtesting_enabled, edited_config): + self.services = [service.instance() for service in self.REQUIRED_SERVICES] + await self._subscribe_to_notification_channel(channel) + return True + return False + + async def _subscribe_to_notification_channel(self, channel): + await channel.new_consumer(self._notification_callback) + self.logger.debug("Registered as notification consumer") + + async def _order_notification_callback(self, exchange, exchange_id, cryptocurrency, symbol, order, + update_type, is_from_bot): + exchange_manager = trading_api.get_exchange_manager_from_exchange_id(exchange_id) + # Do not notify on existing pre-start orders + if is_from_bot and not trading_api.get_is_backtesting(exchange_manager): + order_identifier = f"{exchange}_{order[trading_enums.ExchangeConstantsOrderColumns.ID.value]}" + # find the last notification for this order if any + linked_notification = self.previous_notifications_by_identifier[order_identifier] \ + if order_identifier in self.previous_notifications_by_identifier else None + await self._handle_order_notification(order, linked_notification, order_identifier, + exchange_manager, exchange) + + async def _handle_order_notification(self, dict_order, linked_notification, order_identifier, + exchange_manager, exchange): + notification = None + order_status = trading_api.parse_order_status(dict_order) + + if order_status is trading_enums.OrderStatus.OPEN: + notification = notifications.OrderCreationNotification(linked_notification, dict_order, exchange) + # update last notification for this order + self.previous_notifications_by_identifier[order_identifier] = notification + else: + is_simulated = trading_api.is_trader_simulated(exchange_manager) + if order_status is trading_enums.OrderStatus.EXPIRED or \ + order_status is trading_enums.OrderStatus.CANCELED or \ + (order_status is trading_enums.OrderStatus.CLOSED + and dict_order[trading_enums.ExchangeConstantsOrderColumns.FILLED.value] == 0): + notification = notifications.OrderEndNotification( + linked_notification, None, exchange, [dict_order], None, None, None, False, is_simulated + ) + elif order_status in (trading_enums.OrderStatus.CLOSED, trading_enums.OrderStatus.FILLED): + trade_pnl = trading_api.get_trade_pnl( + exchange_manager, order_id=dict_order[trading_enums.ExchangeConstantsOrderColumns.ID.value] + ) + pnl_percent = None + if trade_pnl: + _, pnl_percent = trade_pnl.get_profits() + notification = notifications.OrderEndNotification( + linked_notification, dict_order, exchange, [], pnl_percent, None, None, True, is_simulated + ) + # remove order from previous_notifications_by_identifier: no more notification from it to be received + if order_identifier in self.previous_notifications_by_identifier: + self.previous_notifications_by_identifier.pop(order_identifier) + await self._notification_callback(notification) + + async def _subscribe_to_order_channel(self, exchange_id): + try: + await trading_api.subscribe_to_order_channel(self._order_notification_callback, exchange_id) + except KeyError: + self.logger.error("No order channel to subscribe to: impossible to send order notifications") + + @classmethod + def is_enabled(cls, config): + if cls.NOTIFICATION_TYPE_KEY is None: + cls.get_logger().warning(f"{cls.get_name()}.NOTIFICATION_TYPE_KEY is not set, it has to be set to identify " + f"and activate this notifier.") + return cls.NOTIFICATION_TYPE_KEY in AbstractNotifier._get_activated_notification_keys(config) + + @staticmethod + def _get_activated_notification_keys(config): + if constants.CONFIG_CATEGORY_NOTIFICATION in config \ + and constants.CONFIG_NOTIFICATION_TYPE in config[constants.CONFIG_CATEGORY_NOTIFICATION]: + return config[constants.CONFIG_CATEGORY_NOTIFICATION][constants.CONFIG_NOTIFICATION_TYPE] + return [] + + @staticmethod + async def _create_notification_channel_if_not_existing() -> channels.Channel: + try: + return channels.get_chan(service_channels.NotificationChannel.get_name()) + except KeyError: + channel = await channel_creator.create_channel_instance(service_channels.NotificationChannel, + channels.set_chan) + await channel.register_producer(service_channels.NotificationChannelProducer.instance(channel)) + return channel + + def _is_notification_category_enabled(self, notification): + return constants.CONFIG_CATEGORY_NOTIFICATION in self.config and \ + self.config[constants.CONFIG_CATEGORY_NOTIFICATION].get(notification.category.value, True) diff --git a/packages/services/octobot_services/notifier/notifier_factory.py b/packages/services/octobot_services/notifier/notifier_factory.py new file mode 100644 index 0000000000..3e7e47cd77 --- /dev/null +++ b/packages/services/octobot_services/notifier/notifier_factory.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.tentacles_management as tentacles_management + +import octobot_services.notifier as notifiers + + +class NotifierFactory: + def __init__(self, config): + self.config = config + + @staticmethod + def get_available_notifiers(): + return [notifier_class + for notifier_class in tentacles_management.get_all_classes_from_parent(notifiers.AbstractNotifier) + if not tentacles_management.is_abstract_using_inspection_and_class_naming(notifier_class)] + + async def create_notifier(self, notifier_class): + return notifier_class(self.config) diff --git a/packages/services/octobot_services/octobot_channel_consumer.py b/packages/services/octobot_services/octobot_channel_consumer.py new file mode 100644 index 0000000000..ba668587f3 --- /dev/null +++ b/packages/services/octobot_services/octobot_channel_consumer.py @@ -0,0 +1,143 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import enum + +import async_channel.channels as channels + +import octobot_commons.channels_name as channels_name +import octobot_commons.logging as logging +import octobot_commons.enums as enums + +import octobot_services.api as api +import octobot_services.managers as managers + +OCTOBOT_CHANNEL_SERVICE_CONSUMER_LOGGER_TAG = "OctoBotChannelServiceConsumer" + + +class OctoBotChannelServiceActions(enum.Enum): + """ + OctoBot Channel consumer supported actions + """ + + INTERFACE = "interface" + NOTIFICATION = "notification" + SERVICE_FEED = "service_feed" + START_SERVICE_FEED = "start_service_feed" + EXCHANGE_REGISTRATION = "exchange_registration" + + +class OctoBotChannelServiceDataKeys(enum.Enum): + """ + OctoBot Channel consumer supported data keys + """ + + EXCHANGE_ID = "exchange_id" + BOT_ID = "bot_id" + EDITED_CONFIG = "edited_config" + BACKTESTING_ENABLED = "backtesting_enabled" + INSTANCE = "instance" + SUCCESSFUL_OPERATION = "successful_operation" + CLASS = "class" + FACTORY = "factory" + EXECUTORS = "executors" + + +async def octobot_channel_callback(bot_id, subject, action, data) -> None: + """ + OctoBot channel consumer callback + :param bot_id: the callback bot id + :param subject: the callback subject + :param action: the callback action + :param data: the callback data + """ + if subject == enums.OctoBotChannelSubjects.CREATION.value: + await _handle_creation(bot_id, action, data) + elif subject == enums.OctoBotChannelSubjects.UPDATE.value: + if action == OctoBotChannelServiceActions.EXCHANGE_REGISTRATION.value: + await _handle_exchange_notification(data) + elif action == OctoBotChannelServiceActions.START_SERVICE_FEED.value: + await _handle_service_feed_start_notification(bot_id, action, data) + + +async def _handle_creation(bot_id, action, data): + created_instance = None + edited_config = data[OctoBotChannelServiceDataKeys.EDITED_CONFIG.value] + backtesting_enabled = data[OctoBotChannelServiceDataKeys.BACKTESTING_ENABLED.value] + to_create_class = data[OctoBotChannelServiceDataKeys.CLASS.value] + factory = data[OctoBotChannelServiceDataKeys.FACTORY.value] + if action == OctoBotChannelServiceActions.INTERFACE.value: + created_instance = await _create_and_start_interface(factory, to_create_class, + edited_config, backtesting_enabled) + if action == OctoBotChannelServiceActions.NOTIFICATION.value: + executors = data[OctoBotChannelServiceDataKeys.EXECUTORS.value] + created_instance = await _create_notifier(factory, to_create_class, edited_config, + backtesting_enabled, executors) + if action == OctoBotChannelServiceActions.SERVICE_FEED.value: + created_instance = await _create_service_feed(factory, to_create_class) + await channels.get_chan_at_id(channels_name.OctoBotChannelsName.OCTOBOT_CHANNEL.value, + bot_id).get_internal_producer() \ + .send(bot_id=bot_id, + subject=enums.OctoBotChannelSubjects.NOTIFICATION.value, + action=action, + data={OctoBotChannelServiceDataKeys.INSTANCE.value: created_instance}) + + +async def _create_and_start_interface(interface_factory, to_create_class, edited_config, backtesting_enabled): + interface_instance = await interface_factory.create_interface(to_create_class) + await interface_instance.initialize(backtesting_enabled, edited_config) + return interface_instance if await managers.start_interface(interface_instance) else None + + +async def _create_notifier(factory, to_create_class, edited_config, backtesting_enabled, executors): + notifier_instance = await factory.create_notifier(to_create_class) + notifier_instance.executors = executors + await notifier_instance.initialize(backtesting_enabled, edited_config) + return notifier_instance + + +async def _create_service_feed(factory, to_create_class): + return factory.create_service_feed(to_create_class) + + +async def _handle_exchange_notification(data): + notifier_or_interface = data[OctoBotChannelServiceDataKeys.INSTANCE.value] + exchange_id = data[OctoBotChannelServiceDataKeys.EXCHANGE_ID.value] + await notifier_or_interface.register_new_exchange(exchange_id) + + +async def _handle_service_feed_start_notification(bot_id, action, data): + service_feed = data[OctoBotChannelServiceDataKeys.INSTANCE.value] + edited_config = data[OctoBotChannelServiceDataKeys.EDITED_CONFIG.value] + await channels.get_chan_at_id(channels_name.OctoBotChannelsName.OCTOBOT_CHANNEL.value, + bot_id).get_internal_producer() \ + .send(bot_id=bot_id, + subject=enums.OctoBotChannelSubjects.NOTIFICATION.value, + action=action, + data={OctoBotChannelServiceDataKeys.SUCCESSFUL_OPERATION.value: await _start_service_feed(service_feed, + edited_config)}) + + +async def _start_service_feed(service_feed, edited_config): + if not await api.start_service_feed(service_feed, False, edited_config): + logger = logging.get_logger(OCTOBOT_CHANNEL_SERVICE_CONSUMER_LOGGER_TAG) + # log error when the issue is not related to configuration + if service_feed.has_required_services_configuration(): + logger.error(f"Failed to start {service_feed.get_name()}. Evaluators requiring this service feed " + f"might not work properly.") + else: + logger.debug(f"Impossible to start {service_feed.get_name()}: missing service(s) configuration.") + return False + return True diff --git a/packages/services/octobot_services/service_feeds/__init__.py b/packages/services/octobot_services/service_feeds/__init__.py new file mode 100644 index 0000000000..11333ae92f --- /dev/null +++ b/packages/services/octobot_services/service_feeds/__init__.py @@ -0,0 +1,42 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.service_feeds import abstract_service_feed +from octobot_services.service_feeds.abstract_service_feed import ( + AbstractServiceFeed, +) + +from octobot_services.service_feeds import service_feeds +from octobot_services.service_feeds.service_feeds import ( + ServiceFeeds, +) + +from octobot_services.service_feeds import service_feed_factory +from octobot_services.service_feeds import service_feed_exception + +from octobot_services.service_feeds.service_feed_factory import ( + ServiceFeedFactory, +) +from octobot_services.service_feeds.service_feed_exception import ( + ServiceFeedException, +) + +__all__ = [ + "ServiceFeedFactory", + "AbstractServiceFeed", + "ServiceFeedException", + "ServiceFeeds", +] diff --git a/packages/services/octobot_services/service_feeds/abstract_service_feed.py b/packages/services/octobot_services/service_feeds/abstract_service_feed.py new file mode 100644 index 0000000000..729b582f87 --- /dev/null +++ b/packages/services/octobot_services/service_feeds/abstract_service_feed.py @@ -0,0 +1,229 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import typing + +import async_channel.channels as channels + +import octobot_commons.asyncio_tools as asyncio_tools +import octobot_backtesting.api as backtesting_api + +import octobot_services.abstract_service_user as abstract_service_user +import octobot_services.channel as service_channels +import octobot_services.util as util + + +class AbstractServiceFeed(abstract_service_user.AbstractServiceUser, + util.ReturningStartable, + service_channels.AbstractServiceFeedChannelProducer): + __metaclass__ = abc.ABCMeta + + # Override FEED_CHANNEL with a dedicated channel + FEED_CHANNEL = None + + # Set simulator class when available in order to use it in backtesting for this feed + SIMULATOR_CLASS = None + IS_SIMULATOR_CLASS = False + + # Whether this feed supports historical data collection for backtesting + BACKTESTING_ENABLED = False + + _SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC = 10 + DELAY_BETWEEN_STREAMS_QUERIES = 5 + REQUIRED_SERVICE_ERROR_MESSAGE = "Required services are not ready, service feed can't start" + + def __init__(self, config, main_async_loop, bot_id, backtesting=None, importer=None): + abstract_service_user.AbstractServiceUser.__init__(self, config) + try: + channel = channels.set_chan(self.FEED_CHANNEL(), None) + except ValueError: + channel = channels.get_chan(self.FEED_CHANNEL.get_name()) + service_channels.AbstractServiceFeedChannelProducer.__init__(self, channel) + self.feed_config = {} + self.main_async_loop = main_async_loop + self.bot_id = bot_id + self.services = None + self.should_stop = False + self.data_cache = None + + # backtesting + self.backtesting = backtesting + self.is_backtesting = backtesting is not None and self.BACKTESTING_ENABLED == True + self.social_data_importer = importer + self.time_consumer = None + + # Override update_feed_config if any need in the extending feed + def update_feed_config(self, config): + pass + + # Override this method if the service feed implementation is using a dispatcher handled in the service layer + # (ie: TelegramServiceFeed) + @staticmethod + def _get_service_layer_service_feed() -> object: + return None + + # Override this method to specify the feed reception process + @abc.abstractmethod + async def _start_service_feed(self): + raise NotImplementedError("start_dispatcher not implemented") + + @abc.abstractmethod + def _something_to_watch(self): + raise NotImplementedError("_something_to_watch not implemented") + + @abc.abstractmethod + def _initialize(self): + raise NotImplementedError("_initialize not implemented") + + async def _init_channel(self): + channel = channels.get_chan(self.FEED_CHANNEL.get_name()) + await channel.register_producer(self) + if self.is_backtesting: + await self._register_time_consumer() + + # Call _notify_consumers to send data to consumers + def _notify_consumers(self, data): + try: + # send notification only if is a notification channel is running + channels.get_chan(self.FEED_CHANNEL.get_name()) + asyncio_tools.run_coroutine_in_asyncio_loop(self.feed_send_coroutine(data), self.main_async_loop) + except KeyError: + self.logger.error("Can't send notification data: no initialized channel found") + + # Call _async_notify_consumers to send data to consumers (same as _notify_consumers but directly from async context) + async def _async_notify_consumers(self, data): + try: + # send notification only if is a notification channel is running + channels.get_chan(self.FEED_CHANNEL.get_name()) + await self.feed_send_coroutine(data) + except KeyError: + self.logger.error("Can't send notification data: no initialized channel found") + + async def feed_send_coroutine(self, data): + await self.send( + { + "data": data + } + ) + + async def _run(self, should_init=True): + self.is_running = True + service_level_service_feed_if_any = self._get_service_layer_service_feed() + if self._something_to_watch(): + if should_init: + self._initialize() + await self._init_channel() + if self.services is not None: + for service in self.services: + if service_level_service_feed_if_any is not None \ + and not service.is_running(): + await service.start_service_feed() + if not await self._start_service_feed(): + self.logger.warning("Nothing can be monitored even though there is something to watch" + ", feed is going closing.") + else: + self.logger.info("Nothing to monitor, feed is closing.") + self.is_running = False + return True + + def get_data_cache(self, current_time: float, key: typing.Optional[str] = None): + if self.data_cache is None: + return None + + if key is not None: + return self.data_cache.get(key, None) + + return self.data_cache + + async def _async_run(self) -> bool: + self.logger.info("Initializing feed reception ...") + self.services = [service.instance() for service in self.REQUIRED_SERVICES] if self.REQUIRED_SERVICES else [] + return await self._run() + + async def resume(self) -> bool: + self.should_stop = False + self.logger.info("Resuming feed reception ...") + return await self._run(should_init=False) + + async def stop(self): + if self.is_running: + self.should_stop = True + self.is_running = False + if self.is_backtesting: + await self._stop_and_pause_time_consumer() + + async def pause(self): + if self.is_backtesting: + await self._pause_time_consumer() + + def _get_time_channel(self): + return channels.get_chan( + backtesting_api.get_backtesting_time_channel_name(self.backtesting) + ) + + async def _pause_time_consumer(self) -> None: + if self.time_consumer is not None: + await self._get_time_channel().remove_consumer(self.time_consumer) + + async def _stop_and_pause_time_consumer(self) -> None: + try: + await self._pause_time_consumer() + except KeyError: + pass + self.time_consumer = None + + async def _register_time_consumer(self) -> None: + if self.time_consumer is None: + self.time_consumer = await self._get_time_channel().new_consumer(self.handle_timestamp) + + async def handle_timestamp(self, timestamp, **kwargs) -> None: + self.logger.error("Received timestamp in feed but no handler implemented, this should not happen") + + async def get_historical_data( + self, + start_timestamp, + end_timestamp, + symbols=None, + source=None, + **kwargs + ) -> typing.AsyncIterator[list[dict]]: + """ + Fetch historical data from the feed for the given time range. + Override this method in feeds that support historical data collection. + + :param start_timestamp: milliseconds timestamp (int/float) for start of range + :param end_timestamp: milliseconds timestamp (int/float) for end of range + :param symbols: optional list of symbols to filter by + :param source: optional source/topic to fetch + :param kwargs: additional feed-specific parameters + :return: async generator yielding batches (lists) of event dicts + :rtype: typing.AsyncIterator[list[dict]] + + Each event dict should have at least: + - timestamp: milliseconds timestamp (int/float) + - payload: dict with event data + - channel: optional str + - symbol: optional str + """ + raise NotImplementedError("get_historical_data is not implemented for this feed") + + @classmethod + def get_historical_sources(cls) -> list: + """ + Return the list of source/topic ids supported by get_historical_data. + Override in feeds that support historical data to return their source ids. + """ + return [] diff --git a/packages/services/octobot_services/service_feeds/service_feed_exception.py b/packages/services/octobot_services/service_feeds/service_feed_exception.py new file mode 100644 index 0000000000..4cb4e12ae4 --- /dev/null +++ b/packages/services/octobot_services/service_feeds/service_feed_exception.py @@ -0,0 +1,19 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class ServiceFeedException(Exception): + pass diff --git a/packages/services/octobot_services/service_feeds/service_feed_factory.py b/packages/services/octobot_services/service_feeds/service_feed_factory.py new file mode 100644 index 0000000000..79e4ab0408 --- /dev/null +++ b/packages/services/octobot_services/service_feeds/service_feed_factory.py @@ -0,0 +1,49 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.tentacles_management as tentacles_management + +import octobot_commons.logging as logging + +import octobot_services.service_feeds as service_feeds + + +class ServiceFeedFactory: + def __init__(self, config, main_async_loop, bot_id, backtesting=None, importer=None): + self.logger = logging.get_logger(self.__class__.__name__) + self.config = config + self.main_async_loop = main_async_loop + self.bot_id = bot_id + self.backtesting = backtesting + self.importer = importer + + @staticmethod + def get_available_service_feeds(in_backtesting: bool) -> list: + feeds = tentacles_management.get_all_classes_from_parent(service_feeds.AbstractServiceFeed) + if in_backtesting: + feeds = [feed.SIMULATOR_CLASS + for feed in feeds + if feed.SIMULATOR_CLASS is not None and feed.IS_SIMULATOR_CLASS] + else: + feeds = [feed + for feed in feeds + if feed.IS_SIMULATOR_CLASS is False] + return feeds + + def create_service_feed(self, service_feed_class, importer=None) -> service_feeds.AbstractServiceFeed: + feed = service_feed_class(self.config, self.main_async_loop, self.bot_id, + backtesting=self.backtesting, importer=importer or self.importer) + service_feeds.ServiceFeeds.instance().add_service_feed(self.bot_id, service_feed_class.get_name(), feed) + return feed diff --git a/packages/services/octobot_services/service_feeds/service_feeds.py b/packages/services/octobot_services/service_feeds/service_feeds.py new file mode 100644 index 0000000000..77597c750a --- /dev/null +++ b/packages/services/octobot_services/service_feeds/service_feeds.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.singleton as singleton + +import octobot_services.service_feeds as service_feeds + + +class ServiceFeeds(singleton.Singleton): + def __init__(self): + self.service_feeds = {} + + def get_service_feed(self, bot_id: str, feed_name: str) -> typing.Optional[service_feeds.AbstractServiceFeed]: + try: + return self.service_feeds[bot_id][feed_name] + except KeyError: + return None + + def add_service_feed(self, bot_id: str, feed_name: str, feed: service_feeds.AbstractServiceFeed) -> None: + if bot_id not in self.service_feeds: + self.service_feeds[bot_id] = {} + self.service_feeds[bot_id][feed_name] = feed + + def clear_bot_id_feeds(self, bot_id: str) -> None: + self.service_feeds.pop(bot_id, None) diff --git a/packages/services/octobot_services/services/__init__.py b/packages/services/octobot_services/services/__init__.py new file mode 100644 index 0000000000..88aab3319d --- /dev/null +++ b/packages/services/octobot_services/services/__init__.py @@ -0,0 +1,49 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.services import service_factory +from octobot_services.services import abstract_service +from octobot_services.services import abstract_ai_service +from octobot_services.services import abstract_web_search_service +from octobot_services.services import read_only_info + +from octobot_services.services.service_factory import ( + ServiceFactory, +) +from octobot_services.services.abstract_service import ( + AbstractService, +) +from octobot_services.services.abstract_ai_service import ( + AbstractAIService, +) +from octobot_services.services.abstract_web_search_service import ( + AbstractWebSearchService, + WebSearchResult, + WebSearchResponse, +) +from octobot_services.services.read_only_info import ( + ReadOnlyInfo, +) + +__all__ = [ + "ServiceFactory", + "AbstractService", + "AbstractAIService", + "AbstractWebSearchService", + "WebSearchResult", + "WebSearchResponse", + "ReadOnlyInfo", +] diff --git a/packages/services/octobot_services/services/abstract_ai_service.py b/packages/services/octobot_services/services/abstract_ai_service.py new file mode 100644 index 0000000000..d95a2eccd3 --- /dev/null +++ b/packages/services/octobot_services/services/abstract_ai_service.py @@ -0,0 +1,445 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import asyncio +import functools +import json +import logging +import typing + +from octobot_services.services.abstract_service import AbstractService +import octobot_services.enums as enums + +class AbstractAIService(AbstractService, abc.ABC): + DEFAULT_MODEL: typing.Optional[str] = None + DEFAULT_MAX_TOKENS: int = 10000 + DEFAULT_TEMPERATURE: float = 0.5 + + def __init__(self): + super().__init__() + self.model = self.DEFAULT_MODEL + self.models: list[str] = [] + self.models_config: typing.Dict[str, str] = {} # usage policy -> model name, e.g. {"fast": "gpt-4o-mini", "reasoning": "o4-mini"} + self.ai_provider: typing.Optional[enums.AIProvider] = None + self.auth_token: typing.Optional[str] = None + self.api_key: typing.Optional[str] = None + + @staticmethod + def retry_llm_completion( + max_retries: int = 3, + retry_delay: float = 0.0, + retriable_exceptions: tuple = (json.JSONDecodeError, ValueError, KeyError, AttributeError), + ): + """ + Decorator to retry LLM completion methods on retriable exceptions. + + Args: + max_retries: Maximum number of retry attempts (default: 3). + retry_delay: Delay in seconds between retries (default: 0.0). + retriable_exceptions: Tuple of exception types that should trigger retries. + + Returns: + Decorator function that wraps async methods with retry logic. + """ + def decorator(func): + @functools.wraps(func) + async def wrapper(self, *args, **kwargs): + logger = getattr(self, 'logger', None) or logging.getLogger(f"{self.__class__.__name__}.retry") + last_exception = None + + for attempt in range(1, max_retries + 1): + try: + return await func(self, *args, **kwargs) + except retriable_exceptions as e: + last_exception = e + error_details = str(e) + + if attempt < max_retries: + logger.warning( + f"{func.__name__} failed on attempt {attempt}/{max_retries} " + f"for {self.__class__.__name__}: {error_details}. Retrying..." + ) + if retry_delay > 0: + await asyncio.sleep(retry_delay) + else: + logger.error( + f"{func.__name__} failed on final attempt {attempt}/{max_retries} " + f"for {self.__class__.__name__}: {error_details}" + ) + raise + except Exception: + # Non-retriable exceptions should be raised immediately + raise + + # Should not reach here, but just in case + if last_exception: + raise last_exception + + return wrapper + return decorator + + @retry_llm_completion() + @abc.abstractmethod + async def get_completion( + self, + messages: list, + model: typing.Optional[str] = None, + max_tokens: int = 10000, + n: int = 1, + stop: typing.Optional[typing.Union[str, list]] = None, + temperature: float = 0.5, + json_output: bool = False, + response_schema: typing.Optional[typing.Any] = None, + tools: typing.Optional[list] = None, + tool_choice: typing.Optional[typing.Union[str, dict]] = None, + use_octobot_mcp: typing.Optional[bool] = None, + middleware: typing.Optional[typing.List[typing.Callable]] = None, + ) -> typing.Union[str, dict, None]: + """ + Get a completion from the LLM. + + Args: + messages: List of message dicts with 'role' and 'content' keys. + model: Model to use (defaults to service's default model). + max_tokens: Maximum tokens in the response. + n: Number of completions to generate. + stop: Stop sequences. + temperature: Sampling temperature (0-2). + json_output: Whether to return JSON formatted output. + response_schema: Optional Pydantic model or JSON schema dict + for structured output validation. + tools: Optional list of tool definitions for function calling. + Each tool should be a dict with 'type' and 'function' keys. + tool_choice: Optional control for tool usage. Can be "auto", "none", + or a dict specifying a specific tool. + use_octobot_mcp: Optional bool to include OctoBot MCP server tools. + If True, automatically discovers and includes tools from OctoBot MCP interface. + If None, uses default behavior (does not include OctoBot MCP). + If False, explicitly excludes OctoBot MCP tools. + middleware: Optional list of middleware callables to process the client before invocation. + Each middleware should accept (client, messages, kwargs) and return modified client. + This is primarily used by LangChain-based services; OpenAI-based services may ignore this. + + Returns: + str: The completion text when no tools are used or tool_choice is "none". + dict: When tools are used and model makes tool calls, returns dict with: + - "content": str | None (may be None if only tool calls) + - "tool_calls": list of tool call dicts with id, type, function keys + None: On error + + Raises: + InvalidRequestError: If the request is malformed. + RateLimitError: If rate limits are exceeded. + """ + raise NotImplementedError("get_completion not implemented") + + @retry_llm_completion() + @abc.abstractmethod + async def get_completion_with_tools( + self, + messages: list, + tool_executor: typing.Optional[typing.Callable[[str, dict], typing.Any]] = None, + model: typing.Optional[str] = None, + max_tokens: int = 10000, + n: int = 1, + stop: typing.Optional[typing.Union[str, list]] = None, + temperature: float = 0.5, + json_output: bool = False, + response_schema: typing.Optional[typing.Any] = None, + tools: typing.Optional[list] = None, + tool_choice: typing.Optional[typing.Union[str, dict]] = None, + use_octobot_mcp: typing.Optional[bool] = None, + max_tool_iterations: int = 3, + return_tool_calls: bool = False, + middleware: typing.Optional[typing.List[typing.Callable]] = None, + ) -> typing.Any: + """ + Get a completion from the LLM with automatic tool calling orchestration. + + This method handles the tool calling loop automatically: + 1. Calls get_completion with the provided parameters + 2. If the response contains tool_calls, executes them using tool_executor + 3. Appends tool results to messages and calls get_completion again + 4. Repeats until no tool_calls are present or max_tool_iterations is reached + 5. Returns the final parsed response + + Args: + messages: List of message dicts with 'role' and 'content' keys. + tool_executor: Optional callback function to execute tools. + Signature: (tool_name: str, arguments: dict) -> Any + If None, tool calls will not be executed (response returned as-is). + model: Model to use (defaults to service's default model). + max_tokens: Maximum tokens in the response. + n: Number of completions to generate. + stop: Stop sequences. + temperature: Sampling temperature (0-2). + json_output: Whether to parse response as JSON. + response_schema: Optional Pydantic model or JSON schema dict + for structured output validation. + tools: Optional list of tool definitions for function calling. + Each tool should be a dict with 'type' and 'function' keys. + tool_choice: Optional control for tool usage. Can be "auto", "none", + or a dict specifying a specific tool. + use_octobot_mcp: Optional bool to include OctoBot MCP server tools. + If True, automatically discovers and includes tools from OctoBot MCP interface. + If None, uses default behavior (does not include OctoBot MCP). + If False, explicitly excludes OctoBot MCP tools. + max_tool_iterations: Maximum number of tool calling rounds (default: 3). + Prevents infinite loops if LLM keeps requesting tools. + middleware: Optional list of middleware callables to process the client before invocation. + Each middleware should accept (client, messages, kwargs) and return modified client. + This is primarily used by LangChain-based services; OpenAI-based services may ignore this. + + Returns: + Final parsed response: + - dict: If json_output=True, returns parsed JSON dict + - str: If json_output=False, returns the content string + - If tool_executor is None and tool_calls are present, returns dict with tool_calls + + Raises: + InvalidRequestError: If the request is malformed. + RateLimitError: If rate limits are exceeded. + ValueError: If max_tool_iterations is exceeded or tool_executor is None when tool_calls are present. + """ + raise NotImplementedError("get_completion_with_tools not implemented") + + @staticmethod + @abc.abstractmethod + def create_message( + role: str, + content: str, + model: typing.Optional[str] = None + ) -> dict: + """ + Create a message dict for the LLM. + + Some models don't support certain roles (e.g., 'system'), + so this method allows implementations to handle that. + + Args: + role: The message role ('system', 'user', 'assistant'). + content: The message content. + model: Optional model name to handle model-specific restrictions. + + Returns: + A dict with 'role' and 'content' keys. + """ + raise NotImplementedError("create_message not implemented") + + def supports_call_json_output(self) -> bool: + """ + Whether the service supports tool calling when JSON output is requested. + + Defaults to True. Services can override to provide config-driven behavior. + """ + return True + + @staticmethod + def parse_completion_response( + response: typing.Union[str, dict, None], + json_output: bool = False + ) -> typing.Any: + """ + Parse a completion response from get_completion(). + + Handles both string responses and dict responses (with tool_calls). + Extracts content and optionally parses JSON. + + Args: + response: The response from get_completion(), can be str, dict, or None. + json_output: Whether to parse the content as JSON. + + Returns: + Parsed JSON dict if json_output=True, otherwise the content string. + + Raises: + json.JSONDecodeError: If json_output=True and content is not valid JSON. + ValueError: If response format is unexpected. + """ + if response is None: + raise ValueError("Response is None") + + if isinstance(response, dict): + response_stripped = response.get("content", "").strip() if response.get("content") else str(response).strip() + else: + response_stripped = response.strip() if isinstance(response, str) else str(response) + + if json_output: + try: + parsed_response = json.loads(response_stripped) + except json.JSONDecodeError: + try: + # Cannot be imported at the top level due to circular imports, TODO improve it + import octobot_agents.utils as agents_utils + parsed_response = agents_utils.extract_json_from_content(response_stripped) + except Exception: + parsed_response = None + if parsed_response is None: + return { + "error": f"Error parsing JSON from response {response_stripped}", + } + else: + parsed_response = response_stripped + + return parsed_response + + @staticmethod + @abc.abstractmethod + def handle_tool_calls( + tool_calls: typing.List[dict], + tool_executor: typing.Callable[[str, dict], typing.Any], + ) -> typing.List[dict]: + """ + Execute tool calls and format results for LLM message continuation. + + Takes a list of tool calls from an LLM response, executes them using + the provided tool_executor callback, and returns formatted tool result + messages ready to append to the conversation. + + Args: + tool_calls: List of tool call dicts from LLM response, each with: + - "id": Tool call ID + - "function": Dict with "name" and "arguments" keys + tool_executor: Callback function that executes a tool. + Signature: (tool_name: str, arguments: dict) -> Any + Should return the tool execution result (will be JSON-serialized). + + Returns: + List of tool result message dicts, each with: + - "tool_call_id": The original tool call ID + - "role": "tool" + - "name": Tool function name + - "content": JSON-serialized tool result + + Raises: + NotImplementedError: If the service doesn't support tool calls. + """ + raise NotImplementedError("handle_tool_calls not implemented") + + def format_tool_definition( + self, + name: str, + description: str, + parameters: typing.Dict[str, typing.Any], + tool_type: str = "function" + ) -> typing.Dict[str, typing.Any]: + """ + Format a tool definition into the standard OpenAI function calling format. + + This method can be overridden by concrete AI services to customize tool formatting. + Use this instead of manually creating tool dictionaries to avoid format errors. + + Args: + name: The function name (must be non-empty string). + description: Description of what the tool does. + parameters: JSON schema dict defining the tool's parameters. + tool_type: The tool type (default: "function" for OpenAI compatibility). + + Returns: + Properly formatted tool definition dict with 'type' and 'function' keys. + + Example: + >>> service.format_tool_definition( + ... name="run_agent", + ... description="Execute a specific agent", + ... parameters={"type": "object", "properties": {"agent_name": {"type": "string"}}} + ... ) + { + "type": "function", + "function": { + "name": "run_agent", + "description": "Execute a specific agent", + "parameters": {"type": "object", "properties": {"agent_name": {"type": "string"}}} + } + } + """ + if not name or not isinstance(name, str) or name.strip() == "": + raise ValueError(f"Tool name must be a non-empty string, got: {name}") + + return { + "type": tool_type, + "function": { + "name": name, + "description": description, + "parameters": parameters, + } + } + + def get_model(self) -> typing.Optional[str]: + return self.model + + def get_available_models(self) -> list: + return self.models + + def get_model_for_policy(self, policy: str) -> typing.Optional[str]: + """ + Return the model name for a given usage policy (e.g. "fast" or "reasoning"). + When models_config is set (e.g. {"fast": "gpt-4o-mini", "reasoning": "o4-mini"}), + returns the model for that policy; otherwise returns None and callers should use get_model(). + """ + if not self.models_config: + return None + return self.models_config.get(policy) + + def get_chat_model( + self, + model: typing.Optional[str] = None, + temperature: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + **kwargs + ) -> typing.Any: + """ + Get a chat model instance for use with agents or other LLM components. + + This method provides direct access to the underlying model client, + allowing integration with agent frameworks like LangGraph. + + Args: + model: Optional model override. Uses service's configured model if not provided. + temperature: Optional temperature override. + max_tokens: Optional max_tokens override. + **kwargs: Additional keyword arguments passed to the chat model. + + Returns: + Chat model instance configured with the service's settings. + + Raises: + NotImplementedError: If the service doesn't support this method. + """ + raise NotImplementedError("get_chat_model not implemented") + + def init_chat_model( + self, + model: typing.Optional[str] = None, + **kwargs + ) -> typing.Any: + """ + Initialize a chat model using the service's configuration. + + This method provides a way to create a chat model instance that can be + used with deep agents and other LLM orchestration frameworks. + + Args: + model: Model name. If None, uses service's configured model. + **kwargs: Additional keyword arguments passed to the model initialization. + + Returns: + Chat model instance. + + Raises: + NotImplementedError: If the service doesn't support this method. + """ + raise NotImplementedError("init_chat_model not implemented") diff --git a/packages/services/octobot_services/services/abstract_service.py b/packages/services/octobot_services/services/abstract_service.py new file mode 100644 index 0000000000..cd395ae5b4 --- /dev/null +++ b/packages/services/octobot_services/services/abstract_service.py @@ -0,0 +1,182 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import typing + +import octobot_commons.configuration as configuration +import octobot_commons.singleton as singleton +import octobot_commons.logging as logging + +import octobot_services.constants as constants +import octobot_services.services.read_only_info as read_only_info + + +class AbstractService(singleton.Singleton): + __metaclass__ = abc.ABCMeta + + BACKTESTING_ENABLED = False + + def __init__(self): + super().__init__() + self.logger: typing.Optional[logging.BotLogger] = None + self.config: typing.Optional[dict] = None + self.edited_config: typing.Optional[dict] = None + self.creation_error_message: typing.Optional[str] = None + self._created: bool = True + self._healthy: bool = False + self._has_been_created: bool = False + + def set_has_been_created(self, value): + self._has_been_created = value + + def get_has_been_created(self): + return self._has_been_created + + def is_healthy(self): + return self._healthy + + @classmethod + def get_name(cls): + return cls.__name__ + + def get_fields_description(self) -> dict: + """ + :return: the service configuration keys with their description + """ + return {} + + def get_default_value(self) -> dict: + """ + :return: the service configuration keys with their default value + """ + return {} + + def get_required_config(self) -> list: + """ + :return: the list of required (not optional) configuration keys + """ + return [] + + def get_read_only_info(self) -> list[read_only_info.ReadOnlyInfo]: + return [] + + def is_improved_by_extensions(self) -> bool: + return False + + @classmethod + def get_help_page(cls) -> str: + """ + :return: the url of the help page with this service + """ + return "" + + # Override this method if a user is to be registered in this service (ie: TelegramService) + def register_user(self, user_key): + pass + + # Override this method if a service feed is located in this service (ie: TelegramService) + async def start_service_feed(self): + pass + + # Override this method to know if an updater is already running + def is_running(self): + return False + + # Returns true if all the service has an instance in config + @staticmethod + @abc.abstractmethod + def is_setup_correctly(config): + raise NotImplementedError("is_setup_correctly not implemented") + + # Override this method to perform additional checks + @staticmethod + def get_is_enabled(config): + return True + + # implement locally if the service has thread(s) to stop + async def stop(self): + pass + + # implement locally if the service shouldn't raise warning at startup if configuration is not set + @staticmethod + def get_should_warn(): + return True + + # Returns true if all the configuration is available + @abc.abstractmethod + def has_required_configuration(self): + raise NotImplementedError("has_required_configuration not implemented") + + # Returns the service's endpoint + @abc.abstractmethod + def get_endpoint(self) -> None: + raise NotImplementedError("get_endpoint not implemented") + + # Called to put in the right service in config + @abc.abstractmethod + def get_type(self) -> None: + raise NotImplementedError("get_type not implemented") + + # Called after service setup + @abc.abstractmethod + async def prepare(self) -> None: + raise NotImplementedError("prepare not implemented") + + # Called by say_hello after service is prepared, return relevant service information and a boolean for + # success or failure + @abc.abstractmethod + def get_successful_startup_message(self): + raise NotImplementedError("get_successful_startup_message not implemented") + + def get_website_url(self) -> str: + # Override with the real url + return "" + + def get_brand_name(self): + return self.get_type() + + def get_logo(self): + # Default services logos are fount using https://github.com/edent/SuperTinyIcons + # Override to customize this behavior + return f"https://raw.githubusercontent.com/edent/SuperTinyIcons/master/images/svg/{self.get_brand_name()}.svg" + + def check_required_config(self, config): + return all(key in config for key in self.get_required_config()) and \ + not configuration.has_invalid_default_config_value(*(config[key] for key in self.get_required_config())) + + def log_connection_error_message(self, e): + self.logger.error(f"{self.get_name()} is failing to connect, please check your internet connection: {e}") + + async def say_hello(self): + message, self._healthy = self.get_successful_startup_message() + if self._healthy and message: + self.logger.info(message) + return self._healthy + + def save_service_config(self, service_key, service_config, update=False): + """ + Save the service's config into the user config file + :param service_key: the key of the service config in file + :param service_config: the updated config + :param update: when true, the service configuration dict will be updated using the new data, it will + be replaced otherwise + :return: None + """ + if update and service_key in self.edited_config.config[constants.CONFIG_CATEGORY_SERVICES]: + self.edited_config.config[constants.CONFIG_CATEGORY_SERVICES][service_key].update(service_config) + else: + self.edited_config.config[constants.CONFIG_CATEGORY_SERVICES][service_key] = service_config + self.edited_config.save() diff --git a/packages/services/octobot_services/services/abstract_web_search_service.py b/packages/services/octobot_services/services/abstract_web_search_service.py new file mode 100644 index 0000000000..c7bedee763 --- /dev/null +++ b/packages/services/octobot_services/services/abstract_web_search_service.py @@ -0,0 +1,157 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import abc +import typing +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Sequence + +from octobot_services.services.abstract_service import AbstractService + +@dataclass +class WebSearchResult: + """Single web search result.""" + title: str = "" + url: str = "" + content: str = "" + score: float = 0.0 + raw_content: Optional[str] = None + favicon: Optional[str] = None + engine: Optional[str] = None # Which search engine returned this result + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "WebSearchResult": + if not data or not isinstance(data, dict): + return cls() + return cls( + title=str(data.get("title", "")), + url=str(data.get("url", "")), + content=str(data.get("content", "")), + score=float(data.get("score", 0)), + raw_content=data.get("raw_content"), + favicon=data.get("favicon"), + engine=data.get("engine"), + ) + + +@dataclass +class WebSearchResponse: + """Web search response containing multiple results.""" + query: str = "" + results: List[WebSearchResult] = field(default_factory=list) + answer: Optional[str] = None # AI-generated answer if available + response_time: Optional[float] = None + total_results: Optional[int] = None + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "WebSearchResponse": + if not data or not isinstance(data, dict): + return cls() + results = [ + WebSearchResult.from_dict(r) + for r in data.get("results", []) + if isinstance(r, dict) + ] + return cls( + query=str(data.get("query", "")), + results=results, + answer=data.get("answer"), + response_time=data.get("response_time"), + total_results=data.get("total_results"), + ) + + +class AbstractWebSearchService(AbstractService, abc.ABC): + """ + Abstract base class for web search services. + + Provides a common interface for web search functionality similar to + how AbstractAIService provides a common interface for AI/LLM services. + + Implementations should override the abstract methods to provide + search functionality via different backends (Tavily, SearXNG, etc.). + """ + + DEFAULT_MAX_RESULTS: int = 10 + DEFAULT_TIMEOUT: float = 30.0 + + def __init__(self): + super().__init__() + self._startup_message: str = "" + self._startup_healthy: bool = False + + @abc.abstractmethod + async def search( + self, + query: str, + max_results: Optional[int] = None, + categories: Optional[Sequence[str]] = None, + language: Optional[str] = None, + time_range: Optional[str] = None, + include_domains: Optional[Sequence[str]] = None, + exclude_domains: Optional[Sequence[str]] = None, + timeout: Optional[float] = None, + **kwargs, + ) -> WebSearchResponse: + """ + Perform a web search. + + Args: + query: The search query string. + max_results: Maximum number of results to return. + categories: Search categories (e.g., ["general", "news", "images"]). + language: Language code for results (e.g., "en", "fr"). + time_range: Time range filter (e.g., "day", "week", "month", "year"). + include_domains: Only include results from these domains. + exclude_domains: Exclude results from these domains. + timeout: Request timeout in seconds. + **kwargs: Additional provider-specific parameters. + + Returns: + WebSearchResponse containing the search results. + """ + raise NotImplementedError("search not implemented") + + async def search_news( + self, + query: str, + max_results: Optional[int] = None, + language: Optional[str] = None, + time_range: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs, + ) -> WebSearchResponse: + """ + Search for news articles. + + Default implementation calls search() with categories=["news"]. + Override for providers with dedicated news search endpoints. + + Args: + query: The search query string. + max_results: Maximum number of results to return. + language: Language code for results. + time_range: Time range filter. + timeout: Request timeout in seconds. + **kwargs: Additional provider-specific parameters. + + Returns: + WebSearchResponse containing news results. + """ + raise NotImplementedError("search_news not implemented") + + def get_successful_startup_message(self) -> typing.Tuple[str, bool]: + """Return startup message and health status.""" + return self._startup_message, self._startup_healthy diff --git a/packages/services/octobot_services/services/read_only_info.py b/packages/services/octobot_services/services/read_only_info.py new file mode 100644 index 0000000000..8539134f55 --- /dev/null +++ b/packages/services/octobot_services/services/read_only_info.py @@ -0,0 +1,28 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses + +import octobot_services.enums + + +@dataclasses.dataclass +class ReadOnlyInfo: + name: str + value: str + type: octobot_services.enums.ReadOnlyInfoType + path: str = "" + configuration_title: str = "" + configuration_path: str = "" diff --git a/packages/services/octobot_services/services/service_factory.py b/packages/services/octobot_services/services/service_factory.py new file mode 100644 index 0000000000..2bde86b9b6 --- /dev/null +++ b/packages/services/octobot_services/services/service_factory.py @@ -0,0 +1,92 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import typing + +import octobot_commons.logging as logging + +import octobot_services.constants as constants +import octobot_services.services as services + + +class ServiceFactory: + def __init__(self, config): + self.logger = logging.get_logger(self.__class__.__name__) + self.config = config + + @staticmethod + def get_available_services() -> list: + return [service_class for service_class in services.AbstractService.__subclasses__() if service_class is not services.AbstractAIService and service_class is not services.AbstractWebSearchService] + + @staticmethod + def get_available_ai_services() -> list: + return [service_class for service_class in services.AbstractAIService.__subclasses__()] + + @staticmethod + def get_available_web_search_services() -> list: + return [service_class for service_class in services.AbstractWebSearchService.__subclasses__()] + + async def create_or_get_service(self, service_class, backtesting_enabled, edited_config) -> typing.Tuple[bool, str]: + """ + create_or_get_service will create a service instance if it doesn't exist, check the existing one otherwise + :param service_class: the class of the service to create + :return: True if the created service is working properly, False otherwise + """ + service_instance = service_class.instance() + if service_instance.get_has_been_created(): + return service_instance.is_healthy(), service_instance.creation_error_message + else: + return ( + await self._create_service(service_instance, backtesting_enabled, edited_config), + service_instance.creation_error_message + ) + + async def _create_service(self, service, backtesting_enabled, edited_config) -> bool: + service.set_has_been_created(True) + service.logger = logging.get_logger(service.get_name()) + service.config = self.config + service.edited_config = edited_config + if service.has_required_configuration(): + return await self._perform_checkup(service) + else: + if service.get_should_warn(): + service.creation_error_message = ( + f"Invalid configuration, visit {service.get_help_page()} for more details." + ) + self.logger.info(f"{service.get_name()} can't be initialized: {service.creation_error_message}") + return False + + async def _perform_checkup(self, service) -> bool: + try: + await service.prepare() + if self.config: + if constants.CONFIG_CATEGORY_SERVICES not in self.config: + self.config[constants.CONFIG_CATEGORY_SERVICES] = {} + if service.get_type() not in self.config[constants.CONFIG_CATEGORY_SERVICES]: + self.config[constants.CONFIG_CATEGORY_SERVICES][service.get_type()] = {} + self.config[constants.CONFIG_CATEGORY_SERVICES][service.get_type()][ + constants.CONFIG_SERVICE_INSTANCE] = service + if await service.say_hello(): + return service.is_healthy() + else: + self.logger.warning(f"{service.get_name()} initial checkup failed.") + except Exception as e: + self.logger.exception(e, True, f"{service.get_name()} preparation produced the following error: {e}") + return False + + @staticmethod + def has_already_been_created(service_class): + return service_class.instance().get_has_been_created() diff --git a/packages/services/octobot_services/util/__init__.py b/packages/services/octobot_services/util/__init__.py new file mode 100644 index 0000000000..896a2374dc --- /dev/null +++ b/packages/services/octobot_services/util/__init__.py @@ -0,0 +1,35 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_services.util import initializable_with_post_actions +from octobot_services.util import exchange_watcher +from octobot_services.util import returning_startable + +from octobot_services.util.initializable_with_post_actions import ( + InitializableWithPostAction, +) +from octobot_services.util.exchange_watcher import ( + ExchangeWatcher, +) +from octobot_services.util.returning_startable import ( + ReturningStartable, +) + +__all__ = [ + "InitializableWithPostAction", + "ExchangeWatcher", + "ReturningStartable", +] diff --git a/packages/services/octobot_services/util/exchange_watcher.py b/packages/services/octobot_services/util/exchange_watcher.py new file mode 100644 index 0000000000..85c077c983 --- /dev/null +++ b/packages/services/octobot_services/util/exchange_watcher.py @@ -0,0 +1,34 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +class ExchangeWatcher: + """ + ExchangeWatcher is used as a superclass for elements that require to interact with exchanges. + register_new_exchange_impl(self, exchange_id) will be called whenever a new exchange is ready. + Registered exchange ids are stored in self.registered_exchanges_ids + """ + def __init__(self): + self.registered_exchanges_ids = set() + + async def register_new_exchange(self, exchange_id): + try: + await self.register_new_exchange_impl(exchange_id) + finally: + self.registered_exchanges_ids.add(exchange_id) + + async def register_new_exchange_impl(self, exchange_id): + pass diff --git a/packages/services/octobot_services/util/initializable_with_post_actions.py b/packages/services/octobot_services/util/initializable_with_post_actions.py new file mode 100644 index 0000000000..703181e4ea --- /dev/null +++ b/packages/services/octobot_services/util/initializable_with_post_actions.py @@ -0,0 +1,43 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import abc + + +class InitializableWithPostAction: + __metaclass__ = abc.ABCMeta + + def __init__(self): + self.is_initialized = False + + # calls initialize_impl if not initialized + async def initialize(self, *args) -> bool: + if not self.is_initialized: + if await self._initialize_impl(*args): + await self._post_initialize(args) + self.is_initialized = True + return True + return False + return False + + @abc.abstractmethod + async def _initialize_impl(self, *args) -> bool: + raise NotImplementedError("initialize_impl not implemented") + + # Implement _post_initialize if anything specific has to be done after initialize and before start + async def _post_initialize(self, *args) -> bool: + return True + diff --git a/packages/services/octobot_services/util/returning_startable.py b/packages/services/octobot_services/util/returning_startable.py new file mode 100644 index 0000000000..d2d3ecf672 --- /dev/null +++ b/packages/services/octobot_services/util/returning_startable.py @@ -0,0 +1,59 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import abc +import threading + +import octobot_commons.logging as logging + + +class ReturningStartable: + __metaclass__ = abc.ABCMeta + + # Override this method with the actions to perform when starting this + # Called both by async and threaded versions of this (in a threaded new async loop for threaded versions) + @abc.abstractmethod + async def _async_run(self) -> bool: + raise NotImplementedError(f"_async_run is not implemented for {self.__class__.__name__}") + + # Override this method if this has to be run in a thread using this body + # + # async def _inner_start(self) -> bool: + # threading.Thread.start(self) + # return True + async def _inner_start(self) -> bool: + return await self._async_run() + + # Always called to start this + async def start(self) -> bool: + try: + return await self._inner_start() + except Exception as e: + class_name = self.__class__.__name__ + logger = logging.get_logger(class_name) + logger.exception(e, True, f"{class_name} start error: {e}") + return False + + def get_name(self): + raise NotImplementedError + + def threaded_start(self): + threading.Thread(target=self.run, name=self.get_name()).start() + return True + + # Called by threading.Thread.start(self) when a this is threaded + def run(self) -> None: + asyncio.run(self._async_run()) diff --git a/packages/services/standard.rc b/packages/services/standard.rc new file mode 100644 index 0000000000..b914c428ec --- /dev/null +++ b/packages/services/standard.rc @@ -0,0 +1,510 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10.0 + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,tests + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +# suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=I, R, C, W, not-callable + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +#notes-rgx= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/packages/services/tests/__init__.py b/packages/services/tests/__init__.py new file mode 100644 index 0000000000..767d2b7d53 --- /dev/null +++ b/packages/services/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/services/tests/test_imports.py b/packages/services/tests/test_imports.py new file mode 100644 index 0000000000..e5fdc18979 --- /dev/null +++ b/packages/services/tests/test_imports.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot-Interfaces +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import gc +import pytest + +import octobot_services.util + +# ensure that imports that might be conflicting are importable + + +def test_web_imports(): + import flask + import flask_caching + import flask_compress + import flask_socketio + import gevent + import geventwebsocket + import flask_login + import wtforms + import flask_wtf + + +def test_telegram_imports(): + import telegram + import telethon + + +def test_openai_imports(): + import openai + # ensure openai lib mocks don't crash when calling isinstance on them + for obj in gc.get_objects(): + isinstance(obj, str) diff --git a/packages/services/tests/test_interface_api.py b/packages/services/tests/test_interface_api.py new file mode 100644 index 0000000000..5408154bd7 --- /dev/null +++ b/packages/services/tests/test_interface_api.py @@ -0,0 +1,43 @@ +# Drakkar-Software OctoBot-Interfaces +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest + +from octobot_services.api.interfaces import initialize_global_project_data, create_interface_factory, \ + start_interfaces, stop_interfaces +from octobot_services.interfaces.abstract_interface import AbstractInterface + + +def test_initialize_global_project_data(): + bot_id = "bot" + initialize_global_project_data(bot_id, "1", "2") + assert AbstractInterface.bot_id is bot_id + assert AbstractInterface.project_name == "1" + assert AbstractInterface.project_version == "2" + + +def test_create_interface_factory(): + create_interface_factory({}) + + +@pytest.mark.asyncio +async def test_start_interfaces(): + await start_interfaces([]) + + +@pytest.mark.asyncio +async def test_stop_interfaces(): + await stop_interfaces([]) diff --git a/packages/services/tests/test_notification_api.py b/packages/services/tests/test_notification_api.py new file mode 100644 index 0000000000..8dac114aba --- /dev/null +++ b/packages/services/tests/test_notification_api.py @@ -0,0 +1,33 @@ +# Drakkar-Software OctoBot-Notifications +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest + +from octobot_services.api.notification import create_notifier_factory, send_notification, create_notification + + +def test_create_notifier_factory(): + factory = create_notifier_factory({}) + factory.get_available_notifiers() + + +def test_create_notification(): + create_notification("plop") + + +@pytest.mark.asyncio +async def test_send_notification(): + await send_notification(create_notification("")) diff --git a/packages/services/tests/test_service_api.py b/packages/services/tests/test_service_api.py new file mode 100644 index 0000000000..79d0511f7f --- /dev/null +++ b/packages/services/tests/test_service_api.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Services +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest + +from octobot_services.api.services import stop_services, get_available_services +from octobot_services.api.service_feeds import create_service_feed_factory + + +@pytest.mark.asyncio +async def test_init_services(): + get_available_services() + await stop_services() + + +def test_init_service_feeds(): + factory = create_service_feed_factory({}, None, "") + factory.get_available_service_feeds(True) diff --git a/packages/sync/.env.test.example b/packages/sync/.env.test.example new file mode 100644 index 0000000000..6bf04a47d9 --- /dev/null +++ b/packages/sync/.env.test.example @@ -0,0 +1,6 @@ +# S3-compatible object store for E2E tests +# Copy to .env.test and fill in your credentials +S3_ENDPOINT=http://localhost:3900 +S3_BUCKET=octobot-sync-test +S3_ACCESS_KEY=GKtest00000000000000000 +S3_SECRET_KEY=s3secrettest0000000000000000000000000000000000000000 diff --git a/packages/sync/.gitignore b/packages/sync/.gitignore new file mode 100644 index 0000000000..ed9875073f --- /dev/null +++ b/packages/sync/.gitignore @@ -0,0 +1 @@ +.env.test diff --git a/packages/sync/BUILD b/packages/sync/BUILD new file mode 100644 index 0000000000..7aed1d56b2 --- /dev/null +++ b/packages/sync/BUILD @@ -0,0 +1,4 @@ +python_requirements(name="reqs") +python_requirements(name="full_reqs", source="full_requirements.txt") + +python_sources(name="octobot_sync", sources=["octobot_sync/**/*.py"]) diff --git a/packages/sync/CHANGELOG.md b/packages/sync/CHANGELOG.md new file mode 100644 index 0000000000..dfc664d1a4 --- /dev/null +++ b/packages/sync/CHANGELOG.md @@ -0,0 +1,23 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.1.0] - 2026-03-20 +### Added +- [Collections] `product-profiles` collection with JSON Schema validation (name, description, website, twitter, tags) +- [Collections] `product-logos` binary collection with MIME type validation (PNG, JPEG, GIF, WebP) +- [Collections] `product-versions` collection with JSON Schema validation for version documents +- [Signals] `member` role for signal reads — public products allow all authenticated users, private products require on-chain `has_access` +- [RoleEnricher] Assign `member` role via on-chain `has_access` check (owner gets both `owner` and `member`) +- [NginxConf] Escape regex metacharacters in storage paths to prevent nginx config injection +- [NginxConf] Validate collection names (alphanumeric, hyphens, underscores only) +- [NginxConf] Reject zero/negative rate limit values +- [Security] Auth failure logging via `octobot_sync.security` logger +### Changed +- [Constants] Reduce auth timestamp window from 30s to 10s +### Removed +- [Routes] Remove manual product routes (GET/PUT) — replaced by declarative Starfish collections +- [Routes] Remove unused `/verify` endpoint (auth handled by starfish role_resolver) +- [App] Remove `app.state` dependencies (object_store, registry, platform_pubkey) — all handled by Starfish router diff --git a/packages/sync/full_requirements.txt b/packages/sync/full_requirements.txt new file mode 100644 index 0000000000..2dc13e8398 --- /dev/null +++ b/packages/sync/full_requirements.txt @@ -0,0 +1,2 @@ +starfish-server==1.18.1 +cachetools diff --git a/packages/sync/octobot_sync/__init__.py b/packages/sync/octobot_sync/__init__.py new file mode 100644 index 0000000000..6896078e1e --- /dev/null +++ b/packages/sync/octobot_sync/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +PROJECT_NAME = "OctoBot-Sync" +VERSION = "0.1.0" # major.minor.revision diff --git a/packages/sync/octobot_sync/app.py b/packages/sync/octobot_sync/app.py new file mode 100644 index 0000000000..b2d21bb58c --- /dev/null +++ b/packages/sync/octobot_sync/app.py @@ -0,0 +1,117 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +"""Application factory — creates the FastAPI app with all routes.""" + +import os + +import httpx +from fastapi import FastAPI +from starfish_server.storage.base import AbstractObjectStore +from starfish_server.router.route_builder import create_sync_router, SyncRouterOptions +from starfish_server.replica import ReplicaManager + +import octobot_sync.auth as auth +import octobot_sync.chain as chain +import octobot_sync.constants as constants +import octobot_sync.sync as sync + + +def create_app( + nonce: auth.NonceStore, + object_store: AbstractObjectStore, + registry: chain.ChainRegistry, + collections_path: str | None = None, + primary_url: str | None = None, + auth_provider: auth.StarfishAuthProvider | None = None, + write_mode: str = "bidirectional", + sync_interval_ms: int = 60_000, +) -> FastAPI: + app = FastAPI(title="OctoBot Sync — Signal Sync Server") + + platform_pubkey = os.environ["PLATFORM_PUBKEY_EVM"] + encryption_secret = os.environ["ENCRYPTION_SECRET"] + platform_encryption_secret = os.environ["PLATFORM_ENCRYPTION_SECRET"] + + sync_config = sync.load_sync_config(collections_path) + + replica_manager = None + if primary_url: + sync_config = sync.make_replica_config( + sync_config, primary_url, write_mode, sync_interval_ms, + ) + + replica_client = _create_authenticated_client(auth_provider) + replica_manager = ReplicaManager( + store=object_store, + collections=sync_config.collections, + client=replica_client, + ) + + # Starfish sync router (handles all sync collections) + sync_router = create_sync_router( + SyncRouterOptions( + store=object_store, + config=sync_config, + role_resolver=sync.create_role_resolver(registry, nonce, platform_pubkey), + role_enricher=sync.create_role_enricher(registry), + encryption_secret=encryption_secret, + identity_encryption_info=constants.HKDF_INFO_USER_DATA, + server_encryption_secret=platform_encryption_secret, + server_identity=platform_pubkey, + server_encryption_info=constants.HKDF_INFO_PLATFORM_DATA, + signature_verifier=sync.create_signature_verifier(registry), + replica_manager=replica_manager, + ) + ) + app.include_router(sync_router, prefix="/v1") + + @app.get("/health") + async def health(): + return {"ok": True} + + if replica_manager: + @app.on_event("startup") + async def _start_replica(): + await replica_manager.start() + + @app.on_event("shutdown") + async def _stop_replica(): + await replica_manager.stop() + + return app + + +def _create_authenticated_client( + auth_provider: auth.StarfishAuthProvider | None, +) -> httpx.AsyncClient: + """Create an httpx client that signs requests using the StarfishAuthProvider.""" + if auth_provider is None: + return httpx.AsyncClient(timeout=30.0) + + async def _auth_hook(request: httpx.Request): + body_str = request.content.decode("utf-8") if request.content else None + headers = await auth_provider( + method=request.method, + path=str(request.url.raw_path, "ascii"), + body=body_str, + ) + request.headers.update(headers) + + return httpx.AsyncClient( + timeout=30.0, + event_hooks={"request": [_auth_hook]}, + ) diff --git a/packages/sync/octobot_sync/auth/__init__.py b/packages/sync/octobot_sync/auth/__init__.py new file mode 100644 index 0000000000..272c024b7d --- /dev/null +++ b/packages/sync/octobot_sync/auth/__init__.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_sync.auth import canonical +from octobot_sync.auth.canonical import ( + build_canonical, + hash_body, +) + +from octobot_sync.auth import nonce +from octobot_sync.auth.nonce import ( + NonceStore, +) + +from octobot_sync.auth import storage +from octobot_sync.auth.storage import ( + AbstractStorageAdapter, + MemoryStorageAdapter, +) + +from octobot_sync.auth import provider +from octobot_sync.auth.provider import ( + StarfishAuthProvider, +) + +__all__ = [ + "build_canonical", + "hash_body", + "NonceStore", + "AbstractStorageAdapter", + "MemoryStorageAdapter", + "StarfishAuthProvider", +] diff --git a/packages/sync/octobot_sync/auth/canonical.py b/packages/sync/octobot_sync/auth/canonical.py new file mode 100644 index 0000000000..f6625c8150 --- /dev/null +++ b/packages/sync/octobot_sync/auth/canonical.py @@ -0,0 +1,32 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import hashlib + + +def build_canonical( + method: str, + path: str, + timestamp: str, + nonce: str, + body_hash: str, +) -> str: + return f"ED25519-OCTOBOT\n{method}\n{path}\n{timestamp}\n{nonce}\n{body_hash}" + + +def hash_body(body: str | None) -> str: + data = (body or "").encode("utf-8") + return hashlib.sha256(data).hexdigest() diff --git a/packages/sync/octobot_sync/auth/nonce.py b/packages/sync/octobot_sync/auth/nonce.py new file mode 100644 index 0000000000..7f4409657e --- /dev/null +++ b/packages/sync/octobot_sync/auth/nonce.py @@ -0,0 +1,31 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +"""Nonce replay protection.""" + + +import octobot_sync.auth.storage as storage_module + + +class NonceStore: + def __init__(self, store: storage_module.AbstractStorageAdapter) -> None: + self._store = store + + async def nonce_insert(self, nonce: str, pubkey: str) -> bool: + """Returns True if nonce is fresh (allow request). + Returns False if nonce was already seen within the 30s window (reject as replay). + """ + return await self._store.set_if_absent(f"nonce:{pubkey}:{nonce}", "1", 30_000) diff --git a/packages/sync/octobot_sync/auth/provider.py b/packages/sync/octobot_sync/auth/provider.py new file mode 100644 index 0000000000..cdb0f4708c --- /dev/null +++ b/packages/sync/octobot_sync/auth/provider.py @@ -0,0 +1,58 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import time +import uuid + +from web3 import Web3 + +import octobot_sync.auth.canonical as canonical +import octobot_sync.chain.evm as evm +import octobot_sync.constants as constants + + +class StarfishAuthProvider: + def __init__(self, private_key: str, chain_id: str) -> None: + self._w3 = Web3() + self._private_key = private_key + self._address = evm.address_from_evm_key(private_key) + self._chain_id = chain_id + + @property + def address(self) -> str: + return self._address + + async def sign_payload(self, data: str) -> str: + msg_hash = evm._eip191_hash(data) + signed = self._w3.eth.account._sign_hash(msg_hash, private_key=self._private_key) + return signed.signature.hex() + + async def __call__( + self, *, method: str, path: str, body: str | None + ) -> dict[str, str]: + ts = str(int(time.time() * 1000)) + nonce = str(uuid.uuid4()) + body_hash = canonical.hash_body(body) + msg = canonical.build_canonical(method, path, ts, nonce, body_hash) + msg_hash = evm._eip191_hash(msg) + signed = self._w3.eth.account._sign_hash(msg_hash, private_key=self._private_key) + return { + constants.HEADER_PUBKEY: self._address, + constants.HEADER_SIGNATURE: signed.signature.hex(), + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce, + constants.HEADER_CHAIN: self._chain_id, + } diff --git a/packages/sync/octobot_sync/auth/storage.py b/packages/sync/octobot_sync/auth/storage.py new file mode 100644 index 0000000000..882f4dba70 --- /dev/null +++ b/packages/sync/octobot_sync/auth/storage.py @@ -0,0 +1,63 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +"""Storage adapter protocol and in-memory implementation for nonce dedup.""" + +import time +from typing import Protocol + + +class AbstractStorageAdapter(Protocol): + """Atomic key-value store with TTL.""" + + async def set_if_absent(self, key: str, value: str, ttl_ms: int) -> bool: ... + async def set(self, key: str, value: str, ttl_ms: int | None = None) -> None: ... + async def get(self, key: str) -> str | None: ... + async def delete(self, key: str) -> None: ... + + +class MemoryStorageAdapter: + """In-memory AbstractStorageAdapter for testing.""" + + def __init__(self) -> None: + self._store: dict[str, tuple[str, float]] = {} + + def _cleanup(self) -> None: + now = time.time() * 1000 + expired = [k for k, (_, exp) in self._store.items() if exp < now] + for k in expired: + del self._store[k] + + async def set_if_absent(self, key: str, value: str, ttl_ms: int) -> bool: + self._cleanup() + if key in self._store: + return False + self._store[key] = (value, time.time() * 1000 + ttl_ms) + return True + + async def set(self, key: str, value: str, ttl_ms: int | None = None) -> None: + exp = time.time() * 1000 + (ttl_ms if ttl_ms is not None else 999_999_999_999) + self._store[key] = (value, exp) + + async def get(self, key: str) -> str | None: + entry = self._store.get(key) + if entry is None or entry[1] < time.time() * 1000: + self._store.pop(key, None) + return None + return entry[0] + + async def delete(self, key: str) -> None: + self._store.pop(key, None) diff --git a/packages/sync/octobot_sync/chain/__init__.py b/packages/sync/octobot_sync/chain/__init__.py new file mode 100644 index 0000000000..35bfc51bea --- /dev/null +++ b/packages/sync/octobot_sync/chain/__init__.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_sync.chain import interface +from octobot_sync.chain.interface import ( + AbstractChain, + Item, + Wallet, +) + +from octobot_sync.chain import evm +from octobot_sync.chain.evm import ( + EvmChain, + create_evm_wallet, + address_from_evm_key, + verify_evm, +) + +from octobot_sync.chain import registry +from octobot_sync.chain.registry import ( + ChainRegistry, +) + +__all__ = [ + "AbstractChain", + "Item", + "Wallet", + "EvmChain", + "create_evm_wallet", + "address_from_evm_key", + "verify_evm", + "ChainRegistry", +] diff --git a/packages/sync/octobot_sync/chain/evm.py b/packages/sync/octobot_sync/chain/evm.py new file mode 100644 index 0000000000..cccebb6d69 --- /dev/null +++ b/packages/sync/octobot_sync/chain/evm.py @@ -0,0 +1,158 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import functools + +from cachetools import TTLCache +from web3 import Web3, AsyncWeb3 +from web3.providers import AsyncHTTPProvider + +import octobot_sync.chain.interface as chain_interface + +_SENTINEL = object() + + +def _async_ttl_cached(ttl_s: float, maxsize: int = 1024): + def decorator(fn): + cache = TTLCache(maxsize=maxsize, ttl=ttl_s) + + @functools.wraps(fn) + async def wrapper(*args, **kwargs): + key = args[1:] # skip self + result = cache.get(key, _SENTINEL) + if result is not _SENTINEL: + return result + result = await fn(*args, **kwargs) + cache[key] = result + return result + + wrapper.cache = cache + return wrapper + + return decorator + +OCTOBOT_PRODUCT_ABI = [ + { + "type": "function", + "name": "ownerOf", + "inputs": [{"name": "tokenId", "type": "uint256"}], + "outputs": [{"name": "owner", "type": "address"}], + "stateMutability": "view", + }, + { + "type": "function", + "name": "hasAccess", + "inputs": [ + {"name": "user", "type": "address"}, + {"name": "itemId", "type": "uint256"}, + ], + "outputs": [{"name": "", "type": "bool"}], + "stateMutability": "view", + }, +] + + +def _eip191_hash(text: str) -> bytes: + """Compute the EIP-191 personal_sign message hash without eth_account.""" + msg_bytes = text.encode("utf-8") + prefix = f"\x19Ethereum Signed Message:\n{len(msg_bytes)}".encode("utf-8") + return Web3.keccak(prefix + msg_bytes) + + +def create_evm_wallet() -> chain_interface.Wallet: + account = Web3().eth.account.create() + return chain_interface.Wallet( + private_key=account.key.hex(), + address=account.address, + ) + + +def address_from_evm_key(private_key: str) -> str: + return Web3().eth.account.from_key(private_key).address + + +def verify_evm(canonical: str, signature: str, address: str) -> bool: + """Verify an EIP-191 personal_sign signature via web3.""" + try: + msg_hash = _eip191_hash(canonical) + recovered = Web3().eth.account._recover_hash(msg_hash, signature=signature) + return recovered.lower() == address.lower() + except Exception: + return False + + + +class EvmChain: + def __init__( + self, + chain_id: str, + rpc_url: str | None = None, + contract_address: str | None = None, + ) -> None: + self._id = chain_id + if rpc_url and contract_address: + self._contract_address = AsyncWeb3.to_checksum_address(contract_address) + self._w3 = AsyncWeb3(AsyncHTTPProvider(rpc_url)) + self._contract = self._w3.eth.contract( + address=self._contract_address, abi=OCTOBOT_PRODUCT_ABI + ) + else: + self._contract = None + + @property + def id(self) -> str: + return self._id + + @staticmethod + def create_wallet() -> chain_interface.Wallet: + return create_evm_wallet() + + @staticmethod + def address_from_key(private_key: str) -> str: + return address_from_evm_key(private_key) + + async def verify_signature( + self, canonical: str, signature: str, pubkey_or_address: str + ) -> bool: + return verify_evm(canonical, signature, pubkey_or_address) + + def _require_contract(self) -> None: + if self._contract is None: + raise RuntimeError( + f"Chain {self._id}: RPC not configured. " + "Set EVM_BASE_RPC and EVM_CONTRACT_BASE environment variables." + ) + + @_async_ttl_cached(ttl_s=30) + async def get_item(self, item_id: str) -> chain_interface.Item | None: + self._require_contract() + try: + owner = await self._contract.functions.ownerOf(int(item_id)).call() + return chain_interface.Item(id=item_id, owner=owner) + except Exception: + return None + + @_async_ttl_cached(ttl_s=365 * 86400) + async def is_item_owner(self, item_id: str, pubkey_or_address: str) -> bool: + item = await self.get_item(item_id) + return item is not None and item.owner.lower() == pubkey_or_address.lower() + + @_async_ttl_cached(ttl_s=60) + async def has_access(self, item_id: str, user_address: str) -> bool: + self._require_contract() + return await self._contract.functions.hasAccess( + AsyncWeb3.to_checksum_address(user_address), int(item_id) + ).call() diff --git a/packages/sync/octobot_sync/chain/interface.py b/packages/sync/octobot_sync/chain/interface.py new file mode 100644 index 0000000000..29074f87ad --- /dev/null +++ b/packages/sync/octobot_sync/chain/interface.py @@ -0,0 +1,51 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from dataclasses import dataclass +from typing import Protocol + + +@dataclass +class Item: + id: str + owner: str + + +@dataclass +class Wallet: + private_key: str + address: str + + +class AbstractChain(Protocol): + @property + def id(self) -> str: ... + + @staticmethod + def create_wallet() -> Wallet: ... + + @staticmethod + def address_from_key(private_key: str) -> str: ... + + async def verify_signature( + self, canonical: str, signature: str, pubkey_or_address: str + ) -> bool: ... + + async def get_item(self, item_id: str) -> Item | None: ... + + async def is_item_owner(self, item_id: str, pubkey_or_address: str) -> bool: ... + + async def has_access(self, item_id: str, user_address: str) -> bool: ... diff --git a/packages/sync/octobot_sync/chain/registry.py b/packages/sync/octobot_sync/chain/registry.py new file mode 100644 index 0000000000..f8add4fdd1 --- /dev/null +++ b/packages/sync/octobot_sync/chain/registry.py @@ -0,0 +1,34 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import octobot_sync.chain.interface as chain_interface + + +class ChainRegistry: + def __init__(self) -> None: + self._chains: dict[str, chain_interface.AbstractChain] = {} + + def register(self, chain: chain_interface.AbstractChain) -> None: + self._chains[chain.id] = chain + + def get(self, chain_id: str) -> chain_interface.AbstractChain: + chain = self._chains.get(chain_id) + if chain is None: + raise ValueError(f"Unknown chain: {chain_id}") + return chain + + def list(self) -> list[chain_interface.AbstractChain]: + return list(self._chains.values()) diff --git a/packages/sync/octobot_sync/client.py b/packages/sync/octobot_sync/client.py new file mode 100644 index 0000000000..812b41941f --- /dev/null +++ b/packages/sync/octobot_sync/client.py @@ -0,0 +1,81 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import threading + +from starfish_sdk import StarfishClient + +import octobot_commons.logging as logging +import octobot_sync.auth as auth +import octobot_sync.constants as constants +import octobot_sync.server as server + +_local_server_thread: threading.Thread | None = None + + +def create_sync_client( + private_key: str, + chain_id: str, + sync_url: str = None, + start_replica_server: bool = False, + replica_port: int = 3000, + replica_write_mode: str = "bidirectional", + replica_sync_interval_ms: int = 60_000, +) -> tuple[StarfishClient, str]: + auth_provider = auth.StarfishAuthProvider(private_key, chain_id) + + if start_replica_server: + sync_url = _start_replica_server( + primary_url=sync_url, + private_key=private_key, + chain_id=chain_id, + port=replica_port, + platform_pubkey=auth_provider.address, + write_mode=replica_write_mode, + sync_interval_ms=replica_sync_interval_ms, + ) + + client = StarfishClient( + base_url=sync_url, + auth=auth_provider, + namespace=None if start_replica_server else constants.SYNC_NAMESPACE, + ) + logging.get_logger("SyncClient").info(f"Sync client initialized (sync server: {sync_url}, address: {auth_provider.address})") + return client, auth_provider.address, auth_provider.sign_payload + + +def _start_replica_server( + primary_url: str, + private_key: str, + chain_id: str, + port: int, + platform_pubkey: str, + write_mode: str = "bidirectional", + sync_interval_ms: int = 60_000, +) -> str: + global _local_server_thread + if _local_server_thread is None or not _local_server_thread.is_alive(): + _local_server_thread = server.start_replica_server_background( + primary_url=primary_url, + private_key=private_key, + chain_id=chain_id, + host="127.0.0.1", + port=port, + platform_pubkey=platform_pubkey, + write_mode=write_mode, + sync_interval_ms=sync_interval_ms, + ) + return f"http://127.0.0.1:{port}" diff --git a/packages/sync/octobot_sync/constants.py b/packages/sync/octobot_sync/constants.py new file mode 100644 index 0000000000..df0de05f9e --- /dev/null +++ b/packages/sync/octobot_sync/constants.py @@ -0,0 +1,35 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +MAX_BODY_SIZE_SIGNAL = 64 * 1024 # 64 KB — signal payload +MAX_BODY_SIZE_PERFORMANCE = 64 * 1024 # 64 KB — live performance snapshot +MAX_BODY_SIZE_PRIVATE = 10 * 1024 * 1024 # 10 MB — private documents + +HKDF_INFO_USER_DATA = "octobot-sync-user-data" +HKDF_INFO_PLATFORM_DATA = "octobot-sync-platform-data" + +MAX_NONCE_LENGTH = 128 +MAX_PUBKEY_LENGTH = 256 +MAX_SIGNATURE_LENGTH = 512 +TIMESTAMP_WINDOW_MS = 10_000 + +HEADER_PUBKEY = "X-Starfish-Pubkey" +HEADER_SIGNATURE = "X-Starfish-Signature" +HEADER_TIMESTAMP = "X-Starfish-Timestamp" +HEADER_NONCE = "X-Starfish-Nonce" +HEADER_CHAIN = "X-Starfish-Chain" +COLLECTIONS_FILE = "collections.json" +SYNC_NAMESPACE = "octobot" diff --git a/packages/sync/octobot_sync/server.py b/packages/sync/octobot_sync/server.py new file mode 100644 index 0000000000..6a8834cbb5 --- /dev/null +++ b/packages/sync/octobot_sync/server.py @@ -0,0 +1,163 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import os +import threading + +import uvicorn +from starfish_server.storage.s3 import S3ObjectStore, S3StorageOptions +from starfish_server.storage.filesystem import FilesystemObjectStore, FilesystemStorageOptions + +import octobot_commons.logging as logging +import octobot_sync.app as sync_app +import octobot_sync.auth as auth +import octobot_sync.chain as chain + +def _get_logger(): + return logging.get_logger("OctoBot-Sync") + + +def _require_env(key: str) -> str: + value = os.environ.get(key) + if not value: + raise RuntimeError(f"Required environment variable missing: {key}") + return value + + +def _setup_registry() -> chain.ChainRegistry: + registry = chain.ChainRegistry() + evm_base_rpc = os.getenv("EVM_BASE_RPC") + evm_contract_base = os.getenv("EVM_CONTRACT_BASE") + if evm_base_rpc and evm_contract_base: + registry.register(chain.EvmChain("evm:8453", evm_base_rpc, evm_contract_base)) + else: + registry.register(chain.EvmChain("evm:8453")) + return registry + + +def _build_app(platform_pubkey: str | None = None) -> tuple: + """Build a standalone (primary) server backed by S3 storage.""" + nonce = auth.NonceStore(auth.MemoryStorageAdapter()) + + object_store = S3ObjectStore( + S3StorageOptions( + access_key_id=_require_env("S3_ACCESS_KEY"), + secret_access_key=_require_env("S3_SECRET_KEY"), + endpoint=_require_env("S3_ENDPOINT"), + bucket=_require_env("S3_BUCKET"), + region=_require_env("S3_REGION"), + ) + ) + + registry = _setup_registry() + + if platform_pubkey: + os.environ.setdefault("PLATFORM_PUBKEY_EVM", platform_pubkey) + + app = sync_app.create_app(nonce, object_store, registry) + return app + + +def _build_replica_app( + primary_url: str, + private_key: str, + chain_id: str, + platform_pubkey: str | None = None, + write_mode: str = "bidirectional", + sync_interval_ms: int = 60_000, + data_dir: str | None = None, +) -> tuple: + """Build a replica server backed by local filesystem storage.""" + nonce = auth.NonceStore(auth.MemoryStorageAdapter()) + + resolved_data_dir = data_dir or os.path.join( + os.path.expanduser("~"), ".octobot", "sync_data" + ) + object_store = FilesystemObjectStore( + FilesystemStorageOptions(base_dir=resolved_data_dir) + ) + + registry = _setup_registry() + auth_provider = auth.StarfishAuthProvider(private_key, chain_id) + + if platform_pubkey: + os.environ.setdefault("PLATFORM_PUBKEY_EVM", platform_pubkey) + + app = sync_app.create_app( + nonce, + object_store, + registry, + primary_url=primary_url, + auth_provider=auth_provider, + write_mode=write_mode, + sync_interval_ms=sync_interval_ms, + ) + return app + + +def start_sync_server(host: str = "0.0.0.0", port: int | None = None) -> None: + app = _build_app() + resolved_port = port or int(os.getenv("PORT", "3000")) + _get_logger().info(f"OctoBot-Sync server listening on {host}:{resolved_port}") + uvicorn.run(app, host=host, port=resolved_port) + + +def start_sync_server_background( + host: str = "127.0.0.1", + port: int = 3000, + platform_pubkey: str | None = None, +) -> threading.Thread: + app = _build_app(platform_pubkey=platform_pubkey) + config = uvicorn.Config(app, host=host, port=port, log_level="warning") + server = uvicorn.Server(config) + + thread = threading.Thread(target=server.run, name="octobot-sync", daemon=True) + thread.start() + _get_logger().info(f"Local sync server started on http://{host}:{port}") + return thread + + +def start_replica_server_background( + primary_url: str, + private_key: str, + chain_id: str, + host: str = "127.0.0.1", + port: int = 3000, + platform_pubkey: str | None = None, + write_mode: str = "bidirectional", + sync_interval_ms: int = 60_000, + data_dir: str | None = None, +) -> threading.Thread: + """Start a replica server in a background daemon thread.""" + app = _build_replica_app( + primary_url=primary_url, + private_key=private_key, + chain_id=chain_id, + platform_pubkey=platform_pubkey, + write_mode=write_mode, + sync_interval_ms=sync_interval_ms, + data_dir=data_dir, + ) + config = uvicorn.Config(app, host=host, port=port, log_level="warning") + server = uvicorn.Server(config) + + thread = threading.Thread(target=server.run, name="octobot-sync-replica", daemon=True) + thread.start() + _get_logger().info( + f"Replica sync server started on http://{host}:{port} " + f"(primary: {primary_url})" + ) + return thread diff --git a/packages/sync/octobot_sync/sync/__init__.py b/packages/sync/octobot_sync/sync/__init__.py new file mode 100644 index 0000000000..9b4646c9b6 --- /dev/null +++ b/packages/sync/octobot_sync/sync/__init__.py @@ -0,0 +1,38 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_sync.sync import collections +from octobot_sync.sync.collections import ( + load_sync_config, + is_replicable_collection, + make_replica_config, +) + +from octobot_sync.sync import role_resolver +from octobot_sync.sync.role_resolver import ( + create_role_resolver, + create_role_enricher, + create_signature_verifier, +) + +__all__ = [ + "load_sync_config", + "is_replicable_collection", + "make_replica_config", + "create_role_resolver", + "create_role_enricher", + "create_signature_verifier", +] diff --git a/packages/sync/octobot_sync/sync/collections.py b/packages/sync/octobot_sync/sync/collections.py new file mode 100644 index 0000000000..a3fadf275d --- /dev/null +++ b/packages/sync/octobot_sync/sync/collections.py @@ -0,0 +1,109 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import os + +import octobot_commons.constants as commons_constants +import octobot_commons.logging as logging + +from starfish_server.config.loader import load_config_file +from starfish_server.config.schema import SyncConfig, CollectionConfig, RemoteConfig, WriteMode, SyncTrigger + +import octobot_sync.constants as constants + +logger = logging.get_logger("SyncCollections") + +DEFAULT_SYNC_CONFIG = SyncConfig( + version=1, + collections=[ + CollectionConfig( + name="bots", + storagePath="users/{identity}", + bundle="user-data", + readRoles=["self"], + writeRoles=["self"], + encryption="identity", + maxBodyBytes=constants.MAX_BODY_SIZE_PRIVATE, + ), + CollectionConfig( + name="accounts", + storagePath="users/{identity}", + bundle="user-data", + readRoles=["self"], + writeRoles=["self"], + encryption="identity", + maxBodyBytes=constants.MAX_BODY_SIZE_PRIVATE, + ), + CollectionConfig( + name="errors", + storagePath="users/{identity}/errors/{errorId}", + readRoles=["self", "admin"], + writeRoles=["self"], + encryption="delegated", + maxBodyBytes=500_000, + ), + ], +) + + +def load_sync_config( + collections_path: str | None = None, +) -> SyncConfig: + path = collections_path or os.path.join( + commons_constants.USER_FOLDER, constants.COLLECTIONS_FILE + ) + if not os.path.isfile(path): + logger.warning( + f"Collections file not found at {path}, using default config" + ) + return DEFAULT_SYNC_CONFIG + return load_config_file(path) + + +def is_replicable_collection(col: CollectionConfig) -> bool: + """A collection is replicable if its storagePath has no template variables.""" + return "{" not in col.storage_path + + +def make_replica_config( + config: SyncConfig, + primary_url: str, + write_mode: str = "bidirectional", + sync_interval_ms: int = 60_000, +) -> SyncConfig: + """Inject RemoteConfig into replicable collections. + + Returns the updated SyncConfig with remote on replicable collections. + Non-replicable (templated) collections are kept as-is. + """ + mode = WriteMode(write_mode) + updated = [] + for col in config.collections: + if is_replicable_collection(col): + col = col.model_copy( + update={ + "remote": RemoteConfig( + url=primary_url, + pullPath=f"/pull/{col.storage_path}", + pushPath=f"/push/{col.storage_path}" if mode != WriteMode.PULL_ONLY else None, + writeMode=mode, + intervalMs=sync_interval_ms, + syncTriggers=[SyncTrigger.ON_PULL, SyncTrigger.SCHEDULED], + ), + } + ) + updated.append(col) + return config.model_copy(update={"collections": updated}) diff --git a/packages/sync/octobot_sync/sync/role_resolver.py b/packages/sync/octobot_sync/sync/role_resolver.py new file mode 100644 index 0000000000..b1570a5a5f --- /dev/null +++ b/packages/sync/octobot_sync/sync/role_resolver.py @@ -0,0 +1,117 @@ +# This file is part of OctoBot Sync (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import re +import time +from contextvars import ContextVar +from urllib.parse import urlparse + +from fastapi import Request +from starfish_server.router.route_builder import AuthResult + +import octobot_sync.auth as auth +import octobot_sync.chain as chain +import octobot_sync.constants as constants + +# Per-request chain context so the enricher can access the resolved chain +_request_chain: ContextVar[chain.AbstractChain | None] = ContextVar("_request_chain", default=None) + + +def create_role_resolver( + registry: chain.ChainRegistry, + nonce: auth.NonceStore, + platform_pubkey: str, +): + async def role_resolver(request: Request) -> AuthResult: + pubkey = request.headers.get(constants.HEADER_PUBKEY) + signature = request.headers.get(constants.HEADER_SIGNATURE) + timestamp = request.headers.get(constants.HEADER_TIMESTAMP) + nonce_header = request.headers.get(constants.HEADER_NONCE) + chain_id = request.headers.get(constants.HEADER_CHAIN) + + if not all([pubkey, signature, timestamp, nonce_header, chain_id]): + raise ValueError("Missing authentication headers") + + if len(pubkey) > constants.MAX_PUBKEY_LENGTH: + raise ValueError("Invalid pubkey") + if len(signature) > constants.MAX_SIGNATURE_LENGTH: + raise ValueError("Invalid signature") + if len(nonce_header) > constants.MAX_NONCE_LENGTH: + raise ValueError("Invalid nonce") + + if not re.match(r"^\d+$", timestamp): + raise ValueError("Invalid timestamp") + ts = int(timestamp) + if abs(ts - int(time.time() * 1000)) > constants.TIMESTAMP_WINDOW_MS: + raise ValueError("Timestamp out of window") + + try: + chain = registry.get(chain_id) + except Exception: + raise ValueError("Unknown chain") + + # Store chain for later use by enricher + _request_chain.set(chain) + + body = await request.body() + body_text = body.decode("utf-8") if body else "" + body_hash = auth.hash_body(body_text) + path = urlparse(str(request.url)).path + canonical = auth.build_canonical(request.method, path, timestamp, nonce_header, body_hash) + + valid = await chain.verify_signature(canonical, signature, pubkey) + if not valid: + raise ValueError("Invalid signature") + + fresh = await nonce.nonce_insert(nonce_header, pubkey) + if not fresh: + raise ValueError("Replay") + + roles = ["user"] + if pubkey == platform_pubkey: + roles.append("admin") + + return AuthResult(identity=pubkey, roles=roles) + + return role_resolver + + +def create_role_enricher(registry: chain.ChainRegistry): + async def role_enricher(auth: AuthResult, params: dict[str, str]) -> list[str]: + product_id = params.get("productId") + if not product_id: + return [] + for c in registry.list(): + if await c.is_item_owner(product_id, auth.identity): + return ["owner", "member"] + if await c.has_access(product_id, auth.identity): + return ["member"] + return [] + + return role_enricher + + +def create_signature_verifier(registry: chain.ChainRegistry): + async def signature_verifier(data: str, signature: str, pubkey: str) -> bool: + chain = _request_chain.get() + if chain is not None: + return await chain.verify_signature(data, signature, pubkey) + for chain in registry.list(): + if await chain.verify_signature(data, signature, pubkey): + return True + return False + + return signature_verifier diff --git a/packages/sync/octobot_sync/util/__init__.py b/packages/sync/octobot_sync/util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/sync/octobot_sync/util/nginx_conf.py b/packages/sync/octobot_sync/util/nginx_conf.py new file mode 100644 index 0000000000..e71a5de785 --- /dev/null +++ b/packages/sync/octobot_sync/util/nginx_conf.py @@ -0,0 +1,217 @@ +"""Generate an nginx config from an OctoBot sync collections.json. + +Produces an nginx server block where: +- Public + pull_only collections → cached with long TTL (1h) +- Public + writable collections → cached with short TTL (30s) +- Everything else → proxy_pass straight to OctoBot sync, no cache +- Rate limiting from global rateLimit config + per-collection rateLimit flag + +CLI usage: + python -m octobot_sync.nginx_conf collections.json > nginx.conf + python -m octobot_sync.nginx_conf collections.json --upstream octobot-sync:3000 --listen 80 +""" + +import argparse +import json +import math +import re +import sys +import textwrap + + +_COLLECTION_NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$") + + +def storage_path_to_regex(storage_path: str) -> str: + """Convert a storagePath to an nginx location regex. + + "items/{itemId}/feed/{version}" → "items/[^/]+/feed/[^/]+" + "public/catalog" → "public/catalog" + + Literal path segments are escaped so that regex metacharacters in + collection paths cannot inject arbitrary nginx location patterns. + """ + parts = re.split(r"(\{[^}]+\})", storage_path) + result = [] + for part in parts: + if part.startswith("{") and part.endswith("}"): + result.append("[^/]+") + else: + result.append(re.escape(part)) + return "".join(result) + + +def rate_to_nginx(max_requests: int, window_ms: int) -> tuple[str, int]: + """Convert maxRequests/windowMs to nginx rate string and burst. + + Returns (rate_str, burst) e.g. ("2r/s", 20). + """ + if max_requests <= 0 or window_ms <= 0: + raise ValueError(f"Rate limit values must be positive: maxRequests={max_requests}, windowMs={window_ms}") + window_s = window_ms / 1000 + rps = max_requests / window_s + if rps >= 1: + rate_str = f"{math.ceil(rps)}r/s" + else: + rpm = max_requests / (window_s / 60) + rate_str = f"{math.ceil(rpm)}r/m" + burst = max(1, max_requests // 2) + return rate_str, burst + + +def generate(collections_path: str, upstream: str, listen: int) -> str: + with open(collections_path) as f: + config = json.load(f) + + collections = config.get("collections", []) + for col in collections: + name = col.get("name", "") + if not _COLLECTION_NAME_RE.match(name): + raise ValueError(f"Invalid collection name (must be alphanumeric/hyphens/underscores): {name!r}") + global_rate_limit = config.get("rateLimit") + + # Rate limit zones + rate_limit_block = "" + global_rate_str = "" + global_burst = 0 + strict_rate_str = "" + strict_burst = 0 + if global_rate_limit: + window_ms = global_rate_limit["windowMs"] + max_requests = global_rate_limit["maxRequests"] + global_rate_str, global_burst = rate_to_nginx(max_requests, window_ms) + strict_rate_str, strict_burst = rate_to_nginx( + max(1, max_requests // 2), window_ms + ) + rate_limit_block = textwrap.dedent(f"""\ + limit_req_zone $binary_remote_addr zone=sync_global:10m rate={global_rate_str}; + limit_req_zone $binary_remote_addr zone=sync_strict:10m rate={strict_rate_str}; + limit_req_status 429; + """) + + # Cached pull locations for public collections + cached_locations = [] + for col in collections: + read_roles = col.get("readRoles", []) + if "public" not in read_roles: + continue + + path_re = storage_path_to_regex(col["storagePath"]) + pull_only = col.get("pullOnly", False) + name = col["name"] + + ttl = "1h" if pull_only else "30s" + + rate_line = "" + if global_rate_limit: + rate_line = f"\n limit_req zone=sync_global burst={global_burst} nodelay;" + + cached_locations.append( + textwrap.dedent(f"""\ + # {name} (public, {"pull_only" if pull_only else "writable"}) + location ~* ^/v1/pull/{path_re}$ {{ + proxy_pass http://octobot_sync; + proxy_cache sync_cache; + proxy_cache_valid 200 {ttl}; + proxy_cache_use_stale error timeout updating; + proxy_cache_lock on; + add_header X-Cache-Status $upstream_cache_status;{rate_line} + }}""") + ) + + # Strict rate-limited push locations for collections with rateLimit: true + rate_limited_push_locations = [] + if global_rate_limit: + for col in collections: + if not col.get("rateLimit"): + continue + if col.get("pullOnly"): + continue + + path_re = storage_path_to_regex(col["storagePath"]) + name = col["name"] + + rate_limited_push_locations.append( + textwrap.dedent(f"""\ + # {name} (rate limited push) + location ~* ^/v1/push/{path_re}$ {{ + proxy_pass http://octobot_sync; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + limit_req zone=sync_strict burst={strict_burst} nodelay; + }}""") + ) + + cached_block = "\n\n".join(cached_locations) if cached_locations else "" + push_block = "\n\n".join(rate_limited_push_locations) if rate_limited_push_locations else "" + + # Global rate limit on catch-all + catchall_rate_line = "" + if global_rate_limit: + catchall_rate_line = f"\n limit_req zone=sync_global burst={global_burst} nodelay;" + + return textwrap.dedent(f"""\ +proxy_cache_path /var/cache/nginx/sync + levels=1:2 + keys_zone=sync_cache:10m + max_size=1g + inactive=60m + use_temp_path=off; + +{rate_limit_block}upstream octobot_sync {{ + server {upstream}; +}} + +server {{ + listen {listen}; + server_name _; + + client_max_body_size 10m; + + # ── Health (no cache, no rate limit) ── + location = /health {{ + proxy_pass http://octobot_sync; + }} + +{textwrap.indent(cached_block, " ") if cached_block else " # (no public collections found)"} + +{textwrap.indent(push_block, " ") if push_block else ""} + + # ── Catch-all: proxy to OctoBot sync, no cache ── + location /v1/ {{ + proxy_pass http://octobot_sync; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme;{catchall_rate_line} + }} + + # Reject anything outside /v1 and /health + location / {{ + return 404; + }} +}} +""") + + +def main(): + parser = argparse.ArgumentParser( + description="Generate nginx config from OctoBot sync collections.json" + ) + parser.add_argument("collections", help="Path to collections.json") + parser.add_argument( + "--upstream", default="octobot-sync:3000", + help="OctoBot sync upstream host:port (default: octobot-sync:3000)", + ) + parser.add_argument( + "--listen", type=int, default=80, + help="nginx listen port (default: 80)", + ) + args = parser.parse_args() + + sys.stdout.write(generate(args.collections, args.upstream, args.listen)) + + +if __name__ == "__main__": + main() diff --git a/packages/sync/pytest.ini b/packages/sync/pytest.ini new file mode 100644 index 0000000000..78c5011f9d --- /dev/null +++ b/packages/sync/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +asyncio_mode = auto +testpaths = tests diff --git a/packages/sync/requirements.txt b/packages/sync/requirements.txt new file mode 100644 index 0000000000..d63f332268 --- /dev/null +++ b/packages/sync/requirements.txt @@ -0,0 +1,3 @@ +starfish-sdk==1.18.1 +python-multipart +web3 diff --git a/packages/sync/tests/__init__.py b/packages/sync/tests/__init__.py new file mode 100644 index 0000000000..a448e53f11 --- /dev/null +++ b/packages/sync/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/sync/tests/conftest.py b/packages/sync/tests/conftest.py new file mode 100644 index 0000000000..00aaf9ed6a --- /dev/null +++ b/packages/sync/tests/conftest.py @@ -0,0 +1,124 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Shared test fixtures.""" + +import time + +import pytest + +import octobot_sync.auth as auth +import octobot_sync.chain as chain +import octobot_sync.constants as constants +import tests.mock_chain as mock_chain_module + + + +TEST_PUBKEY = "0xTestPubkey1234567890abcdef" +TEST_ADMIN_PUBKEY = "0xAdminPubkey1234567890abcdef" +TEST_CHAIN_ID = "mock" + + + + +@pytest.fixture +def memory_storage(): + return auth.MemoryStorageAdapter() + + +@pytest.fixture +def nonce_store(memory_storage): + return auth.NonceStore(memory_storage) + + +@pytest.fixture +def mock_chain(): + return mock_chain_module.MockChain(TEST_CHAIN_ID) + + +@pytest.fixture +def chain_registry(mock_chain): + registry = chain.ChainRegistry() + registry.register(mock_chain) + return registry + + + + + +class MemoryObjectStore: + """Minimal AbstractObjectStore for testing.""" + + def __init__(self) -> None: + self._store: dict[str, str] = {} + + async def get_string(self, key: str) -> str | None: + return self._store.get(key) + + async def put( + self, key: str, body: str, *, content_type: str | None = None, cache_control: str | None = None + ) -> None: + self._store[key] = body + + async def list_keys( + self, prefix: str, *, start_after: str | None = None, limit: int | None = None + ) -> list[str]: + keys = sorted(k for k in self._store if k.startswith(prefix)) + if start_after: + keys = [k for k in keys if k > start_after] + if limit: + keys = keys[:limit] + return keys + + async def delete(self, key: str) -> None: + self._store.pop(key, None) + + async def delete_many(self, keys: list[str]) -> None: + for k in keys: + self._store.pop(k, None) + + +@pytest.fixture +def memory_object_store(): + return MemoryObjectStore() + + + + +def make_auth_headers( + mock_chain: mock_chain_module.MockChain, + pubkey: str = TEST_PUBKEY, + method: str = "GET", + path: str = "/", + body: str = "", + chain_id: str = TEST_CHAIN_ID, +) -> dict[str, str]: + """Create valid auth headers and configure the mock chain to accept them.""" + ts = str(int(time.time() * 1000)) + nonce = f"test-nonce-{time.time()}" + body_hash = auth.hash_body(body) + canonical = auth.build_canonical(method, path, ts, nonce, body_hash) + signature = f"sig-{ts}" + + mock_chain.set_signature_valid(canonical, signature, pubkey, True) + + return { + constants.HEADER_PUBKEY: pubkey, + constants.HEADER_SIGNATURE: signature, + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce, + constants.HEADER_CHAIN: chain_id, + } diff --git a/packages/sync/tests/e2e/__init__.py b/packages/sync/tests/e2e/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/sync/tests/e2e/conftest.py b/packages/sync/tests/e2e/conftest.py new file mode 100644 index 0000000000..c10da20d91 --- /dev/null +++ b/packages/sync/tests/e2e/conftest.py @@ -0,0 +1,81 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Shared fixtures for e2e tests.""" + +import os +from pathlib import Path + +import pytest +from httpx import AsyncClient, ASGITransport + +import octobot_sync.app as sync_app +import octobot_sync.auth as auth +import octobot_sync.chain as chain +import tests.mock_chain as mock_chain_module + +ADMIN_PUBKEY = "0xE2eAdminPubkey" +USER_PUBKEY = "0xE2eUserPubkey" +OTHER_PUBKEY = "0xE2eOtherPubkey" +CHAIN_ID = "mock" + +COLLECTIONS_PATH = str(Path(__file__).resolve().parent.parent / "fixtures" / "collections.json") + + +@pytest.fixture +async def s3_store(): + from starfish_server.storage.s3 import S3ObjectStore, S3StorageOptions + + store = S3ObjectStore( + S3StorageOptions( + access_key_id=os.environ["S3_ACCESS_KEY"], + secret_access_key=os.environ["S3_SECRET_KEY"], + endpoint=os.environ["S3_ENDPOINT"], + bucket=os.environ.get("S3_BUCKET", "octobot-sync-test"), + region=os.environ.get("S3_REGION", "us-east-1"), + ) + ) + yield store + try: + for prefix in ("test/", "products/", "users/", "public/", "platform/"): + keys = await store.list_keys(prefix) + if keys: + await store.delete_many(keys) + finally: + await store.close() + + +@pytest.fixture +def mock_chain(): + return mock_chain_module.MockChain(CHAIN_ID) + + +@pytest.fixture +def app(s3_store, mock_chain, monkeypatch): + monkeypatch.setenv("PLATFORM_PUBKEY_EVM", ADMIN_PUBKEY) + monkeypatch.setenv("ENCRYPTION_SECRET", "e2e-encryption-secret") + monkeypatch.setenv("PLATFORM_ENCRYPTION_SECRET", "e2e-platform-secret") + registry = chain.ChainRegistry() + registry.register(mock_chain) + nonce = auth.NonceStore(auth.MemoryStorageAdapter()) + return sync_app.create_app(nonce, s3_store, registry, collections_path=COLLECTIONS_PATH) + + +@pytest.fixture +async def client(app): + transport = ASGITransport(app=app) + async with AsyncClient(transport=transport, base_url="http://test") as ac: + yield ac diff --git a/packages/sync/tests/e2e/test_error_sharing_e2e.py b/packages/sync/tests/e2e/test_error_sharing_e2e.py new file mode 100644 index 0000000000..eeb480a3c7 --- /dev/null +++ b/packages/sync/tests/e2e/test_error_sharing_e2e.py @@ -0,0 +1,195 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""E2E tests — error sharing via sync server + real S3.""" + +import os +import time +import uuid + +import pytest +from httpx import ASGITransport + +from starfish_sdk import StarfishClient, SyncManager + +import octobot_sync.app as sync_app +import octobot_sync.auth as auth +import octobot_sync.chain as chain +import octobot_sync.constants as constants +import tests.mock_chain as mock_chain_module +from octobot.community.errors_upload.error_sharing import ( + upload_error, + ERRORS_PULL_PATH_TEMPLATE, + ERRORS_PUSH_PATH_TEMPLATE, + ENCRYPTION_INFO, +) +from tests.e2e.conftest import ADMIN_PUBKEY, USER_PUBKEY, CHAIN_ID, COLLECTIONS_PATH + +pytestmark = pytest.mark.skipif( + not os.environ.get("S3_ENDPOINT"), + reason="S3_ENDPOINT not set — skipping e2e tests", +) + + +def _make_auth_provider( + mock_chain: mock_chain_module.MockChain, + pubkey: str, +): + async def auth_provider( + *, method: str, path: str, body: str | None + ) -> dict[str, str]: + ts = str(int(time.time() * 1000)) + nonce = f"err-nonce-{uuid.uuid4()}" + body_hash = auth.hash_body(body or "") + canonical = auth.build_canonical(method, path, ts, nonce, body_hash) + signature = f"err-sig-{ts}" + mock_chain.set_signature_valid(canonical, signature, pubkey, True) + + return { + constants.HEADER_PUBKEY: pubkey, + constants.HEADER_SIGNATURE: signature, + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce, + constants.HEADER_CHAIN: CHAIN_ID, + } + + return auth_provider + + +@pytest.fixture +async def sync_client(s3_store, mock_chain, monkeypatch): + monkeypatch.setenv("PLATFORM_PUBKEY_EVM", ADMIN_PUBKEY) + monkeypatch.setenv("ENCRYPTION_SECRET", "e2e-encryption-secret") + monkeypatch.setenv("PLATFORM_ENCRYPTION_SECRET", "e2e-platform-secret") + registry = chain.ChainRegistry() + registry.register(mock_chain) + nonce = auth.NonceStore(auth.MemoryStorageAdapter()) + app = sync_app.create_app(nonce, s3_store, registry, collections_path=COLLECTIONS_PATH) + + import httpx + + transport = ASGITransport(app=app) + http_client = httpx.AsyncClient(transport=transport, base_url="http://test") + client = StarfishClient( + base_url="http://test", + auth=_make_auth_provider(mock_chain, USER_PUBKEY), + client=http_client, + ) + yield client + await client.close() + + +async def test_upload_error_returns_credentials(sync_client): + """upload_error returns errorId (salt) and errorSecret for decryption.""" + try: + raise ValueError("something broke during trading") + except ValueError as exc: + result = await upload_error( + sync_client, + USER_PUBKEY, + exc, + context={"exchange": "binance", "pair": "BTC/USDT"}, + ) + + assert result is not None + assert "hash" in result + assert "errorId" in result + assert "errorSecret" in result + assert len(result["errorId"]) == 32 + assert len(result["errorSecret"]) == 64 + + +async def test_upload_error_encrypted_at_rest(sync_client, s3_store): + """Uploaded error data is encrypted in S3 (delegated encryption).""" + try: + raise ValueError("secret trading error") + except ValueError as exc: + result = await upload_error(sync_client, USER_PUBKEY, exc) + + salt = result["errorId"] + raw = await s3_store.get_string(f"users/{USER_PUBKEY}/errors/{salt}") + assert raw is not None + assert "secret trading error" not in raw + assert "ValueError" not in raw + + +async def test_upload_error_decryptable_with_credentials(sync_client): + """Error can be decrypted using the returned errorId and errorSecret.""" + try: + raise RuntimeError("decryption test") + except RuntimeError as exc: + result = await upload_error( + sync_client, + USER_PUBKEY, + exc, + context={"exchange": "binance"}, + ) + + salt = result["errorId"] + error_secret = result["errorSecret"] + + manager = SyncManager( + client=sync_client, + pull_path=ERRORS_PULL_PATH_TEMPLATE.format(pubkey=USER_PUBKEY, errorId=salt), + push_path=ERRORS_PUSH_PATH_TEMPLATE.format(pubkey=USER_PUBKEY, errorId=salt), + encryption_secret=error_secret, + encryption_salt=salt, + encryption_info=ENCRYPTION_INFO, + ) + data = await manager.pull() + assert data["message"] == "decryption test" + assert data["type"] == "RuntimeError" + assert data["context"]["exchange"] == "binance" + + +async def test_upload_error_includes_version(sync_client, monkeypatch): + """Error payload includes the OctoBot version (verifiable after decryption).""" + monkeypatch.setattr("octobot.constants.LONG_VERSION", "1.2.3-test") + try: + raise TypeError("version check") + except TypeError as exc: + result = await upload_error(sync_client, USER_PUBKEY, exc) + + manager = SyncManager( + client=sync_client, + pull_path=ERRORS_PULL_PATH_TEMPLATE.format(pubkey=USER_PUBKEY, errorId=result["errorId"]), + push_path=ERRORS_PUSH_PATH_TEMPLATE.format(pubkey=USER_PUBKEY, errorId=result["errorId"]), + encryption_secret=result["errorSecret"], + encryption_salt=result["errorId"], + encryption_info=ENCRYPTION_INFO, + ) + data = await manager.pull() + assert data["version"] == "1.2.3-test" + + +async def test_upload_error_includes_bot_id(sync_client, monkeypatch): + """Error payload includes bot_id when COMMUNITY_BOT_ID is set.""" + monkeypatch.setattr("octobot.constants.COMMUNITY_BOT_ID", "bot-42") + try: + raise KeyError("bot id check") + except KeyError as exc: + result = await upload_error(sync_client, USER_PUBKEY, exc) + + manager = SyncManager( + client=sync_client, + pull_path=ERRORS_PULL_PATH_TEMPLATE.format(pubkey=USER_PUBKEY, errorId=result["errorId"]), + push_path=ERRORS_PUSH_PATH_TEMPLATE.format(pubkey=USER_PUBKEY, errorId=result["errorId"]), + encryption_secret=result["errorSecret"], + encryption_salt=result["errorId"], + encryption_info=ENCRYPTION_INFO, + ) + data = await manager.pull() + assert data["bot_id"] == "bot-42" diff --git a/packages/sync/tests/fixtures/collections.json b/packages/sync/tests/fixtures/collections.json new file mode 100644 index 0000000000..3588c8d3f4 --- /dev/null +++ b/packages/sync/tests/fixtures/collections.json @@ -0,0 +1,111 @@ +{ + "version": 1, + "collections": [ + { + "name": "alpha-docs", + "storagePath": "users/{identity}", + "bundle": "personal", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 10485760 + }, + { + "name": "beta-prefs", + "storagePath": "users/{identity}", + "bundle": "personal", + "readRoles": ["self"], + "writeRoles": ["self"], + "encryption": "identity", + "maxBodyBytes": 10485760 + }, + { + "name": "gamma-logs", + "storagePath": "users/{identity}/logs/{logId}", + "readRoles": ["admin"], + "writeRoles": ["user"], + "encryption": "delegated", + "maxBodyBytes": 65536 + }, + { + "name": "delta-feed", + "storagePath": "items/{itemId}/feed/{version}", + "readRoles": ["member"], + "writeRoles": ["owner"], + "encryption": "none", + "maxBodyBytes": 65536, + "rateLimit": true + }, + { + "name": "theta-profiles", + "storagePath": "items/{itemId}/profile", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 65536, + "objectSchema": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "description": { "type": "string" } + }, + "additionalProperties": false + }, + "rateLimit": true + }, + { + "name": "theta-logos", + "storagePath": "items/{itemId}/logo", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 2097152, + "allowedMimeTypes": ["image/png", "image/jpeg", "image/gif", "image/webp"], + "rateLimit": true + }, + { + "name": "theta-versions", + "storagePath": "items/{itemId}/versions/{version}/document", + "readRoles": ["public"], + "writeRoles": ["owner", "admin"], + "encryption": "none", + "maxBodyBytes": 65536, + "objectSchema": { + "type": "object", + "properties": { + "description": { "type": "string" } + }, + "additionalProperties": false + } + }, + { + "name": "epsilon-catalog", + "storagePath": "public/catalog", + "readRoles": ["public"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 1048576, + "pullOnly": true + }, + { + "name": "zeta-internal", + "storagePath": "platform/internal/{identity}", + "readRoles": ["self"], + "writeRoles": ["admin"], + "encryption": "server", + "maxBodyBytes": 65536 + }, + { + "name": "eta-static", + "storagePath": "items/{itemId}", + "readRoles": ["public"], + "writeRoles": ["owner"], + "encryption": "none", + "maxBodyBytes": 2097152 + } + ], + "rateLimit": { + "windowMs": 60000, + "maxRequests": 100 + } +} diff --git a/packages/sync/tests/mock_chain.py b/packages/sync/tests/mock_chain.py new file mode 100644 index 0000000000..62bde1600c --- /dev/null +++ b/packages/sync/tests/mock_chain.py @@ -0,0 +1,76 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""MockChain — in-memory AbstractChain for testing.""" + + +import time + +import octobot_sync.chain.interface as chain_interface + + +class MockChain: + def __init__(self, chain_id: str = "mock") -> None: + self._id = chain_id + self._items: dict[str, chain_interface.Item] = {} + self._owners: dict[str, str] = {} + self._signatures: dict[str, bool] = {} + self._access: list[dict] = [] + + @property + def id(self) -> str: + return self._id + + + + def set_item(self, item_id: str, item: chain_interface.Item) -> None: + self._items[item_id] = item + + def set_owner(self, item_id: str, owner: str) -> None: + self._owners[item_id] = owner + + def set_signature_valid( + self, canonical: str, signature: str, pubkey: str, valid: bool + ) -> None: + self._signatures[f"{canonical}:{signature}:{pubkey}"] = valid + + def set_access(self, item_id: str, user_address: str, expires_at: int) -> None: + self._access.append( + {"user": user_address, "itemId": item_id, "expiresAt": expires_at} + ) + + + + async def verify_signature( + self, canonical: str, signature: str, pubkey_or_address: str + ) -> bool: + return self._signatures.get(f"{canonical}:{signature}:{pubkey_or_address}", False) + + async def get_item(self, item_id: str) -> chain_interface.Item | None: + return self._items.get(item_id) + + async def is_item_owner(self, item_id: str, pubkey_or_address: str) -> bool: + owner = self._owners.get(item_id) + return owner == pubkey_or_address + + async def has_access(self, item_id: str, user_address: str) -> bool: + entry = None + for a in self._access: + if a["user"] == user_address and a["itemId"] == item_id: + entry = a + if entry is None: + return False + return entry["expiresAt"] == 0 or entry["expiresAt"] > time.time() diff --git a/packages/sync/tests/test_app_helpers.py b/packages/sync/tests/test_app_helpers.py new file mode 100644 index 0000000000..3049adc38d --- /dev/null +++ b/packages/sync/tests/test_app_helpers.py @@ -0,0 +1,64 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for app.py helpers (_create_authenticated_client).""" + +import httpx +import pytest + +import octobot_sync.app as sync_app +import octobot_sync.constants as constants + + +async def test_create_authenticated_client_no_provider(): + client = sync_app._create_authenticated_client(None) + try: + assert isinstance(client, httpx.AsyncClient) + assert client._event_hooks["request"] == [] + finally: + await client.aclose() + + +async def test_create_authenticated_client_with_provider(): + async def fake_provider(*, method, path, body): + return {"X-Auth": "signed"} + + client = sync_app._create_authenticated_client(fake_provider) + try: + assert len(client._event_hooks["request"]) == 1 + finally: + await client.aclose() + + +async def test_create_authenticated_client_signs_request(): + """Auth provider headers appear on outgoing requests.""" + captured_headers = {} + + async def fake_provider(*, method, path, body): + return {constants.HEADER_PUBKEY: "0xTestAddr", constants.HEADER_CHAIN: "evm:1"} + + async def mock_handler(request: httpx.Request): + captured_headers.update(dict(request.headers)) + return httpx.Response(200, json={"ok": True}) + + client = sync_app._create_authenticated_client(fake_provider) + client._transport = httpx.MockTransport(mock_handler) + try: + await client.get("http://example.com/test") + assert captured_headers[constants.HEADER_PUBKEY.lower()] == "0xTestAddr" + assert captured_headers[constants.HEADER_CHAIN.lower()] == "evm:1" + finally: + await client.aclose() diff --git a/packages/sync/tests/test_auth_provider.py b/packages/sync/tests/test_auth_provider.py new file mode 100644 index 0000000000..a6a27784ff --- /dev/null +++ b/packages/sync/tests/test_auth_provider.py @@ -0,0 +1,78 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for StarfishAuthProvider.""" + +import time + +import pytest + +import octobot_sync.auth as auth +import octobot_sync.chain.evm as evm +import octobot_sync.constants as constants + + +@pytest.fixture +def wallet(): + return evm.create_evm_wallet() + + +@pytest.fixture +def provider(wallet): + return auth.StarfishAuthProvider(wallet.private_key, "evm:8453") + + +def test_address_matches_private_key(wallet, provider): + assert provider.address == evm.address_from_evm_key(wallet.private_key) + + +async def test_call_returns_all_headers(provider): + headers = await provider(method="GET", path="/v1/pull/test", body=None) + expected_keys = { + constants.HEADER_PUBKEY, + constants.HEADER_SIGNATURE, + constants.HEADER_TIMESTAMP, + constants.HEADER_NONCE, + constants.HEADER_CHAIN, + } + assert set(headers.keys()) == expected_keys + + +async def test_signature_is_verifiable(wallet, provider): + headers = await provider(method="POST", path="/v1/push/test", body='{"data": 1}') + body_hash = auth.hash_body('{"data": 1}') + canonical = auth.build_canonical( + "POST", + "/v1/push/test", + headers[constants.HEADER_TIMESTAMP], + headers[constants.HEADER_NONCE], + body_hash, + ) + assert evm.verify_evm(canonical, headers[constants.HEADER_SIGNATURE], wallet.address) is True + + +async def test_nonce_unique_per_call(provider): + h1 = await provider(method="GET", path="/", body=None) + h2 = await provider(method="GET", path="/", body=None) + assert h1[constants.HEADER_NONCE] != h2[constants.HEADER_NONCE] + + +async def test_timestamp_is_current(provider): + before_ms = int(time.time() * 1000) + headers = await provider(method="GET", path="/", body=None) + after_ms = int(time.time() * 1000) + ts = int(headers[constants.HEADER_TIMESTAMP]) + assert before_ms <= ts <= after_ms diff --git a/packages/sync/tests/test_canonical.py b/packages/sync/tests/test_canonical.py new file mode 100644 index 0000000000..30f346fe9b --- /dev/null +++ b/packages/sync/tests/test_canonical.py @@ -0,0 +1,48 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for canonical string building and body hashing.""" + +import octobot_sync.auth as auth + + +def test_build_canonical(): + result = auth.build_canonical("GET", "/v1/test", "1234567890", "nonce123", "abc123") + assert result == "ED25519-OCTOBOT\nGET\n/v1/test\n1234567890\nnonce123\nabc123" + + +def test_build_canonical_post(): + result = auth.build_canonical("POST", "/v1/push/data", "9999", "n1", "hash1") + assert result == "ED25519-OCTOBOT\nPOST\n/v1/push/data\n9999\nn1\nhash1" + + +def test_hash_body_empty(): + h = auth.hash_body("") + assert len(h) == 64 # SHA-256 hex + # SHA-256 of empty string + assert h == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + +def test_hash_body_none(): + h = auth.hash_body(None) + # None should be treated as empty string + assert h == auth.hash_body("") + + +def test_hash_body_content(): + h = auth.hash_body('{"key":"value"}') + assert len(h) == 64 + assert h != auth.hash_body("") diff --git a/packages/sync/tests/test_chain_registry.py b/packages/sync/tests/test_chain_registry.py new file mode 100644 index 0000000000..44032bc114 --- /dev/null +++ b/packages/sync/tests/test_chain_registry.py @@ -0,0 +1,56 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for ChainRegistry.""" + +import pytest + +import octobot_sync.chain as sync_chain +import tests.mock_chain as mock_chain_module + + +def test_register_and_get(): + registry = sync_chain.ChainRegistry() + mock = mock_chain_module.MockChain("test-chain") + registry.register(mock) + assert registry.get("test-chain") is mock + + +def test_get_unknown_raises(): + registry = sync_chain.ChainRegistry() + with pytest.raises(ValueError, match="Unknown chain"): + registry.get("nonexistent") + + +def test_list(): + registry = sync_chain.ChainRegistry() + chain1 = mock_chain_module.MockChain("chain-1") + chain2 = mock_chain_module.MockChain("chain-2") + registry.register(chain1) + registry.register(chain2) + chains = registry.list() + assert len(chains) == 2 + assert chain1 in chains + assert chain2 in chains + + +def test_register_overwrites(): + registry = sync_chain.ChainRegistry() + chain1 = mock_chain_module.MockChain("same-id") + chain2 = mock_chain_module.MockChain("same-id") + registry.register(chain1) + registry.register(chain2) + assert registry.get("same-id") is chain2 diff --git a/packages/sync/tests/test_evm.py b/packages/sync/tests/test_evm.py new file mode 100644 index 0000000000..ceb6b38bca --- /dev/null +++ b/packages/sync/tests/test_evm.py @@ -0,0 +1,108 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for EVM crypto functions and EvmChain.""" + +from unittest.mock import AsyncMock + +import pytest +from web3 import Web3 + +import octobot_sync.chain.evm as evm + + +def test_create_evm_wallet(): + wallet = evm.create_evm_wallet() + # key.hex() returns 64 hex chars (no 0x prefix) + assert len(wallet.private_key) == 64 + assert all(c in "0123456789abcdef" for c in wallet.private_key) + assert Web3.is_checksum_address(wallet.address) + + +def test_address_from_evm_key(): + wallet = evm.create_evm_wallet() + derived = evm.address_from_evm_key(wallet.private_key) + assert derived == wallet.address + + +def test_eip191_hash_deterministic(): + h1 = evm._eip191_hash("hello") + h2 = evm._eip191_hash("hello") + h3 = evm._eip191_hash("world") + assert h1 == h2 + assert h1 != h3 + + +def test_verify_evm_valid_signature(): + w3 = Web3() + wallet = evm.create_evm_wallet() + message = "test-canonical-string" + msg_hash = evm._eip191_hash(message) + signed = w3.eth.account._sign_hash(msg_hash, private_key=wallet.private_key) + assert evm.verify_evm(message, signed.signature.hex(), wallet.address) is True + + +def test_verify_evm_invalid_signature(): + wallet = evm.create_evm_wallet() + assert evm.verify_evm("message", "0xdead", wallet.address) is False + + +def test_verify_evm_wrong_address(): + w3 = Web3() + wallet = evm.create_evm_wallet() + other_wallet = evm.create_evm_wallet() + msg_hash = evm._eip191_hash("msg") + signed = w3.eth.account._sign_hash(msg_hash, private_key=wallet.private_key) + assert evm.verify_evm("msg", signed.signature.hex(), other_wallet.address) is False + + +async def test_async_ttl_cached_returns_cached(): + call_count = 0 + + class FakeChain: + @evm._async_ttl_cached(ttl_s=300) + async def fetch(self, key): + nonlocal call_count + call_count += 1 + return f"result-{key}" + + chain = FakeChain() + r1 = await chain.fetch("a") + r2 = await chain.fetch("a") + assert r1 == r2 == "result-a" + assert call_count == 1 + + +async def test_async_ttl_cached_different_args(): + call_count = 0 + + class FakeChain: + @evm._async_ttl_cached(ttl_s=300) + async def fetch(self, key): + nonlocal call_count + call_count += 1 + return f"result-{key}" + + chain = FakeChain() + await chain.fetch("a") + await chain.fetch("b") + assert call_count == 2 + + +def test_evm_chain_require_contract_raises(): + chain = evm.EvmChain("evm:8453") # no RPC + with pytest.raises(RuntimeError, match="RPC not configured"): + chain._require_contract() diff --git a/packages/sync/tests/test_generate_nginx_conf.py b/packages/sync/tests/test_generate_nginx_conf.py new file mode 100644 index 0000000000..edf4fcb194 --- /dev/null +++ b/packages/sync/tests/test_generate_nginx_conf.py @@ -0,0 +1,270 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for octobot_sync.nginx_conf.""" + +import json +import os +import tempfile +from pathlib import Path + +import pytest + +from octobot_sync.util.nginx_conf import generate, storage_path_to_regex, rate_to_nginx + +FIXTURES_DIR = Path(__file__).resolve().parent / "fixtures" +COLLECTIONS_PATH = str(FIXTURES_DIR / "collections.json") + + +def test_static_path_unchanged(): + assert storage_path_to_regex("public/catalog") == r"public/catalog" + + +def test_single_template_replaced(): + assert storage_path_to_regex("users/{identity}") == "users/[^/]+" + + +def test_multiple_templates_replaced(): + result = storage_path_to_regex("items/{itemId}/feed/{version}") + assert result == "items/[^/]+/feed/[^/]+" + + +def test_rate_100_per_60s(): + rate, burst = rate_to_nginx(100, 60_000) + assert rate == "2r/s" + assert burst == 50 + + +def test_rate_10_per_60s(): + rate, burst = rate_to_nginx(10, 60_000) + assert rate == "10r/m" # 10/60s < 1r/s → use r/m + assert burst == 5 + + +def test_rate_1_per_60s(): + rate, burst = rate_to_nginx(1, 60_000) + assert rate == "1r/m" + assert burst == 1 + + +def test_rate_30_per_30s(): + rate, burst = rate_to_nginx(30, 30_000) + assert rate == "1r/s" + assert burst == 15 + + +def test_output_contains_upstream(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "upstream octobot_sync" in output + assert "server octobot-sync:3000;" in output + + +def test_output_contains_cache_path(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "proxy_cache_path" in output + assert "sync_cache" in output + + +def test_listen_port(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 8080) + assert "listen 8080;" in output + + +def test_health_endpoint(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "location = /health" in output + + +def test_public_pull_only_cached_1h(): + """epsilon-catalog is public + pullOnly → 1h cache.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "# epsilon-catalog (public, pull_only)" in output + assert "proxy_cache_valid 200 1h;" in output + + +def test_public_writable_cached_30s(): + """theta-profiles is public + writable → 30s cache.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "# theta-profiles (public, writable)" in output + assert "proxy_cache_valid 200 30s;" in output + + +def test_private_collections_not_cached(): + """alpha-docs, beta-prefs, gamma-logs, zeta-internal are private → no cache location.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "alpha-docs" not in output + assert "beta-prefs" not in output + assert "gamma-logs" not in output + assert "zeta-internal" not in output + + +def test_cache_status_header(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "X-Cache-Status" in output + + +def test_catchall_location(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "location /v1/" in output + + +def test_reject_root(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "return 404;" in output + + +def test_rate_limit_zones_present(): + """Global rateLimit config → limit_req_zone directives.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "limit_req_zone" in output + assert "zone=sync_global" in output + assert "zone=sync_strict" in output + assert "limit_req_status 429;" in output + + +def test_public_locations_have_global_rate_limit(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + # delta-feed pull location should have global rate limit + lines = output.split("\n") + in_delta = False + for line in lines: + if "delta-feed" in line and "writable" in line: + in_delta = True + if in_delta and "limit_req zone=sync_global" in line: + break + if in_delta and line.strip() == "}": + pytest.fail("delta-feed pull location missing global rate limit") + + +def test_rate_limited_push_location(): + """delta-feed has rateLimit: true → strict push location.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "# delta-feed (rate limited push)" in output + assert "/v1/push/items/[^/]+/feed/[^/]+" in output + assert "zone=sync_strict" in output + + +def test_pull_only_no_push_rate_limit(): + """epsilon-catalog is pullOnly → no push rate limit location.""" + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + assert "epsilon-catalog (rate limited push)" not in output + + +def test_catchall_has_rate_limit(): + output = generate(COLLECTIONS_PATH, "octobot-sync:3000", 80) + lines = output.split("\n") + in_catchall = False + for line in lines: + if "Catch-all" in line: + in_catchall = True + if in_catchall and "limit_req zone=sync_global" in line: + break + if in_catchall and line.strip() == "}": + pytest.fail("Catch-all location missing rate limit") + + +def test_no_rate_limit_config(): + """When global rateLimit is absent, no rate limiting directives.""" + config = { + "version": 1, + "collections": [ + { + "name": "public-col", + "storagePath": "public/data", + "readRoles": ["public"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 65536, + } + ], + } + with tempfile.NamedTemporaryFile( + mode="w", suffix=".json", delete=False + ) as f: + json.dump(config, f) + f.flush() + try: + output = generate(f.name, "octobot-sync:3000", 80) + finally: + os.unlink(f.name) + + assert "limit_req_zone" not in output + assert "limit_req" not in output + assert "public-col" in output + + +# ── Security: input sanitization (#24) ── + + +def test_storage_path_escapes_regex_special_chars(): + """Regex metacharacters in literal path segments must be escaped.""" + result = storage_path_to_regex("public/data.v2") + assert result == r"public/data\.v2" + + result = storage_path_to_regex("items/{id}/feed+extra") + assert result == r"items/[^/]+/feed\+extra" + + +def test_storage_path_escapes_pipe(): + """Pipe in path could create regex OR — must be escaped.""" + result = storage_path_to_regex("public/a|b") + assert result == r"public/a\|b" + + +def test_storage_path_escapes_dollar(): + """Dollar sign in path must be escaped.""" + result = storage_path_to_regex("public/price$") + assert result == r"public/price\$" + + +def test_storage_path_escapes_parentheses(): + result = storage_path_to_regex("public/data(v1)") + assert result == r"public/data\(v1\)" + + +def test_invalid_collection_name_rejected(): + """Collection names with special chars should raise ValueError.""" + config = { + "version": 1, + "collections": [ + { + "name": "bad name; injection", + "storagePath": "public/data", + "readRoles": ["public"], + "writeRoles": ["admin"], + "encryption": "none", + "maxBodyBytes": 65536, + } + ], + } + with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f: + json.dump(config, f) + f.flush() + try: + with pytest.raises(ValueError, match="Invalid collection name"): + generate(f.name, "octobot-sync:3000", 80) + finally: + os.unlink(f.name) + + +def test_rate_to_nginx_rejects_zero(): + with pytest.raises(ValueError, match="must be positive"): + rate_to_nginx(0, 60_000) + + +def test_rate_to_nginx_rejects_negative(): + with pytest.raises(ValueError, match="must be positive"): + rate_to_nginx(10, -1) diff --git a/packages/sync/tests/test_nonce.py b/packages/sync/tests/test_nonce.py new file mode 100644 index 0000000000..c9b1e09103 --- /dev/null +++ b/packages/sync/tests/test_nonce.py @@ -0,0 +1,45 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for nonce replay protection.""" + +import pytest + +import octobot_sync.auth as auth + + +@pytest.fixture +def nonce_store(): + return auth.NonceStore(auth.MemoryStorageAdapter()) + + +async def test_fresh_nonce_accepted(nonce_store): + assert await nonce_store.nonce_insert("nonce1", "pubkey1") is True + + +async def test_duplicate_nonce_rejected(nonce_store): + assert await nonce_store.nonce_insert("nonce1", "pubkey1") is True + assert await nonce_store.nonce_insert("nonce1", "pubkey1") is False + + +async def test_same_nonce_different_pubkey(nonce_store): + assert await nonce_store.nonce_insert("nonce1", "pubkey1") is True + assert await nonce_store.nonce_insert("nonce1", "pubkey2") is True + + +async def test_different_nonces(nonce_store): + assert await nonce_store.nonce_insert("nonce1", "pubkey1") is True + assert await nonce_store.nonce_insert("nonce2", "pubkey1") is True diff --git a/packages/sync/tests/test_replica_config.py b/packages/sync/tests/test_replica_config.py new file mode 100644 index 0000000000..49a5393707 --- /dev/null +++ b/packages/sync/tests/test_replica_config.py @@ -0,0 +1,126 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for replica config helpers (is_replicable_collection, make_replica_config).""" + +import pytest + +from starfish_server.config.schema import ( + CollectionConfig, + SyncConfig, + WriteMode, + SyncTrigger, +) + +from octobot_sync.sync.collections import ( + is_replicable_collection, + make_replica_config, + DEFAULT_SYNC_CONFIG, +) + + +def _col(name: str, path: str) -> CollectionConfig: + return CollectionConfig( + name=name, + storagePath=path, + readRoles=["self"], + writeRoles=["self"], + encryption="identity", + maxBodyBytes=64 * 1024, + ) + + +def test_is_replicable_no_template(): + col = _col("static-col", "shared/catalog") + assert is_replicable_collection(col) is True + + +def test_is_replicable_with_identity(): + col = _col("per-user", "tenants/{identity}") + assert is_replicable_collection(col) is False + + +def test_is_replicable_with_multiple_templates(): + col = _col("nested", "tenants/{identity}/entries/{entryId}") + assert is_replicable_collection(col) is False + + +def test_make_replica_config_keeps_all_collections(): + """All collections are kept — static ones get RemoteConfig, templated ones stay as-is.""" + updated = make_replica_config( + DEFAULT_SYNC_CONFIG, "https://primary.example.com" + ) + assert len(updated.collections) == len(DEFAULT_SYNC_CONFIG.collections) + # Templated collections should have no remote + for col in updated.collections: + if "{" in col.storage_path: + assert col.remote is None + + +def test_make_replica_config_with_static_collection(): + """A static-path collection gets RemoteConfig; templated ones don't.""" + config = SyncConfig( + version=1, + collections=[ + _col("static-col", "shared/catalog"), + _col("per-user", "tenants/{identity}"), + ], + ) + updated = make_replica_config(config, "https://primary.example.com") + assert len(updated.collections) == 2 + + static = next(c for c in updated.collections if c.name == "static-col") + assert static.remote is not None + assert static.remote.url == "https://primary.example.com" + + templated = next(c for c in updated.collections if c.name == "per-user") + assert templated.remote is None + + +def test_make_replica_config_remote_fields(): + """Verify RemoteConfig has correct url, pullPath, pushPath, writeMode, intervalMs.""" + config = SyncConfig( + version=1, + collections=[_col("static-col", "shared/catalog")], + ) + updated = make_replica_config( + config, + "https://primary.example.com", + write_mode="bidirectional", + sync_interval_ms=30_000, + ) + remote = updated.collections[0].remote + assert remote.url == "https://primary.example.com" + assert remote.pull_path == "/pull/shared/catalog" + assert remote.push_path == "/push/shared/catalog" + assert remote.write_mode == WriteMode.BIDIRECTIONAL + assert remote.interval_ms == 30_000 + assert SyncTrigger.ON_PULL in remote.sync_triggers + assert SyncTrigger.SCHEDULED in remote.sync_triggers + + +def test_make_replica_config_pull_only_no_push_path(): + """write_mode=pull_only → pushPath is None.""" + config = SyncConfig( + version=1, + collections=[_col("static-col", "shared/catalog")], + ) + updated = make_replica_config( + config, "https://primary.example.com", write_mode="pull_only" + ) + remote = updated.collections[0].remote + assert remote.push_path is None + assert remote.write_mode == WriteMode.PULL_ONLY diff --git a/packages/sync/tests/test_role_resolver.py b/packages/sync/tests/test_role_resolver.py new file mode 100644 index 0000000000..f7b3cf3a31 --- /dev/null +++ b/packages/sync/tests/test_role_resolver.py @@ -0,0 +1,223 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for role resolver, enricher, and signature verifier.""" + +import time + +import pytest +from unittest.mock import AsyncMock, MagicMock +from fastapi import Request +from starfish_server.router.route_builder import AuthResult + +import octobot_sync.auth as auth +import octobot_sync.chain as chain +import octobot_sync.constants as constants +import octobot_sync.sync as sync +import tests.mock_chain as mock_chain_module + + +PUBKEY = "0xTestUser" +ADMIN_PUBKEY = "0xAdmin" +CHAIN_ID = "mock" + + +@pytest.fixture +def mock_chain(): + return mock_chain_module.MockChain(CHAIN_ID) + + +@pytest.fixture +def registry(mock_chain): + r = chain.ChainRegistry() + r.register(mock_chain) + return r + + +@pytest.fixture +def nonce(): + return auth.NonceStore(auth.MemoryStorageAdapter()) + + +def _make_request(method: str, path: str, body: str, headers: dict) -> MagicMock: + """Create a mock FastAPI Request.""" + req = MagicMock(spec=Request) + req.method = method + req.headers = headers + req.url = MagicMock() + req.url.__str__ = lambda self: f"http://localhost{path}" + req.body = AsyncMock(return_value=body.encode("utf-8") if body else b"") + return req + + +async def test_role_resolver_success(mock_chain, registry, nonce): + resolver = sync.create_role_resolver(registry, nonce, ADMIN_PUBKEY) + + ts = str(int(time.time() * 1000)) + nonce_val = "test-nonce-1" + body_hash = auth.hash_body("") + canonical = auth.build_canonical("GET", "/v1/test", ts, nonce_val, body_hash) + sig = "test-sig" + mock_chain.set_signature_valid(canonical, sig, PUBKEY, True) + + headers = { + constants.HEADER_PUBKEY: PUBKEY, + constants.HEADER_SIGNATURE: sig, + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce_val, + constants.HEADER_CHAIN: CHAIN_ID, + } + req = _make_request("GET", "/v1/test", "", headers) + result = await resolver(req) + assert result.identity == PUBKEY + assert "user" in result.roles + assert "admin" not in result.roles + + +async def test_role_resolver_admin(mock_chain, registry, nonce): + resolver = sync.create_role_resolver(registry, nonce, ADMIN_PUBKEY) + + ts = str(int(time.time() * 1000)) + nonce_val = "test-nonce-admin" + body_hash = auth.hash_body("") + canonical = auth.build_canonical("GET", "/v1/test", ts, nonce_val, body_hash) + sig = "admin-sig" + mock_chain.set_signature_valid(canonical, sig, ADMIN_PUBKEY, True) + + headers = { + constants.HEADER_PUBKEY: ADMIN_PUBKEY, + constants.HEADER_SIGNATURE: sig, + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce_val, + constants.HEADER_CHAIN: CHAIN_ID, + } + req = _make_request("GET", "/v1/test", "", headers) + result = await resolver(req) + assert "admin" in result.roles + + +async def test_role_resolver_missing_headers(registry, nonce): + resolver = sync.create_role_resolver(registry, nonce, ADMIN_PUBKEY) + req = _make_request("GET", "/", "", {}) + with pytest.raises(ValueError, match="Missing authentication headers"): + await resolver(req) + + +async def test_role_resolver_replay_rejected(mock_chain, registry, nonce): + resolver = sync.create_role_resolver(registry, nonce, ADMIN_PUBKEY) + + ts = str(int(time.time() * 1000)) + nonce_val = "replay-nonce" + body_hash = auth.hash_body("") + canonical = auth.build_canonical("GET", "/v1/test", ts, nonce_val, body_hash) + sig = "replay-sig" + mock_chain.set_signature_valid(canonical, sig, PUBKEY, True) + + headers = { + constants.HEADER_PUBKEY: PUBKEY, + constants.HEADER_SIGNATURE: sig, + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce_val, + constants.HEADER_CHAIN: CHAIN_ID, + } + req1 = _make_request("GET", "/v1/test", "", headers) + await resolver(req1) # First succeeds + + req2 = _make_request("GET", "/v1/test", "", headers) + with pytest.raises(ValueError, match="Replay"): + await resolver(req2) + + +async def test_role_enricher_owner(mock_chain, registry): + enricher = sync.create_role_enricher(registry) + mock_chain.set_owner("product-123", PUBKEY) + + extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {"productId": "product-123"}) + assert "owner" in extra + assert "member" in extra + + +async def test_role_enricher_member(mock_chain, registry): + enricher = sync.create_role_enricher(registry) + mock_chain.set_owner("product-123", "0xSomeoneElse") + mock_chain.set_access("product-123", PUBKEY, 0) + + extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {"productId": "product-123"}) + assert "member" in extra + assert "owner" not in extra + + +async def test_role_enricher_not_owner_no_access(mock_chain, registry): + enricher = sync.create_role_enricher(registry) + mock_chain.set_owner("product-123", "0xSomeoneElse") + + extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {"productId": "product-123"}) + assert extra == [] + + +async def test_signature_verifier_uses_chain(mock_chain, registry): + mock_chain.set_signature_valid("data", "sig", "pk", True) + verifier = sync.create_signature_verifier(registry) + assert await verifier("data", "sig", "pk") is True + + +async def test_signature_verifier_rejects_invalid(registry): + verifier = sync.create_signature_verifier(registry) + assert await verifier("data", "bad-sig", "pk") is False + + +async def test_role_resolver_expired_timestamp(mock_chain, registry, nonce): + resolver = sync.create_role_resolver(registry, nonce, ADMIN_PUBKEY) + + ts = str(int(time.time() * 1000) - 120_000) # 2 minutes ago + nonce_val = "expired-ts-nonce" + body_hash = auth.hash_body("") + canonical = auth.build_canonical("GET", "/v1/test", ts, nonce_val, body_hash) + sig = "expired-sig" + mock_chain.set_signature_valid(canonical, sig, PUBKEY, True) + + headers = { + constants.HEADER_PUBKEY: PUBKEY, + constants.HEADER_SIGNATURE: sig, + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: nonce_val, + constants.HEADER_CHAIN: CHAIN_ID, + } + req = _make_request("GET", "/v1/test", "", headers) + with pytest.raises(ValueError, match="Timestamp out of window"): + await resolver(req) + + +async def test_role_resolver_unknown_chain(registry, nonce): + resolver = sync.create_role_resolver(registry, nonce, ADMIN_PUBKEY) + + ts = str(int(time.time() * 1000)) + headers = { + constants.HEADER_PUBKEY: PUBKEY, + constants.HEADER_SIGNATURE: "sig", + constants.HEADER_TIMESTAMP: ts, + constants.HEADER_NONCE: "nonce-unknown", + constants.HEADER_CHAIN: "unknown-chain", + } + req = _make_request("GET", "/v1/test", "", headers) + with pytest.raises(ValueError, match="Unknown chain"): + await resolver(req) + + +async def test_role_enricher_no_product_id(mock_chain, registry): + enricher = sync.create_role_enricher(registry) + extra = await enricher(AuthResult(identity=PUBKEY, roles=["user"]), {}) + assert extra == [] diff --git a/packages/sync/tests/test_server_helpers.py b/packages/sync/tests/test_server_helpers.py new file mode 100644 index 0000000000..876dc1269b --- /dev/null +++ b/packages/sync/tests/test_server_helpers.py @@ -0,0 +1,56 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for server.py helper functions.""" + +import pytest + +import octobot_sync.server as server +import octobot_sync.chain.evm as evm + + +def test_require_env_present(monkeypatch): + monkeypatch.setenv("TEST_SYNC_VAR", "hello") + assert server._require_env("TEST_SYNC_VAR") == "hello" + + +def test_require_env_missing(monkeypatch): + monkeypatch.delenv("TEST_SYNC_VAR", raising=False) + with pytest.raises(RuntimeError, match="TEST_SYNC_VAR"): + server._require_env("TEST_SYNC_VAR") + + +def test_setup_registry_default(monkeypatch): + monkeypatch.delenv("EVM_BASE_RPC", raising=False) + monkeypatch.delenv("EVM_CONTRACT_BASE", raising=False) + registry = server._setup_registry() + chain = registry.get("evm:8453") + assert chain is not None + assert isinstance(chain, evm.EvmChain) + assert chain.id == "evm:8453" + # No contract configured + with pytest.raises(RuntimeError, match="RPC not configured"): + chain._require_contract() + + +def test_setup_registry_with_rpc(monkeypatch): + monkeypatch.setenv("EVM_BASE_RPC", "https://rpc.example.com") + monkeypatch.setenv("EVM_CONTRACT_BASE", "0x0000000000000000000000000000000000000001") + registry = server._setup_registry() + chain = registry.get("evm:8453") + assert chain is not None + # Contract should be configured (no RuntimeError) + chain._require_contract() diff --git a/packages/sync/tests/test_storage.py b/packages/sync/tests/test_storage.py new file mode 100644 index 0000000000..db3a25fcab --- /dev/null +++ b/packages/sync/tests/test_storage.py @@ -0,0 +1,99 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for MemoryStorageAdapter CRUD and TTL behaviour.""" + +import pytest + +import octobot_sync.auth as auth + + +@pytest.fixture +def storage(): + return auth.MemoryStorageAdapter() + + +async def test_set_and_get(storage): + await storage.set("k1", "v1") + assert await storage.get("k1") == "v1" + + +async def test_get_missing_key(storage): + assert await storage.get("nonexistent") is None + + +async def test_delete(storage): + await storage.set("k1", "v1") + await storage.delete("k1") + assert await storage.get("k1") is None + + +async def test_delete_missing_key(storage): + await storage.delete("nonexistent") # should not raise + + +async def test_set_if_absent_existing(storage): + await storage.set("k1", "original") + result = await storage.set_if_absent("k1", "new", ttl_ms=60_000) + assert result is False + assert await storage.get("k1") == "original" + + +async def test_ttl_expiration(storage, monkeypatch): + """Value disappears after TTL expires (monkeypatched time).""" + import time as time_mod + + now = 1_000_000.0 + monkeypatch.setattr(time_mod, "time", lambda: now) + + await storage.set("k1", "v1", ttl_ms=5_000) + assert await storage.get("k1") == "v1" + + # Advance past TTL + monkeypatch.setattr(time_mod, "time", lambda: now + 6.0) + assert await storage.get("k1") is None + + +async def test_get_expired_key_returns_none(storage, monkeypatch): + """Expired entry is cleaned on read.""" + import time as time_mod + + now = 1_000_000.0 + monkeypatch.setattr(time_mod, "time", lambda: now) + + await storage.set_if_absent("k1", "v1", ttl_ms=1_000) + monkeypatch.setattr(time_mod, "time", lambda: now + 2.0) + + assert await storage.get("k1") is None + assert "k1" not in storage._store + + +async def test_cleanup_removes_expired_only(storage, monkeypatch): + """_cleanup removes expired entries but keeps valid ones.""" + import time as time_mod + + now = 1_000_000.0 + monkeypatch.setattr(time_mod, "time", lambda: now) + + await storage.set("short", "v1", ttl_ms=1_000) + await storage.set("long", "v2", ttl_ms=60_000) + + # Advance past short TTL but not long + monkeypatch.setattr(time_mod, "time", lambda: now + 2.0) + storage._cleanup() + + assert "short" not in storage._store + assert await storage.get("long") == "v2" diff --git a/packages/sync/tests/test_sync_collections.py b/packages/sync/tests/test_sync_collections.py new file mode 100644 index 0000000000..c27ac71aaf --- /dev/null +++ b/packages/sync/tests/test_sync_collections.py @@ -0,0 +1,95 @@ +# Drakkar-Software OctoBot-Sync +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +"""Tests for sync collection loading from a sample collections.json.""" + +from pathlib import Path + +import octobot_sync.sync.collections as collections_module + +FIXTURES_DIR = Path(__file__).resolve().parent / "fixtures" +COLLECTIONS_PATH = str(FIXTURES_DIR / "collections.json") + + +def _load(): + return collections_module.load_sync_config(COLLECTIONS_PATH) + + +def test_sync_config_version(): + assert _load().version == 1 + + +def test_sync_config_has_collections(): + assert len(_load().collections) == 10 + + +def test_all_collections_have_names(): + config = _load() + names = [c.name for c in config.collections] + assert all(names) + assert len(names) == len(set(names)), "Duplicate collection names" + + +def test_rate_limited_collection(): + col = next(c for c in _load().collections if c.name == "delta-feed") + assert col.storage_path == "items/{itemId}/feed/{version}" + assert "member" in col.read_roles + assert "owner" in col.write_roles + assert col.encryption == "none" + assert col.rate_limit + + +def test_bundled_collections(): + bundled = [c for c in _load().collections if c.bundle == "personal"] + assert len(bundled) == 2 + names = {c.name for c in bundled} + assert names == {"alpha-docs", "beta-prefs"} + for c in bundled: + assert c.encryption == "identity" + assert c.storage_path == "users/{identity}" + + +def test_pull_only_collections(): + pull_only = [c for c in _load().collections if c.pull_only] + assert len(pull_only) == 1 + assert pull_only[0].name == "epsilon-catalog" + + +def test_server_encrypted_collections(): + server_encrypted = [c for c in _load().collections if c.encryption == "server"] + assert len(server_encrypted) == 1 + assert server_encrypted[0].name == "zeta-internal" + + +def test_rate_limit_config(): + config = _load() + assert config.rate_limit is not None + assert config.rate_limit.window_ms == 60_000 + assert config.rate_limit.max_requests == 100 + + +def test_fallback_to_default_config(): + """When collections file is missing, DEFAULT_SYNC_CONFIG is returned.""" + config = collections_module.load_sync_config("/nonexistent/path.json") + assert config.version == 1 + assert len(config.collections) == 3 + names = {c.name for c in config.collections} + assert names == {"bots", "accounts", "errors"} + errors = next(c for c in config.collections if c.name == "errors") + assert errors.read_roles == ["self", "admin"] + assert errors.write_roles == ["self"] + assert errors.max_body_bytes == 500_000 + assert errors.encryption == "delegated" diff --git a/packages/tentacles/.coveragerc b/packages/tentacles/.coveragerc new file mode 100644 index 0000000000..9cb5869958 --- /dev/null +++ b/packages/tentacles/.coveragerc @@ -0,0 +1,3 @@ +[run] +include = + tentacles/ diff --git a/packages/tentacles/.gitignore b/packages/tentacles/.gitignore new file mode 100644 index 0000000000..78132e2b03 --- /dev/null +++ b/packages/tentacles/.gitignore @@ -0,0 +1,134 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +\.idea/ + + +/__init__.py +Backtesting/__init__.py +Backtesting/collectors/__init__.py +Backtesting/collectors/exchanges/__init__.py +Backtesting/converters/__init__.py +Backtesting/converters/exchanges/__init__.py +Backtesting/importers/__init__.py +Backtesting/importers/exchanges/__init__.py +Evaluator/__init__.py +Evaluator/RealTime/__init__.py +Evaluator/Social/__init__.py +Evaluator/Strategies/__init__.py +Evaluator/TA/__init__.py +Evaluator/Util/__init__.py +profiles/__init__.py +Services/__init__.py +Services/Interfaces/__init__.py +Services/Notifiers/__init__.py +Services/Services_bases/__init__.py +Services/Services_feeds/__init__.py +Trading/__init__.py +Trading/Exchange/__init__.py +Trading/Mode/__init__.py + +# include web interface lib directory +!/Services/Interfaces/web_interface/static/js/lib diff --git a/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/__init__.py b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/__init__.py new file mode 100644 index 0000000000..8929b8a061 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/__init__.py @@ -0,0 +1,11 @@ +from .bull_research_agent import ( + BullResearchAIAgentChannel, + BullResearchAIAgentConsumer, + BullResearchAIAgentProducer, +) +from .bear_research_agent import ( + BearResearchAIAgentChannel, + BearResearchAIAgentConsumer, + BearResearchAIAgentProducer, +) +from .models import ResearchDebateOutput diff --git a/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/bear_research_agent.py b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/bear_research_agent.py new file mode 100644 index 0000000000..8c2ce885c7 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/bear_research_agent.py @@ -0,0 +1,136 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Bear Research Agent. +Takes the bearish side in a research debate: argues for caution, lower allocation, or risk reduction. +""" +import json +import typing + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +from octobot_services.enums import AIModelPolicy + +from .models import ResearchDebateOutput + + +class BearResearchAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = ResearchDebateOutput + + +class BearResearchAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class BearResearchAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """ + Bear researcher: argues the bearish case in a research debate. + Uses strategy data, portfolio context, and debate history to argue for caution. + """ + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = BearResearchAIAgentChannel + AGENT_CONSUMER = BearResearchAIAgentConsumer + MODEL_POLICY = AIModelPolicy.FAST + + def __init__(self, channel=None, model=None, max_tokens=None, temperature=None, **kwargs): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + return """You are the Bear Researcher in an investment research debate. +Your role is to argue the bearish case: reasons to reduce exposure, be cautious, or favor defensive allocation. + +You receive: +1. Initial state: portfolio, strategy data (global and per-crypto), current distribution. +2. Debate history: previous messages from you and the Bull researcher. + +Respond with a short, focused argument (one paragraph) for the bearish side. Consider risks, overvaluation, and downside. +Return ONLY valid JSON (no markdown, no code fences, no extra text). +Required JSON keys: +- "message": string (required) +- "reasoning": string (optional) +- "error": null or empty string (optional; omit unless you cannot answer) + +CRITICAL: Do NOT include any other keys. Do NOT include "asset" or lists. +Example: +{"message": "Bearish argument...", "reasoning": "Brief rationale."}""" + + def _build_user_prompt(self, input_data: typing.Dict[str, typing.Any]) -> str: + initial_state = input_data.get("_initial_state") or {} + debate_history = input_data.get("_debate_history") or [] + round_num = input_data.get("_debate_round", 1) + state_preview = json.dumps({ + "crypto_strategy_data_keys": list((initial_state.get("crypto_strategy_data") or {}).keys()), + "global_strategy_data_keys": list((initial_state.get("global_strategy_data") or {}).keys()), + "current_distribution": initial_state.get("current_distribution"), + "reference_market": initial_state.get("reference_market"), + }, indent=2) + debate_text = "\n".join( + f"[{e.get('agent_name', '?')}]: {e.get('message', '')[:300]}" + for e in debate_history + ) if debate_history else "No previous messages." + return f"""Round {round_num} + +Initial state (summary): +{state_preview} + +Debate so far: +{debate_text} + +Your bearish argument (short, one paragraph):""" + + async def execute(self, input_data: typing.Any, ai_service) -> typing.Any: + messages = [ + {"role": "system", "content": self.prompt}, + {"role": "user", "content": self._build_user_prompt(input_data)}, + ] + try: + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=ResearchDebateOutput, + ) + out = ResearchDebateOutput(**response_data) + except Exception as e: + self.logger.warning(f"LLM call failed with error: {e}. Attempting to extract JSON from error message.") + extracted_json = ResearchDebateOutput.recover_json_from_error(e) + if extracted_json: + try: + out = ResearchDebateOutput(**extracted_json) + except Exception as e2: + self.logger.error(f"Failed to parse extracted JSON into ResearchDebateOutput: {e2}") + raise ValueError(f"LLM call failed and extracted JSON is invalid: {e2}") from e2 + else: + self.logger.error("Failed to extract JSON from LLM error message.") + raise ValueError(f"LLM call failed and no valid JSON could be extracted: {e}") from e + + # Check if the model contains an error + if not out.message and out.reasoning: + out.message = out.reasoning + if out.error: + raise ValueError(f"LLM failed to return valid bearish research: {out.error}") + if not out.message: + self.logger.warning("Bear research agent returned empty message; using fallback.") + out.message = "No bearish argument generated by the LLM." + + return {"message": out.message, "reasoning": out.reasoning} diff --git a/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/bull_research_agent.py b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/bull_research_agent.py new file mode 100644 index 0000000000..0735318469 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/bull_research_agent.py @@ -0,0 +1,136 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Bull Research Agent. +Takes the bullish side in a research debate: argues for higher allocation / more risk based on strategy data. +""" +import json +import typing + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +from octobot_services.enums import AIModelPolicy + +from .models import ResearchDebateOutput + + +class BullResearchAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = ResearchDebateOutput + + +class BullResearchAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class BullResearchAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """ + Bull researcher: argues the bullish case in a research debate. + Uses strategy data, portfolio context, and debate history to make a short argument. + """ + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = BullResearchAIAgentChannel + AGENT_CONSUMER = BullResearchAIAgentConsumer + MODEL_POLICY = AIModelPolicy.FAST + + def __init__(self, channel=None, model=None, max_tokens=None, temperature=None, **kwargs): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + return """You are the Bull Researcher in an investment research debate. +Your role is to argue the bullish case: reasons to increase exposure, take more risk, or favor allocation to assets. + +You receive: +1. Initial state: portfolio, strategy data (global and per-crypto), current distribution. +2. Debate history: previous messages from you and the Bear researcher. + +Respond with a short, focused argument (one paragraph) for the bullish side. Consider signals, momentum, and opportunities. +Return ONLY valid JSON (no markdown, no code fences, no extra text). +Required JSON keys: +- "message": string (required) +- "reasoning": string (optional) +- "error": null or empty string (optional; omit unless you cannot answer) + +CRITICAL: Do NOT include any other keys. Do NOT include "asset" or lists. +Example: +{"message": "Bullish argument...", "reasoning": "Brief rationale."}""" + + def _build_user_prompt(self, input_data: typing.Dict[str, typing.Any]) -> str: + initial_state = input_data.get("_initial_state") or {} + debate_history = input_data.get("_debate_history") or [] + round_num = input_data.get("_debate_round", 1) + state_preview = json.dumps({ + "crypto_strategy_data_keys": list((initial_state.get("crypto_strategy_data") or {}).keys()), + "global_strategy_data_keys": list((initial_state.get("global_strategy_data") or {}).keys()), + "current_distribution": initial_state.get("current_distribution"), + "reference_market": initial_state.get("reference_market"), + }, indent=2) + debate_text = "\n".join( + f"[{e.get('agent_name', '?')}]: {e.get('message', '')[:300]}" + for e in debate_history + ) if debate_history else "No previous messages." + return f"""Round {round_num} + +Initial state (summary): +{state_preview} + +Debate so far: +{debate_text} + +Your bullish argument (short, one paragraph):""" + + async def execute(self, input_data: typing.Any, ai_service) -> typing.Any: + messages = [ + {"role": "system", "content": self.prompt}, + {"role": "user", "content": self._build_user_prompt(input_data)}, + ] + try: + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=ResearchDebateOutput, + ) + out = ResearchDebateOutput(**response_data) + except Exception as e: + self.logger.warning(f"LLM call failed with error: {e}. Attempting to extract JSON from error message.") + extracted_json = ResearchDebateOutput.recover_json_from_error(e) + if extracted_json: + try: + out = ResearchDebateOutput(**extracted_json) + except Exception as e2: + self.logger.error(f"Failed to parse extracted JSON into ResearchDebateOutput: {e2}") + raise ValueError(f"LLM call failed and extracted JSON is invalid: {e2}") from e2 + else: + self.logger.error("Failed to extract JSON from LLM error message.") + raise ValueError(f"LLM call failed and no valid JSON could be extracted: {e}") from e + + # Check if the model contains an error + if not out.message and out.reasoning: + out.message = out.reasoning + if out.error: + raise ValueError(f"LLM failed to return valid bullish research: {out.error}") + if not out.message: + self.logger.warning("Bull research agent returned empty message; using fallback.") + out.message = "No bullish argument generated by the LLM." + + return {"message": out.message, "reasoning": out.reasoning} diff --git a/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/metadata.json b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/metadata.json new file mode 100644 index 0000000000..609c1b9d3a --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["BullResearchAgent", "BearResearchAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/models.py b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/models.py new file mode 100644 index 0000000000..832d713809 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/bull_bear_research_agent/models.py @@ -0,0 +1,39 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from typing import Optional +from pydantic import ConfigDict, model_validator + +import octobot_agents.models as agent_models + + +class ResearchDebateOutput(agent_models.AgentBaseModel): + """Output from a research debate agent (bull or bear): message for the debate.""" + __strict_json_schema__ = True + model_config = ConfigDict(extra="ignore") + message: Optional[str] = None + reasoning: Optional[str] = None + error: Optional[str] = None + + @model_validator(mode="after") + def check_message_or_error(self): + if self.error: + self.error = agent_models.AgentBaseModel.normalize_agent_error(self.error) + if not self.message and self.reasoning: + self.message = self.reasoning + if not self.message and not self.error: + self.error = "No message or error provided" + return self diff --git a/packages/tentacles/Agent/sub_agents/default_critic_agent/__init__.py b/packages/tentacles/Agent/sub_agents/default_critic_agent/__init__.py new file mode 100644 index 0000000000..58a9eb49f6 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_critic_agent/__init__.py @@ -0,0 +1,10 @@ +from .default_critic_agent import ( + DefaultCriticAgentProducer, + DefaultCriticAgentChannel, + DefaultCriticAgentConsumer, +) +from .default_ai_critic_agent import ( + DefaultAICriticAgentProducer, + DefaultAICriticAgentChannel, + DefaultAICriticAgentConsumer, +) diff --git a/packages/tentacles/Agent/sub_agents/default_critic_agent/default_ai_critic_agent.py b/packages/tentacles/Agent/sub_agents/default_critic_agent/default_ai_critic_agent.py new file mode 100644 index 0000000000..6ba10b6027 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_critic_agent/default_ai_critic_agent.py @@ -0,0 +1,401 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +from typing import List + +from pydantic import Field + +import octobot_agents.team.critic as critic +from .default_critic_agent import DefaultCriticAgentProducer +import octobot_agents.models as agent_models + + +class QualityEvaluationOutput(agent_models.AgentBaseModel): + """ + Output schema for LLM-based quality evaluation. + + This model is intentionally focused on behavioral and strategic quality only. + Schema/field/type validation is handled separately by Pydantic and runtime + checks, not by the critic. + """ + __strict_json_schema__ = True + + quality_ok: bool = Field(description="Whether the output quality is acceptable.") + # Behavior/strategy/risk/logic issues suitable for learning and memory. + issues: List[str] = Field( + default_factory=list, + description="List of behavioral/strategic quality issues (empty if quality_ok is true).", + ) + reasoning: str = Field( + default="", + description="Brief explanation of the behavioral quality assessment." + ) + + +class DefaultAICriticAgentChannel(critic.AICriticAgentChannel): + pass + + +class DefaultAICriticAgentConsumer(critic.AICriticAgentConsumer): + pass + + +class DefaultAICriticAgentProducer(critic.AICriticAgentProducer): + """ + Default AI critic agent - hybrid rule-based + LLM analysis. + + Combines basic quality heuristics from DefaultCriticAgentProducer with + LLM-based evaluation for comprehensive result quality assessment. + Designed as the default critic for AI teams. + + Features: + - Basic quality checks (empty results, schema validation, placeholder detection) + - LLM-based quality evaluation (correctness, relevance, completeness, expectations) + - Context-aware analysis (compares results against execution plan) + - Enhanced improvement suggestions + """ + + AGENT_CHANNEL: typing.Type[critic.AICriticAgentChannel] = DefaultAICriticAgentChannel + AGENT_CONSUMER: typing.Type[critic.AICriticAgentConsumer] = DefaultAICriticAgentConsumer + + def __init__( + self, + channel: typing.Optional[DefaultAICriticAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + self_improving=self_improving, + **kwargs, + ) + # Create instance of DefaultCriticAgentProducer to reuse its quality evaluation + self._basic_critic = DefaultCriticAgentProducer(channel=None) + + def _get_default_prompt(self) -> str: + """ + Return the default prompt for the AI critic agent. + + Returns: + The default system prompt string. + """ + return """You are an AI critic agent for an AI agent team. +Your role is to analyze team execution and evaluate the *behavioral and strategic quality* +of each agent's output, not to restate low-level schema or type constraints. + +Focus on: +1. Result & Behavior Quality: + - Correctness: Are the decisions and outputs logically and financially sound? + - Relevance: Do they address the intended trading or evaluation objective? + - Completeness: Are the key behavioral steps and justifications present (not just fields)? + - Expectations: Do they match the intent of the execution plan and agent role? + +2. Issues and Problems: + - Identify logical errors, poor risk management, or inconsistent reasoning. + - Detect off-topic, misleading, or unsupported conclusions. + - Highlight missing analysis or skipped safety checks that should have been performed. + +3. Agent Improvements: + - Only include agents in agent_improvements if they have memory enabled. + - Provide specific, actionable improvements to behavior, strategy, or coordination. + - Explain why each agent needs improvement in terms of its decisions and patterns, + not just missing keys or fields. + - For strong results, suggest what should be captured as reusable best practices. + +4. Context Awareness: + - Compare outputs against the execution plan (goals, constraints, and roles). + - Check consistency between related agent outputs (e.g., no conflicting signals). + - Consider recent history and regime (if provided) when judging severity of issues. + +For each agent with memory enabled: +- If the result is high quality: include a positive learning entry that describes the + pattern to reuse (what worked and why). +- If the result has quality issues: include specific, behavior-level issues and + improvements (what to change in logic/decision-making). +- If the result is off-topic or shallow: explain why, and propose how to better + align with the task and other agents.""" + + def _evaluate_result_quality_basic( + self, + output: typing.Any, + agent: typing.Optional[typing.Any] + ) -> typing.Tuple[bool, typing.List[str]]: + """ + Evaluate result quality using basic heuristics (from DefaultCriticAgentProducer). + + Args: + output: The agent output to evaluate + agent: The agent instance (optional, for schema access) + + Returns: + Tuple of (is_quality_ok, list_of_issues) + """ + # Reuse the basic quality evaluation from DefaultCriticAgentProducer + return self._basic_critic._evaluate_result_quality(output, agent) + + async def _evaluate_result_quality_with_llm( + self, + output: typing.Any, + agent: typing.Optional[typing.Any], + agent_name: str, + execution_plan: typing.Optional[typing.Any], + other_outputs: typing.Dict[str, typing.Any], + ai_service: typing.Any, + ) -> typing.Tuple[bool, typing.List[str], typing.Optional[str]]: + """ + Evaluate result quality using LLM for deeper assessment. + + Args: + output: The agent output to evaluate + agent: The agent instance (optional, for schema access) + agent_name: Name of the agent + execution_plan: The execution plan (for context) + other_outputs: Other agent outputs (for consistency checking) + ai_service: The AI service instance + + Returns: + Tuple of (is_quality_ok, list_of_issues, llm_reasoning) + """ + issues = [] + reasoning = None + + # Build context for LLM evaluation + agent_context = { + "agent_name": agent_name, + "output": output, + } + + # Add schema info if available (team agents are producers with AGENT_CHANNEL) + if agent is not None: + try: + schema = agent.AGENT_CHANNEL.get_output_schema() + if schema: + agent_context["expected_schema"] = schema.__name__ + except (AttributeError, TypeError): + pass + + # Build evaluation prompt + evaluation_prompt = f"""Evaluate the behavioral and strategic quality of this agent's output: + +Agent: {agent_name} +Output: {self.format_data(output)} + +Context: +- Execution Plan: {self.format_data(execution_plan.to_dict() if execution_plan else {})} +- Other Agent Outputs: {self.format_data({k: v for k, v in other_outputs.items() if k != agent_name})} + +Your task: +1. Completely ignore low-level schema/field/type/format problems. Assume that + any output you see has already passed structural validation. +2. Evaluate only behavioral and strategic quality: + - Correctness: Are the decisions and conclusions logically and financially sound? + - Relevance: Do they address the intended trading or evaluation objective? + - Completeness: Are key behavioral steps, checks, and justifications present? + - Expectations: Do they match the intent of the execution plan and agent role? + - Consistency: Are they coherent with other agent outputs? + +Respond with structured JSON fields: +- quality_ok: true/false +- issues: list of BEHAVIORAL issues only (short text, empty if quality_ok is true) +- reasoning: brief explanation of the behavioral assessment + +If quality_ok is false, issues should clearly explain what to change in the +agent's decisions, strategies, or risk management, not its output schema.""" + + try: + messages = [ + {"role": "system", "content": "You are a quality evaluation expert. Analyze agent outputs for correctness, relevance, completeness, and whether they meet expectations."}, + {"role": "user", "content": evaluation_prompt}, + ] + + # Call LLM for quality evaluation with structured output schema + response = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=QualityEvaluationOutput, + ) + + # Parse response using Pydantic model + quality_eval = QualityEvaluationOutput.model_validate(response) + quality_ok = quality_eval.quality_ok + issues.extend(quality_eval.issues) + reasoning = quality_eval.reasoning + + # Return issues directly; they all describe behavioral problems. + return quality_ok, issues, reasoning + + except Exception as e: + # If LLM evaluation fails, log and fall back to basic evaluation + self.logger.warning(f"LLM quality evaluation failed for {agent_name}: {e}. Falling back to basic evaluation.") + return True, [], None + + async def execute( + self, + input_data: typing.Union[agent_models.CriticInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any + ) -> agent_models.CriticAnalysis: + """ + Execute critic analysis using hybrid rule-based + LLM evaluation. + + Combines basic heuristics with LLM-based quality assessment for comprehensive + result quality evaluation. + + Args: + input_data: Contains {"team_producer": team_producer, "execution_plan": ExecutionPlan, "execution_results": Dict, "agent_outputs": Dict, "execution_metadata": dict} + ai_service: The AI service instance for LLM calls + + Returns: + CriticAnalysis with comprehensive findings + """ + execution_results = input_data.get("execution_results", {}) + agent_outputs = input_data.get("agent_outputs", {}) + execution_metadata = input_data.get("execution_metadata", {}) + execution_plan = input_data.get("execution_plan") + team_producer = input_data.get("team_producer") + + if team_producer is None: + raise ValueError("team_producer is required in input_data") + + issues = [] + errors = [] + inconsistencies = [] + optimizations = [] + agent_improvements = {} + + # Check for general errors + if execution_metadata.get("errors"): + errors.extend(execution_metadata["errors"]) + + # Process each agent with hybrid quality evaluation + for agent_name, output in agent_outputs.items(): + # Skip agents that failed (no result available) + if output is None: + continue + + # Check for errors in execution results + if agent_name in execution_results: + result = execution_results[agent_name] + if isinstance(result, dict) and result.get("error"): + continue + + # Get agent instance for quality evaluation and memory check + agent = None + has_memory_enabled = False + if team_producer: + try: + agent = team_producer.get_agent_by_name(agent_name) + if agent is None: + manager = team_producer.get_manager() + if manager and manager.name == agent_name: + agent = manager + + if agent: + try: + has_memory_enabled = agent.has_memory_enabled() + except AttributeError: + has_memory_enabled = getattr(agent, 'ENABLE_MEMORY', False) + except (AttributeError, KeyError): + has_memory_enabled = False + + # Skip agents without memory enabled + if not has_memory_enabled: + continue + + # Step 1: Run basic quality checks + basic_ok, basic_issues = self._evaluate_result_quality_basic(output, agent) + + # Step 2: Run LLM-based quality evaluation + # NOTE: llm_issues here are already behavior-focused, schema-only + # issues are kept inside the QualityEvaluationOutput.issues field + # and added above via the issues list. + llm_ok, llm_behavior_issues, llm_reasoning = await self._evaluate_result_quality_with_llm( + output=output, + agent=agent, + agent_name=agent_name, + execution_plan=execution_plan, + other_outputs=agent_outputs, + ai_service=ai_service, + ) + + # Step 3: Combine assessments + # basic_issues may contain schema-like problems; llm_behavior_issues + # are explicitly behavior/strategy/risk oriented. + all_behavior_issues = basic_issues + llm_behavior_issues + is_quality_ok = basic_ok and llm_ok + + # Build reasoning + if is_quality_ok: + reasoning = f"Agent {agent_name} produced quality result" + if llm_reasoning: + reasoning += f": {llm_reasoning}" + reasoning += " - capturing learnings" + + # For successful results, capture positive behavioral pattern + agent_improvements[agent_name] = agent_models.AgentImprovement( + agent_name=agent_name, + improvements=["Capture successful execution patterns"], + issues=[], + errors=[], + reasoning=reasoning, + ) + else: + # Combine basic and LLM behavior issues for reasoning, while schema-only + # issues remain available in the global issues list. + combined_reasoning = f"Agent {agent_name} produced result with quality issues" + if basic_issues: + combined_reasoning += f". Basic checks: {', '.join(basic_issues)}" + if llm_behavior_issues: + combined_reasoning += f". LLM behavior evaluation: {', '.join(llm_behavior_issues)}" + if llm_reasoning: + combined_reasoning += f". Analysis: {llm_reasoning}" + + agent_improvements[agent_name] = agent_models.AgentImprovement( + agent_name=agent_name, + # Focus memory on behavioral improvements, not schema fixes. + improvements=["Improve result quality, correctness, and completeness"], + issues=all_behavior_issues, + errors=[], + reasoning=combined_reasoning, + ) + + # Count quality vs non-quality results + quality_count = sum( + 1 for imp in agent_improvements.values() + if not imp.issues and not imp.errors + ) + quality_issue_count = len(agent_improvements) - quality_count + + summary = ( + f"Found {len(errors)} errors, {len(issues)} issues. " + f"{quality_count} agents with quality results processed for memory updates. " + f"{quality_issue_count} agents with quality issues identified (hybrid evaluation)." + ) + + return agent_models.CriticAnalysis( + issues=issues, + errors=errors, + inconsistencies=inconsistencies, + optimizations=optimizations, + summary=summary, + agent_improvements=agent_improvements, + ) diff --git a/packages/tentacles/Agent/sub_agents/default_critic_agent/default_critic_agent.py b/packages/tentacles/Agent/sub_agents/default_critic_agent/default_critic_agent.py new file mode 100644 index 0000000000..ccccfb52c4 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_critic_agent/default_critic_agent.py @@ -0,0 +1,233 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_agents.team.critic as critic +import octobot_agents.models as agent_models + + +class DefaultCriticAgentChannel(critic.CriticAgentChannel): + pass + + +class DefaultCriticAgentConsumer(critic.CriticAgentConsumer): + pass + + +class DefaultCriticAgentProducer(critic.CriticAgentProducer): + """ + Default critic agent - simple rule-based analysis. + + Inherits from CriticAgentProducer. Uses simple heuristics instead of LLM. + + Note: This is a basic rule-based critic with limited quality evaluation capabilities. + For proper quality evaluation (checking correctness, relevance, completeness, etc.), + use AICriticAgentProducer which uses LLM-based analysis. + """ + + AGENT_CHANNEL: typing.Type[critic.CriticAgentChannel] = DefaultCriticAgentChannel + AGENT_CONSUMER: typing.Type[critic.CriticAgentConsumer] = DefaultCriticAgentConsumer + + def __init__( + self, + channel: typing.Optional[DefaultCriticAgentChannel] = None, + self_improving: bool = True, + ): + super().__init__(channel=channel, self_improving=self_improving) + + def _evaluate_result_quality( + self, + output: typing.Any, + agent: typing.Optional[typing.Any] + ) -> typing.Tuple[bool, typing.List[str]]: + """ + Evaluate result quality using basic heuristics. + + Checks for: + - Empty dict results + - Schema validation failures (if OUTPUT_SCHEMA available) + - All empty/None values + - Placeholder or incomplete data + + Args: + output: The agent output to evaluate + agent: The agent instance (optional, for schema access) + + Returns: + Tuple of (is_quality_ok, list_of_issues) + """ + issues = [] + + # Check for empty dict + if isinstance(output, dict) and len(output) == 0: + return False, ["Result is empty dict"] + + # Try schema validation if available (team agents are producers with AGENT_CHANNEL) + if agent is not None: + try: + schema = agent.AGENT_CHANNEL.get_output_schema() + if schema: + try: + schema.model_validate(output) + except Exception as e: + issues.append(f"Schema validation failed: {str(e)}") + except (AttributeError, TypeError): + # Schema not available or not callable, skip validation + pass + + # Check for all None/empty values in dict + if isinstance(output, dict): + all_empty = all( + v is None or v == "" or (isinstance(v, dict) and len(v) == 0) or (isinstance(v, list) and len(v) == 0) + for v in output.values() + ) + if all_empty: + issues.append("All result fields are empty, None, or empty collections") + + # Check for placeholder-like values (very basic check) + if isinstance(output, dict): + placeholder_indicators = ["n/a", "none", "null", "placeholder", "todo", "tbd"] + for key, value in output.items(): + if isinstance(value, str) and value.lower() in placeholder_indicators: + issues.append(f"Field '{key}' contains placeholder value: {value}") + break + + return len(issues) == 0, issues + + async def execute( + self, + input_data: typing.Union[agent_models.CriticInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any # AbstractAIService - type not available at runtime + ) -> agent_models.CriticAnalysis: + """ + Execute critic analysis using simple heuristics. + + Evaluates agent results using basic quality checks: + - Empty result detection + - Schema validation (if OUTPUT_SCHEMA available) + - Completeness checks (empty fields, placeholder values) + + Note: This is a basic rule-based approach with limited evaluation capabilities. + It cannot assess correctness, relevance, or whether results meet expectations. + For comprehensive quality evaluation, use CriticAgentProducer (LLM-based). + + Args: + input_data: Contains {"team_producer": team_producer, "execution_plan": ExecutionPlan, "execution_results": Dict, "agent_outputs": Dict, "execution_metadata": dict} + ai_service: Not used by default critic agent + + Returns: + CriticAnalysis with basic findings + """ + execution_results = input_data.get("execution_results", {}) + agent_outputs = input_data.get("agent_outputs", {}) + execution_metadata = input_data.get("execution_metadata", {}) + + issues = [] + errors = [] + inconsistencies = [] + optimizations = [] + agent_improvements = {} + + # Check for general errors + if execution_metadata.get("errors"): + errors.extend(execution_metadata["errors"]) + + # Get team producer to check agent memory status + team_producer = input_data.get("team_producer") + + # Check each agent - only process agents with valid results + for agent_name, output in agent_outputs.items(): + # Skip agents that failed (no result available) + if output is None: + # Agent failed - skip it, don't include in improvements + continue + + # Check for errors in execution results + if agent_name in execution_results: + result = execution_results[agent_name] + if isinstance(result, dict) and result.get("error"): + # Agent failed with error - skip it, don't include in improvements + continue + + # Get agent instance for quality evaluation and memory check + agent = None + has_memory_enabled = False + if team_producer: + try: + # Try to get agent instance from team + agent = team_producer.get_agent_by_name(agent_name) + if agent is None: + # Check if it's the manager + manager = team_producer.get_manager() + if manager and manager.name == agent_name: + agent = manager + + if agent: + # Check if agent has memory enabled + try: + has_memory_enabled = agent.has_memory_enabled() + except AttributeError: + # Check ENABLE_MEMORY class variable as fallback + has_memory_enabled = getattr(agent, 'ENABLE_MEMORY', False) + except (AttributeError, KeyError): + # If we can't check, assume no memory + has_memory_enabled = False + + # Evaluate result quality + is_quality_ok, quality_issues = self._evaluate_result_quality(output, agent) + + # Only include agents with quality results and memory enabled + if has_memory_enabled: + if is_quality_ok: + # Agent has quality result and memory enabled - capture learnings + agent_improvements[agent_name] = agent_models.AgentImprovement( + agent_name=agent_name, + improvements=["Capture successful execution patterns"], + issues=[], + errors=[], + reasoning=f"Agent {agent_name} produced quality result with memory enabled - capturing learnings", + ) + else: + # Agent has result but quality issues - include with issues for improvement + agent_improvements[agent_name] = agent_models.AgentImprovement( + agent_name=agent_name, + improvements=["Improve result quality and completeness"], + issues=quality_issues, + errors=[], + reasoning=f"Agent {agent_name} produced result but quality checks failed: {', '.join(quality_issues)}", + ) + + # Count quality vs non-quality results + quality_count = sum( + 1 for imp in agent_improvements.values() + if not imp.issues and not imp.errors + ) + quality_issue_count = len(agent_improvements) - quality_count + + summary = ( + f"Found {len(errors)} errors, {len(issues)} issues. " + f"{quality_count} agents with quality results processed for memory updates. " + f"{quality_issue_count} agents with quality issues identified." + ) + + return agent_models.CriticAnalysis( + issues=issues, + errors=errors, + inconsistencies=inconsistencies, + optimizations=optimizations, + summary=summary, + agent_improvements=agent_improvements, + ) diff --git a/packages/tentacles/Agent/sub_agents/default_critic_agent/metadata.json b/packages/tentacles/Agent/sub_agents/default_critic_agent/metadata.json new file mode 100644 index 0000000000..e99c0bef29 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_critic_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DefaultCriticAgent", "DefaultAICriticAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/default_memory_agent/__init__.py b/packages/tentacles/Agent/sub_agents/default_memory_agent/__init__.py new file mode 100644 index 0000000000..fc9c7c7466 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_memory_agent/__init__.py @@ -0,0 +1,10 @@ +from .default_memory_agent import ( + DefaultMemoryAgentChannel, + DefaultMemoryAgentConsumer, + DefaultMemoryAgentProducer, +) +from .default_ai_memory_agent import ( + DefaultAIMemoryAgentChannel, + DefaultAIMemoryAgentConsumer, + DefaultAIMemoryAgentProducer, +) diff --git a/packages/tentacles/Agent/sub_agents/default_memory_agent/default_ai_memory_agent.py b/packages/tentacles/Agent/sub_agents/default_memory_agent/default_ai_memory_agent.py new file mode 100644 index 0000000000..4148999f12 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_memory_agent/default_ai_memory_agent.py @@ -0,0 +1,336 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import typing + +import octobot_agents.agent.memory as memory +import octobot_agents.models as agent_models +from octobot_services.enums import AIModelPolicy + +MEMORY_TITLE_MAX_LENGTH = 100 +MEMORY_CONTEXT_MAX_LENGTH = 300 +MEMORY_CONTENT_MAX_LENGTH = 1000 + + +class DefaultAIMemoryAgentChannel(memory.AIMemoryAgentChannel): + pass + + +class DefaultAIMemoryAgentConsumer(memory.AIMemoryAgentConsumer): + pass + + +class DefaultAIMemoryAgentProducer(memory.AIMemoryAgentProducer): + """ + AI-powered memory agent that transforms critic feedback into structured memory instructions. + + Uses LLM to analyze critic analysis and generate behavioral, strategic, risk management, + and coordination improvement instructions for agents. + + Inherits from AIMemoryAgentProducer. Uses LLM to transform critic feedback + into structured memory instructions. + + Design influenced by multi-layer and agent memory works including: + Mem-Agent, Moom, LightMem, Nemori, O-Mem (Omni Memory), SEDM, MemoRAG, + EM-LLM, COMEDY, Agent Workflow Memory (AWM), temporal/meta-data-aware RAG, + Memory Decoder (MemDec), Explicit Working Memory (Ewe), + Memento 2 (Stateful Reflective Memory), Agentic Memory (AgeMem), + Agentic Context Engineering (ACE), and practical agent memory systems + such as MemoryBank, LONGMEM, Reflexion and Generative Agents. + """ + + AGENT_CHANNEL: typing.Type[memory.AIMemoryAgentChannel] = DefaultAIMemoryAgentChannel + AGENT_CONSUMER: typing.Type[memory.AIMemoryAgentConsumer] = DefaultAIMemoryAgentConsumer + MODEL_POLICY = AIModelPolicy.REASONING + + def __init__( + self, + channel: typing.Optional[DefaultAIMemoryAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + self_improving=self_improving, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + """ + Return the default prompt for the AI-powered memory agent. + + Returns: + The default system prompt string. + """ + return f"""You are a memory management agent for an AI agent team. +Your role is to analyze critic feedback and transform it into short, simple, precise, command-like +instructions that help agents improve their behavior, strategies, and coordination over time. + +CRITICAL REQUIREMENTS: +1. Title (max {MEMORY_TITLE_MAX_LENGTH} chars): short, direct, action-oriented + (e.g., "Tighten risk on low-liquidity assets", "Use conservative allocation when volatility spikes"). +2. Context (max {MEMORY_CONTEXT_MAX_LENGTH} chars): very brief description of the situation or + recurring pattern (e.g., "Over-trading in sideways markets", "Stops too tight in high volatility"). +3. Content (max {MEMORY_CONTENT_MAX_LENGTH} chars): simple list of direct behavior-level commands, + one per line, no headers or long explanations. + +PRECISION REQUIREMENTS (CRITICAL): +- Focus on decision-making, strategy, risk management, and coordination between agents. +- Describe WHEN to apply a rule (market regime, instrument type, time horizon, signal conditions). +- Describe WHAT to do differently (e.g., adjust thresholds, avoid certain patterns, prefer + particular workflows, add safety checks). +- Avoid restating low-level schema or type constraints that are already enforced by Pydantic + models (do NOT create memories whose only purpose is to list required fields or types). +- Instead, capture the reasoning behind *why* an agent failed or succeeded so behavior can be + improved or reused in future similar situations. + +FORMAT REQUIREMENTS: +- Use imperative, command-like sentences (e.g., "Reduce position size when volatility increases", + "Wait for confirmation from multiple indicators before entering a trade"). +- NO section headers like "Structured Actions:" or "Guidance:". +- NO numbering prefixes ("1.", "2.", etc.). +- NO full stack traces or raw error messages; summarize them into a short, human-readable issue. +- Prefer a handful of strong, high-value commands over many tiny, redundant ones. + +GENERALIZATION RULES (STRICTLY ENFORCED): +- DO NOT include specific cryptocurrency names (e.g., "BTC", "ETH") or hard-coded tickers. +- DO express conditions and behaviors in generic terms (e.g., "asset", "market", "indicator", + "volatility spike", "low liquidity", "trend vs range"). +- Make content reusable across agents and markets by focusing on patterns, not one-off incidents. + +You will receive critic analysis that identifies which specific agents need improvements. +Only process memories for agents listed in critic_analysis.agent_improvements. +For each such agent, produce a small set of concise, high-impact behavioral or strategic +instructions that would have helped avoid the issues or replicate the successes.""" + + async def execute( + self, + input_data: typing.Union[agent_models.MemoryInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any, + ) -> agent_models.MemoryOperation: + """ + Execute memory operations using LLM. + + Args: + input_data: Contains {"critic_analysis": CriticAnalysis, "agent_outputs": Dict, "execution_metadata": dict} + ai_service: The AI service instance for LLM calls + + Returns: + MemoryOperation with list of operations performed + """ + critic_analysis = input_data.get("critic_analysis") if isinstance(input_data, dict) else None + agent_outputs = input_data.get("agent_outputs", {}) if isinstance(input_data, dict) else {} + execution_metadata = input_data.get("execution_metadata", {}) if isinstance(input_data, dict) else {} + + if not critic_analysis: + return agent_models.MemoryOperation( + success=False, + operations=[], + memory_ids=[], + agent_updates={}, + agents_processed=[], + agents_skipped=[], + message="No critic analysis provided", + ) + + critic_analysis = agent_models.CriticAnalysis.model_validate_or_self(critic_analysis) + agent_improvements = critic_analysis.get_agent_improvements() + agents_to_process = list(agent_improvements.keys()) + + if not agents_to_process: + return agent_models.MemoryOperation( + success=True, + operations=[], + memory_ids=[], + agent_updates={}, + agents_processed=[], + agents_skipped=list(agent_outputs.keys()), + message="No agents need memory updates", + ) + + improvements_summary = [] + for agent_name, improvement in agent_improvements.items(): + improvement = agent_models.AgentImprovement.model_validate_or_self(improvement) + improvements_summary.append( + { + "agent_name": agent_name, + "improvements": improvement.improvements, + "issues": improvement.issues, + "errors": improvement.errors, + "reasoning": improvement.reasoning, + } + ) + + critic_summary = critic_analysis.get_summary() + critic_issues = critic_analysis.get_issues() + critic_errors = critic_analysis.errors + + messages = [ + {"role": "system", "content": self.prompt}, + { + "role": "user", + "content": f"""Transform the following critic feedback into short, simple, precise, command-like +instructions for each agent. Focus ONLY on behavioral, strategic, risk, and coordination +improvements – ignore pure schema/format/type issues. + +IMPORTANT FILTER: +- If the feedback for an agent only contains schema/format/type errors (e.g., missing keys, + wrong types, "output must be dict"), and NO behavioral or strategic issues, then: + -> Do NOT return any instructions for that agent. +- Only create instructions when there is at least one behavior/strategy/risk/logic issue + that can improve the agent's decisions. + +CRITICAL REQUIREMENTS: +- Title: Maximum {MEMORY_TITLE_MAX_LENGTH} characters - short, direct, behavioral/strategic command. +- Context: Maximum {MEMORY_CONTEXT_MAX_LENGTH} characters - short description of the situation or + recurring pattern (NOT raw error messages). +- Content: Maximum {MEMORY_CONTENT_MAX_LENGTH} characters - simple list of behavior-level commands, + one per line. + +Critic Summary: {critic_summary} +Team Issues: {self.format_data(critic_issues)} +Team Errors: {self.format_data(critic_errors)} +Agent Improvements: +{self.format_data(improvements_summary)} + +Return a JSON object with an "instructions" array containing entries only for agents that +have at least one behavioral or strategic issue to learn from.""", + }, + ] + + try: + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=agent_models.AgentMemoryInstructionsList, + input_data=input_data, + ) + + instructions_list_model = agent_models.AgentMemoryInstructionsList.model_validate(response_data) + + operations: typing.List[str] = [] + memory_ids: typing.List[str] = [] + agent_updates: typing.Dict[str, typing.List[str]] = {} + team_producer = execution_metadata.get("team_producer") + agents_processed: typing.List[str] = [] + agents_skipped: typing.List[str] = [] + + instructions_list = instructions_list_model.instructions + instructions_by_agent = { + item.agent_name: item.instructions + for item in instructions_list + } + + for agent_name in agents_to_process: + improvement = agent_improvements[agent_name] + improvement = agent_models.AgentImprovement.model_validate_or_self(improvement) + + agent = None + if team_producer: + try: + agent = team_producer.get_agent_by_name(agent_name) + if agent is None: + manager = team_producer.get_manager() + if manager and manager.name == agent_name: + agent = manager + except (AttributeError, KeyError): + agent = None + + if agent is None: + agents_skipped.append(agent_name) + continue + + try: + memory_enabled = agent.has_memory_enabled() + except AttributeError: + memory_enabled = getattr(agent, "ENABLE_MEMORY", False) + + if not memory_enabled: + agents_skipped.append(agent_name) + continue + + try: + agent_memory_storage = agent.memory_manager + except AttributeError: + agent_memory_storage = None + + if agent_memory_storage is None: + agents_skipped.append(agent_name) + continue + + agent_instructions = instructions_by_agent.get(agent_name) + if not agent_instructions: + agents_skipped.append(agent_name) + continue + + memory_instruction = agent_models.MemoryInstruction.model_validate_or_self(agent_instructions) + title = memory_instruction.title + context_text = memory_instruction.context + content = memory_instruction.build_content() + + await agent_memory_storage.store_memory( + messages=[{"role": "user", "content": content}], + input_data={"agent_id": ""}, + metadata={ + "category": "improvement", + "importance_score": 0.7, + "title": title, + "context": context_text, + "source_agent": agent_name, + "source_type": "critic_improvement", + "memory_system": "ai_memory_agent", + "domain_tags": execution_metadata.get("domain_tags", []), + }, + ) + + all_memories = agent_memory_storage.get_all_memories() + if all_memories: + new_memory_id = all_memories[-1].get("id") + memory_ids.append(new_memory_id) + agent_updates.setdefault(agent_name, []).append(new_memory_id) + if "generated" not in operations: + operations.append("generated") + + agents_processed.append(agent_name) + + return agent_models.MemoryOperation( + success=True, + operations=operations, + memory_ids=memory_ids, + agent_updates=agent_updates, + agents_processed=agents_processed, + agents_skipped=agents_skipped, + message=f"Processed memories for {len(agents_processed)} agents", + ) + + except Exception as e: + self.logger.error(f"DefaultAIMemoryAgentProducer error executing memory operations: {e}") + return agent_models.MemoryOperation( + success=False, + operations=[], + memory_ids=[], + agent_updates={}, + agents_processed=[], + agents_skipped=agents_to_process, + message=f"Error: {str(e)}", + ) diff --git a/packages/tentacles/Agent/sub_agents/default_memory_agent/default_memory_agent.py b/packages/tentacles/Agent/sub_agents/default_memory_agent/default_memory_agent.py new file mode 100644 index 0000000000..5b54c7601f --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_memory_agent/default_memory_agent.py @@ -0,0 +1,117 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import typing + +import octobot_agents.agent.memory as memory +import octobot_agents.models as agent_models + + +class DefaultMemoryAgentChannel(memory.MemoryAgentChannel): + pass + + +class DefaultMemoryAgentConsumer(memory.MemoryAgentConsumer): + pass + + +class DefaultMemoryAgentProducer(memory.MemoryAgentProducer): + """ + Default memory agent - simple rule-based memory management. + + Inherits from MemoryAgentProducer. Uses simple heuristics instead of LLM. + + Note: This is a basic rule-based memory agent with limited learning capabilities. + For advanced memory management (transformation of feedback into structured instructions), + use DefaultAIMemoryAgentProducer which uses LLM-based analysis. + """ + + AGENT_CHANNEL = DefaultMemoryAgentChannel + AGENT_CONSUMER = DefaultMemoryAgentConsumer + + def __init__( + self, + channel: typing.Optional[memory.MemoryAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + self_improving: bool = True, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + self_improving=self_improving, + **kwargs, + ) + + async def execute( + self, + input_data: typing.Union[agent_models.MemoryInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any, + ) -> agent_models.MemoryOperation: + """ + Execute memory operations using simple heuristics. + + Args: + input_data: Contains memory input data + ai_service: The AI service instance (not used in rule-based approach) + + Returns: + MemoryOperation with results + """ + # Extract critic analysis if available + critic_analysis = input_data.get("critic_analysis") if isinstance(input_data, dict) else None + + if not critic_analysis: + return agent_models.MemoryOperation( + success=False, + operations=[], + memory_ids=[], + agent_updates={}, + agents_processed=[], + agents_skipped=[], + message="No critic analysis provided", + ) + + # Convert to model if needed + critic_analysis = agent_models.CriticAnalysis.model_validate_or_self(critic_analysis) + + # Get agent improvements + agent_improvements = critic_analysis.get_agent_improvements() + + if not agent_improvements: + return agent_models.MemoryOperation( + success=True, + operations=[], + memory_ids=[], + agent_updates={}, + agents_processed=[], + agents_skipped=[], + message="No agents need memory updates", + ) + + return agent_models.MemoryOperation( + success=True, + operations=["heuristic_processed"], + memory_ids=[], + agent_updates={}, + agents_processed=list(agent_improvements.keys()), + agents_skipped=[], + message=f"Rule-based processing completed for {len(agent_improvements)} agents", + ) diff --git a/packages/tentacles/Agent/sub_agents/default_memory_agent/metadata.json b/packages/tentacles/Agent/sub_agents/default_memory_agent/metadata.json new file mode 100644 index 0000000000..9aa9cd0e58 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/default_memory_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DefaultMemoryAgentProducer", "DefaultAIMemoryAgentProducer"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/distribution_agent/__init__.py b/packages/tentacles/Agent/sub_agents/distribution_agent/__init__.py new file mode 100644 index 0000000000..5508246617 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/distribution_agent/__init__.py @@ -0,0 +1,7 @@ +from .models import AssetDistribution, DistributionOutput +from .distribution_agent import ( + DistributionAIAgentChannel, + DistributionAIAgentConsumer, + DistributionAIAgentProducer, + run_distribution_agent, +) diff --git a/packages/tentacles/Agent/sub_agents/distribution_agent/constants.py b/packages/tentacles/Agent/sub_agents/distribution_agent/constants.py new file mode 100644 index 0000000000..22d314de4f --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/distribution_agent/constants.py @@ -0,0 +1,30 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +# Instruction constants +INSTRUCTION_ACTION = "action" +INSTRUCTION_SYMBOL = "symbol" +INSTRUCTION_AMOUNT = "amount" +INSTRUCTION_WEIGHT = "weight" + +# Action types +ACTION_REDUCE_EXPOSURE = "reduce_exposure" +ACTION_INCREASE_EXPOSURE = "increase_exposure" +ACTION_ADD_TO_DISTRIBUTION = "add_to_distribution" +ACTION_REMOVE_FROM_DISTRIBUTION = "remove_from_distribution" +ACTION_UPDATE_RATIO = "update_ratio" +ACTION_INCREASE_FIAT_RATIO = "increase_fiat_ratio" +ACTION_DECREASE_FIAT_RATIO = "decrease_fiat_ratio" diff --git a/packages/tentacles/Agent/sub_agents/distribution_agent/distribution_agent.py b/packages/tentacles/Agent/sub_agents/distribution_agent/distribution_agent.py new file mode 100644 index 0000000000..9f476994be --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/distribution_agent/distribution_agent.py @@ -0,0 +1,300 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Distribution Agent. +Makes final portfolio distribution decisions based on synthesized signals and risk assessment. +Uses ai_index_distribution functions to apply changes. +""" +import json +import typing + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_agents.constants as agent_constants +from octobot_services.enums import AIModelPolicy + +from tentacles.Agent.sub_agents.signal_agent.state import AIAgentState +from .models import DistributionOutput + + +class DistributionAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = DistributionOutput + + +class DistributionAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class DistributionAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """ + Distribution agent producer that makes final portfolio allocation decisions. + Combines signal synthesis and risk assessment to determine target distribution. + """ + MODEL_POLICY = AIModelPolicy.REASONING + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = DistributionAIAgentChannel + AGENT_CONSUMER = DistributionAIAgentConsumer + ENABLE_MEMORY = True + + def __init__(self, channel, model=None, max_tokens=None, temperature=None, **kwargs): + """ + Initialize the distribution agent producer. + + Args: + channel: The channel this producer is registered to. + model: LLM model to use. + max_tokens: Maximum tokens for response. + temperature: Temperature for LLM randomness. + """ + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + """Return the default system prompt.""" + return """ +You are a Portfolio Distribution Agent for cryptocurrency trading. +Your task is to make FINAL portfolio allocation decisions based on synthesized signals and risk assessment. + +## Your Role +- Determine target percentage allocation for each asset. +- Balance signal-driven opportunities with risk constraints. +- Decide on rebalancing urgency. + +## Important Constraints +- Total allocation MUST sum to exactly 100%. +- Respect maximum allocation limits from risk assessment. +- Maintain minimum cash reserve as recommended by risk agent. +- Consider current distribution to minimize unnecessary trades. + +## Allocation Actions +- "increase": Increase allocation from current level +- "decrease": Decrease allocation from current level +- "maintain": Keep current allocation +- "add": Add new asset to portfolio +- "remove": Remove asset from portfolio entirely + +## Rebalancing Urgency +- "immediate": Critical signals or risk levels require immediate action +- "soon": Moderate signals suggest rebalancing within the trading session +- "low": Minor adjustments that can wait +- "none": Current distribution is acceptable + +## Decision Framework +1. Start with current distribution as baseline +2. Apply signal synthesis recommendations (increase/decrease based on direction and strength) +3. Apply risk constraints (max allocation limits, min cash reserve) +4. Ensure total sums to 100% +5. Determine urgency based on signal strength and risk level + +## REQUIRED OUTPUT FORMAT - STRICT SCHEMA + +Output a JSON object with: +- "distributions" (or "allocations"): Array of objects, each with ALL of these REQUIRED fields: + * "asset" (string): Asset symbol like "BTC", "ETH", "USD" - REQUIRED + * "percentage" (number): Float between 0.0 and 100.0 - REQUIRED (can also be named "target_percentage", "target_allocation", "allocation", "weight", or "ratio") + * "action" (string): EXACTLY one of "increase", "decrease", "maintain", "add", "remove" - REQUIRED + * "explanation" (string): Clear explanation text for this allocation - REQUIRED +- "rebalance_urgency" (string): EXACTLY one of "immediate", "soon", "low", "none" - REQUIRED +- "reasoning" (string): Summary explanation - REQUIRED + +CRITICAL: Every field above is REQUIRED. Do NOT omit any field, especially "action" and "explanation" in each distribution object. +If an asset is the reference market (e.g., USDT/USD), you MUST still provide an action (usually "maintain"). +""" + + def _build_user_prompt(self, state: AIAgentState) -> str: + """Build the user prompt with all decision inputs.""" + signal_synthesis = state.get("signal_synthesis") + risk_output = state.get("risk_output") + current_distribution = state.get("current_distribution", {}) + cryptocurrencies = state.get("cryptocurrencies", []) + reference_market = state.get("reference_market", "USD") + + # Format signal synthesis + synthesis_data = {} + if signal_synthesis: + try: + # Try Pydantic model access + synthesis_data = { + "market_outlook": signal_synthesis.market_outlook, + "summary": signal_synthesis.summary, + "signals": [ + { + "asset": s.asset, + "direction": s.direction, + "strength": s.strength, + "consensus_level": s.consensus_level, + "trading_instruction": s.trading_instruction + } + for s in signal_synthesis.synthesized_signals + ] + } + except AttributeError: + # It's a dict + synthesis_data = signal_synthesis + + # Format risk output + risk_data = {} + if risk_output: + try: + # Try Pydantic model access + risk_data = { + "overall_risk_level": risk_output.metrics.overall_risk_level, + "concentration_risk": risk_output.metrics.concentration_risk, + "volatility_exposure": risk_output.metrics.volatility_exposure, + "liquidity_risk": risk_output.metrics.liquidity_risk, + "max_allocations": risk_output.max_allocation_per_asset, + "min_cash_reserve": risk_output.min_cash_reserve, + "recommendations": risk_output.recommendations, + "reasoning": risk_output.reasoning + } + except AttributeError: + # It's a dict + risk_data = risk_output + + allowed_assets = cryptocurrencies + [reference_market] + + return f""" +# Determine Portfolio Distribution + +## Allowed Assets +{json.dumps(allowed_assets, indent=2)} + +## Current Distribution (percentages) +{json.dumps(current_distribution, indent=2)} + +## Signal Synthesis (from Signal Agent) +{json.dumps(synthesis_data, indent=2, default=str)} + +## Risk Assessment (from Risk Agent) +{json.dumps(risk_data, indent=2, default=str)} + +## Reference Market (Stablecoin/Cash) +{reference_market} + +## Task +Based on the synthesized signals and risk assessment: +1. Determine target allocation percentage for each asset +2. Specify the action (increase/decrease/maintain/add/remove) +3. Ensure total allocations sum to exactly 100% +4. Respect risk constraints (max allocations, min cash reserve) +5. Set rebalancing urgency +6. Provide reasoning for decisions + +## REQUIRED OUTPUT FORMAT - STRICT SCHEMA + +You MUST return a JSON object with: +- "distributions" (or "allocations"): Array where each object has ALL of these REQUIRED fields: + * "asset" (string): REQUIRED - Asset symbol + * "percentage" (number): REQUIRED - Float 0.0-100.0 (can also be named "target_percentage", "target_allocation", "allocation", "weight", or "ratio") + * "action" (string): REQUIRED - One of "increase", "decrease", "maintain", "add", "remove" + * "explanation" (string): REQUIRED - Clear explanation for this allocation decision +- "rebalance_urgency" (string): REQUIRED - One of "immediate", "soon", "low", "none" +- "reasoning" (string): REQUIRED - Overall reasoning + +CRITICAL: Every field is REQUIRED. Do NOT omit "explanation" in any distribution object. + +Remember: +- Percentages must sum to 100% +- Only use allowed assets +- Balance opportunity with risk +""" + + def _merge_predecessor_outputs(self, input_data: typing.Any) -> dict: + """ + Merge predecessor agent outputs into state. + + When the distribution agent has multiple predecessors (Signal and Risk), + the team system passes them as a dict with agent names as keys. + This method extracts and merges their outputs into the state. + + Args: + input_data: Either a state dict (for entry agents) or a dict with + predecessor outputs keyed by agent name. + + Returns: + Merged state dict with signal_synthesis and risk_output at top level. + """ + + try: + return dict(input_data) + except Exception: + return {} + + async def execute(self, input_data: typing.Any, ai_service) -> typing.Any: + # Merge predecessor outputs into state + state = self._merge_predecessor_outputs(input_data) + self.logger.debug(f"Starting {self.name}...") + + if not state.get("signal_synthesis") and not state.get("risk_output"): + self.logger.warning( + f"{self.name} missing signal_synthesis and risk_output; skipping distribution." + ) + return {"distribution_output": None} + + try: + messages = [ + {"role": "system", "content": self.prompt}, + {"role": "user", "content": self._build_user_prompt(state)}, + ] + + try: + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=DistributionOutput, + ) + distribution_output = DistributionOutput(**response_data) + except Exception as e: + self.logger.warning(f"LLM call failed with error: {e}. Attempting to extract JSON from error message.") + extracted_json = DistributionOutput.recover_json_from_error(e) + if extracted_json: + try: + distribution_output = DistributionOutput(**extracted_json) + except Exception as e2: + self.logger.error(f"Failed to parse extracted JSON into DistributionOutput: {e2}") + distribution_output = None + else: + distribution_output = None + + self.logger.debug(f"{self.name} completed successfully.") + return {"distribution_output": distribution_output} + + except Exception as e: + self.logger.exception(f"Error in {self.name}: {e}") + return {} + + +async def run_distribution_agent(state: AIAgentState, ai_service, agent_id: str = "distribution-agent") -> dict: + """ + Convenience function to run the distribution agent. + + Args: + state: The current agent state. + ai_service: The AI service instance. + agent_id: Unique identifier for the agent instance. + + Returns: + State updates from the agent. + """ + distribution_agent = DistributionAIAgentProducer(channel=None) + return await distribution_agent.execute(state, ai_service) diff --git a/packages/tentacles/Agent/sub_agents/distribution_agent/metadata.json b/packages/tentacles/Agent/sub_agents/distribution_agent/metadata.json new file mode 100644 index 0000000000..58ea47f771 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/distribution_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DistributionAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/distribution_agent/models.py b/packages/tentacles/Agent/sub_agents/distribution_agent/models.py new file mode 100644 index 0000000000..9e3ba61f12 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/distribution_agent/models.py @@ -0,0 +1,182 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for distribution agent outputs. +""" +from typing import Dict, List +from pydantic import Field, field_validator, AliasChoices, model_validator + +import octobot_agents.models as agent_models + +from .constants import ( + INSTRUCTION_ACTION, + INSTRUCTION_SYMBOL, + INSTRUCTION_WEIGHT, + ACTION_REDUCE_EXPOSURE, + ACTION_INCREASE_EXPOSURE, + ACTION_ADD_TO_DISTRIBUTION, + ACTION_REMOVE_FROM_DISTRIBUTION, + ACTION_UPDATE_RATIO, +) + + +class AssetDistribution(agent_models.AgentBaseModel): + """Distribution allocation for a single asset. + + Strict schema enforcement: All fields are required with correct types. + The LLM must return the exact format specified. + """ + __strict_json_schema__ = True + asset: str = Field(description="Asset symbol (e.g., 'BTC', 'ETH', 'USD'). Must be a string.") + percentage: float = Field( + ge=0.0, + le=100.0, + description="Allocation percentage as a number between 0.0 and 100.0. Must be a float.", + validation_alias=AliasChoices("percentage", "target_percentage", "target_allocation", "allocation", "weight", "ratio") + ) + action: str = Field( + description="Action to take. Must be one of: 'increase', 'decrease', 'maintain', 'add', 'remove'." + ) + explanation: str = Field( + description="Explanation for this allocation. Must be a descriptive string explaining the reasoning." + ) + + @model_validator(mode="before") + @classmethod + def normalize_missing_action(cls, data): + try: + if "action" not in data: + data["action"] = "maintain" + except Exception: + return data + return data + + @field_validator("action") + def validate_action(cls, v: str) -> str: + allowed_actions = ["increase", "decrease", "maintain", "add", "remove"] + v_lower = v.lower() + if v_lower not in allowed_actions: + raise ValueError(f"Action must be one of {allowed_actions}") + return v_lower + + +class DistributionOutput(agent_models.AgentBaseModel): + """Output from the distribution agent - final portfolio distribution.""" + __strict_json_schema__ = True + + distributions: List[AssetDistribution] = Field( + description="Target distribution for each asset.", + validation_alias=AliasChoices("distributions", "allocations") + ) + rebalance_urgency: str = Field( + description="Urgency of rebalancing: 'immediate', 'soon', 'low', 'none'." + ) + reasoning: str = Field( + description="Overall reasoning for the distribution decisions." + ) + + @model_validator(mode="before") + @classmethod + def normalize_input(cls, data): + wrapper_keys = {"distributions", "allocations", "rebalance_urgency", "reasoning"} + percentage_keys = {"percentage", "target_percentage", "target_allocation", "allocation", "weight", "ratio"} + recovered_reasoning = "Recovered from malformed distribution output." + + if isinstance(data, list): + return { + "distributions": data, + "rebalance_urgency": "none", + "reasoning": recovered_reasoning, + } + if isinstance(data, str): + recovered = cls.recover_json_from_error(data) + if recovered: + return recovered + if isinstance(data, dict): + if "error" in data: + recovered = cls.recover_json_from_error(data.get("error")) + if recovered: + return recovered + if not (wrapper_keys & set(data.keys())): + return { + "distributions": [], + "rebalance_urgency": "none", + "reasoning": "", + } + has_wrapper_fields = bool(wrapper_keys & set(data.keys())) + is_single_distribution = ( + "asset" in data + and bool(percentage_keys & set(data.keys())) + ) + if not has_wrapper_fields and is_single_distribution: + return { + "distributions": [data], + "rebalance_urgency": "none", + "reasoning": recovered_reasoning, + } + if "distributions" in data or "allocations" in data: + data.setdefault("rebalance_urgency", "none") + data.setdefault("reasoning", recovered_reasoning) + return data + + @field_validator("rebalance_urgency") + def validate_urgency(cls, v: str) -> str: + allowed_urgency = ["immediate", "soon", "low", "none"] + v_lower = v.lower() + if v_lower not in allowed_urgency: + raise ValueError(f"Urgency must be one of {allowed_urgency}") + return v_lower + + def get_distribution_dict(self) -> Dict[str, float]: + """Convert distributions to a simple dict format.""" + return {d.asset: d.percentage for d in self.distributions} + + def get_ai_instructions(self) -> List[dict]: + """Convert distributions to AI instruction format for ai_index_distribution.""" + instructions = [] + for dist in self.distributions: + if dist.action == "increase": + instructions.append({ + INSTRUCTION_ACTION: ACTION_INCREASE_EXPOSURE, + INSTRUCTION_SYMBOL: dist.asset, + INSTRUCTION_WEIGHT: dist.percentage, + }) + elif dist.action == "decrease": + instructions.append({ + INSTRUCTION_ACTION: ACTION_REDUCE_EXPOSURE, + INSTRUCTION_SYMBOL: dist.asset, + INSTRUCTION_WEIGHT: dist.percentage, + }) + elif dist.action == "add": + instructions.append({ + INSTRUCTION_ACTION: ACTION_ADD_TO_DISTRIBUTION, + INSTRUCTION_SYMBOL: dist.asset, + INSTRUCTION_WEIGHT: dist.percentage, + }) + elif dist.action == "remove": + instructions.append({ + INSTRUCTION_ACTION: ACTION_REMOVE_FROM_DISTRIBUTION, + INSTRUCTION_SYMBOL: dist.asset, + }) + elif dist.action == "maintain": + instructions.append({ + INSTRUCTION_ACTION: ACTION_UPDATE_RATIO, + INSTRUCTION_SYMBOL: dist.asset, + INSTRUCTION_WEIGHT: dist.percentage, + }) + + return instructions diff --git a/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/__init__.py b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/__init__.py new file mode 100644 index 0000000000..616bd2bce6 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/__init__.py @@ -0,0 +1,6 @@ +from .real_time_analysis_agent import ( + RealTimeAnalysisAIAgentChannel, + RealTimeAnalysisAIAgentConsumer, + RealTimeAnalysisAIAgentProducer, +) +from .models import RealTimeAnalysisOutput diff --git a/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/metadata.json b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/metadata.json new file mode 100644 index 0000000000..62bcc05c68 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["RealTimeAnalysisAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/models.py b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/models.py new file mode 100644 index 0000000000..1ff3ed18a0 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/models.py @@ -0,0 +1,97 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for real-time analysis agent outputs. +""" +from typing import List, Optional +from pydantic import BaseModel, Field, field_validator + +import octobot_agents.models as agent_models + + +class RealTimeAnalysisOutput(agent_models.AgentBaseModel): + __strict_json_schema__ = True + """Output from the real-time analysis agent.""" + eval_note: float = Field( + ge=-1.0, + le=1.0, + description="Evaluation score from -1 (strong selling pressure) to 1 (strong buying pressure)." + ) + confidence: float = Field( + ge=0.0, + le=1.0, + description="Confidence level of the real-time analysis (0-1)." + ) + description: str = Field( + description="Summary description of the real-time market analysis." + ) + price_momentum: Optional[float] = Field( + default=None, + ge=-1.0, + le=1.0, + description="Price momentum indicator from -1 (strong downward) to 1 (strong upward). Leave empty if no momentum." + ) + current_status: Optional[str] = Field( + default=None, + description="Current market status: 'bullish', 'bearish', 'neutral', 'volatile'. Leave empty if unclear." + ) + volume_signal: Optional[str] = Field( + default=None, + description="Volume analysis: 'high', 'normal', 'low'. Leave empty if no volume data." + ) + urgency_level: Optional[str] = Field( + default=None, + description="Action urgency: 'immediate', 'high', 'medium', 'low', 'none'. Leave empty if no urgency." + ) + critical_events: Optional[List[str]] = Field( + default=None, + description="Any critical events or catalysts detected. Leave empty if none." + ) + recommendations: Optional[List[str]] = Field( + default=None, + description="Real-time trading recommendations. Leave empty if none." + ) + + @field_validator("current_status") + def validate_status(cls, v: str) -> str: + if v is None: + return None + allowed_statuses = ["bullish", "bearish", "neutral", "volatile"] + v_lower = v.lower() + if v_lower not in allowed_statuses: + raise ValueError(f"Status must be one of {allowed_statuses}") + return v_lower + + @field_validator("volume_signal") + def validate_volume(cls, v: str) -> str: + if v is None: + return None + allowed_volumes = ["high", "normal", "low"] + v_lower = v.lower() + if v_lower not in allowed_volumes: + raise ValueError(f"Volume signal must be one of {allowed_volumes}") + return v_lower + + @field_validator("urgency_level") + def validate_urgency(cls, v: str) -> str: + if v is None: + return None + allowed_urgencies = ["immediate", "high", "medium", "low", "none"] + v_lower = v.lower() + if v_lower not in allowed_urgencies: + raise ValueError(f"Urgency level must be one of {allowed_urgencies}") + return v_lower diff --git a/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/real_time_analysis_agent.py b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/real_time_analysis_agent.py new file mode 100644 index 0000000000..88addc0984 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/real_time_analysis_agent/real_time_analysis_agent.py @@ -0,0 +1,115 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels + +from .models import RealTimeAnalysisOutput + + +class RealTimeAnalysisAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = RealTimeAnalysisOutput + + +class RealTimeAnalysisAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class RealTimeAnalysisAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """Producer specialized in real-time market analysis.""" + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = RealTimeAnalysisAIAgentChannel + AGENT_CONSUMER = RealTimeAnalysisAIAgentConsumer + + def __init__(self, channel, **kwargs): + super().__init__(channel, **kwargs) + + def _get_default_prompt(self) -> str: + return ( + "You are a Real-Time Market Analysis AI expert. Follow these steps to analyze the provided real-time evaluator signals:\n" + "1. Examine real-time data comprehensively: Review order book dynamics, recent trades, price velocity, and market depth.\n" + "2. Assess market momentum: Determine current buying/selling pressure, accumulation/distribution patterns, and directional bias.\n" + "3. Evaluate market microstructure: Consider bid-ask spreads, order book imbalance, and trade flow characteristics.\n" + "4. Calculate momentum eval_note: Use the full range from -1 (strong selling pressure) to 1 (strong buying pressure), but most real-time data shows neutral to mild momentum.\n" + "5. Assess confidence (0-1) based on data quality: Higher confidence for strong, consistent signals; lower for noisy or conflicting data.\n" + "6. Provide detailed description: Explain current market dynamics, momentum indicators, and short-term outlook.\n\n" + "Important: Real-time momentum is rarely extreme. Use extreme values (-1/1) only for very strong, sustained pressure.\n\n" + "MANDATORY FIELDS (always include):\n" + "- eval_note: float between -1 (strong selling pressure) to 1 (strong buying pressure)\n" + "- confidence: float between 0 (low confidence) to 1 (high confidence)\n" + "- description: detailed explanation of current market momentum and dynamics\n\n" + "OPTIONAL FIELDS (only include if available):\n" + "- price_momentum: float for price momentum strength (-1 to 1) - Leave empty if not clearly identifiable\n" + "- current_status: string like 'accumulating', 'distributing', 'consolidating' - Leave empty if unclear\n" + "- volume_signal: string describing volume patterns - Leave empty if no strong volume signals\n" + "- urgency_level: string like 'low', 'medium', 'high' - Leave empty if no clear urgency\n" + "- critical_events: list of critical market events affecting momentum - Leave empty if none\n" + "- recommendations: list of action recommendations based on real-time analysis - Leave empty if none\n\n" + "If you lack data for any optional field, omit it from the response (leave as null).\n" + "Output only valid JSON matching the RealTimeAnalysisOutput schema." + ) + + async def execute(self, input_data, ai_service) -> dict: + """Evaluate aggregated real-time market data.""" + aggregated_data = input_data + if not aggregated_data: + return { + "eval_note": 0, + "eval_note_description": "No real-time market data available", + "confidence": 0, + } + + data_str = json.dumps(aggregated_data, indent=2) + + messages = [ + ai_service.create_message("system", self.prompt), + ai_service.create_message( + "user", + f"Real-time market data:\n{data_str}\n\n" + "Provide evaluation as JSON matching the RealTimeAnalysisOutput schema. " + "Include mandatory fields (eval_note, confidence, description). " + "Include optional fields only if you have data for them.", + ), + ] + + try: + # Uses RealTimeAnalysisAIAgentChannel.OUTPUT_SCHEMA by default + parsed = await self._call_llm( + messages, + ai_service, + json_output=True, + ) + eval_note = float(parsed.get("eval_note", 0)) + eval_note_description = parsed.get("description", "Real-time analysis") + confidence = float(parsed.get("confidence", 0)) + + # Clamp values + eval_note = max(-1, min(1, eval_note)) + confidence = max(0, min(1, confidence)) + + return { + "eval_note": eval_note, + "eval_note_description": eval_note_description, + "confidence": int(confidence * 100), # Convert to 0-100 range + } + except Exception as e: + self.logger.error(f"Error in real-time analysis: {e}") + return { + "eval_note": 0, + "eval_note_description": f"Error in real-time analysis: {str(e)}", + "confidence": 0, + } diff --git a/packages/tentacles/Agent/sub_agents/risk_agent/__init__.py b/packages/tentacles/Agent/sub_agents/risk_agent/__init__.py new file mode 100644 index 0000000000..5f6a6f2828 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_agent/__init__.py @@ -0,0 +1,7 @@ +from .risk_agent import ( + RiskAIAgentChannel, + RiskAIAgentConsumer, + RiskAIAgentProducer, + run_risk_agent, +) +from .models import RiskMetrics, RiskAssessmentOutput diff --git a/packages/tentacles/Agent/sub_agents/risk_agent/metadata.json b/packages/tentacles/Agent/sub_agents/risk_agent/metadata.json new file mode 100644 index 0000000000..358d2e2b2b --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["RiskAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/risk_agent/models.py b/packages/tentacles/Agent/sub_agents/risk_agent/models.py new file mode 100644 index 0000000000..0f37e6a49f --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_agent/models.py @@ -0,0 +1,75 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for risk agent outputs. +""" +from typing import Dict, List, Optional +from pydantic import BaseModel, Field, field_validator + +import octobot_agents.models as agent_models + + +class RiskMetrics(agent_models.AgentBaseModel): + """Portfolio risk metrics.""" + __strict_json_schema__ = True + overall_risk_level: str = Field( + description="Overall risk level: 'low', 'medium', 'high', 'critical'." + ) + concentration_risk: float = Field( + ge=0.0, + le=1.0, + description="Risk from over-concentration in few assets (0-1)." + ) + volatility_exposure: float = Field( + ge=0.0, + le=1.0, + description="Exposure to volatile assets (0-1)." + ) + liquidity_risk: float = Field( + ge=0.0, + le=1.0, + description="Risk from illiquid positions (0-1)." + ) + + @field_validator("overall_risk_level") + def validate_risk_level(cls, v: str) -> str: + allowed_levels = ["low", "medium", "high", "critical"] + v_lower = v.lower() + if v_lower not in allowed_levels: + raise ValueError(f"Risk level must be one of {allowed_levels}") + return v_lower + + +class RiskAssessmentOutput(agent_models.AgentBaseModel): + """Output from the risk assessment agent.""" + __strict_json_schema__ = True + + metrics: RiskMetrics = Field(description="Calculated risk metrics.") + recommendations: List[str] = Field( + description="Risk mitigation recommendations." + ) + max_allocation_per_asset: Optional[Dict[str, float]] = Field( + default_factory=dict, + description="Maximum recommended allocation percentage per asset." + ) + min_cash_reserve: Optional[float] = Field( + default=0.1, + ge=0.0, + le=1.0, + description="Minimum recommended cash/stablecoin reserve (0-1)." + ) + reasoning: str = Field(description="Explanation of the risk assessment.") diff --git a/packages/tentacles/Agent/sub_agents/risk_agent/risk_agent.py b/packages/tentacles/Agent/sub_agents/risk_agent/risk_agent.py new file mode 100644 index 0000000000..b2d38552af --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_agent/risk_agent.py @@ -0,0 +1,223 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Risk Assessment Agent. +Evaluates portfolio risk using trading API data. +""" +import json +import typing + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +from octobot_services.enums import AIModelPolicy + +from tentacles.Agent.sub_agents.signal_agent.state import AIAgentState +from tentacles.Agent.sub_agents.signal_agent.models import CryptoSignalOutput +from .models import RiskAssessmentOutput + + +class RiskAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = RiskAssessmentOutput + + +class RiskAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class RiskAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """ + Risk assessment agent producer that evaluates portfolio risk. + Uses portfolio data from trading API to assess concentration, volatility, and liquidity risks. + """ + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = RiskAIAgentChannel + AGENT_CONSUMER = RiskAIAgentConsumer + ENABLE_MEMORY = True + MODEL_POLICY = AIModelPolicy.FAST + + def __init__(self, channel, model=None, max_tokens=None, temperature=None, **kwargs): + """ + Initialize the risk agent producer. + + Args: + channel: The channel this producer is registered to. + model: LLM model to use. + max_tokens: Maximum tokens for response. + temperature: Temperature for LLM randomness. + """ + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + """Return the default system prompt.""" + return """ +You are a Portfolio Risk Assessment Agent for cryptocurrency trading. +Your task is to evaluate the current portfolio risk and provide risk mitigation recommendations. + +## Your Role +- Analyze the current portfolio holdings and their distribution. +- Evaluate concentration risk (over-exposure to single assets). +- Assess volatility exposure based on asset types. +- Consider liquidity risk from position sizes. +- Incorporate signal outputs from individual cryptocurrency analysis. + +## Risk Levels +- "low": Portfolio is well-diversified with manageable risk +- "medium": Some concentration or volatility concerns +- "high": Significant risk factors present +- "critical": Immediate action recommended to reduce risk + +## Important Rules +- Base your analysis ONLY on the provided portfolio and signal data. +- Provide actionable recommendations. +- Set realistic maximum allocation limits per asset. +- Consider the reference market (stablecoin) as the safe haven. + +## Output Requirements - CRITICAL VALUE RANGES +Output a JSON object with: +- "overall_risk_level": EXACTLY one of "low", "medium", "high", "critical" +- "metrics": Object with: + - "overall_risk_level": EXACTLY one of "low", "medium", "high", "critical" + - "concentration_risk": Number between 0 and 1 (0=safe, 1=dangerous) + - "volatility_exposure": Number between 0 and 1 (0=stable, 1=volatile) + - "liquidity_risk": Number between 0 and 1 (0=liquid, 1=illiquid) +- "max_allocation_per_asset": Object mapping asset to max percentage (0-100 range, e.g., 25 means 25%) +- "min_cash_reserve": Minimum recommended cash reserve as DECIMAL between 0 and 1 (e.g., 0.1 means 10%, 0.2 means 20%) +- "recommendations": Array of risk mitigation recommendations +- "reasoning": Explanation of the risk assessment +""" + + def _build_user_prompt(self, state: AIAgentState) -> str: + """Build the user prompt with portfolio data.""" + portfolio = state.get("portfolio", {}) + orders = state.get("orders", {}) + signal_outputs = state.get("signal_outputs", {}).get("signals", {}) + current_distribution = state.get("current_distribution", {}) + cryptocurrencies = state.get("cryptocurrencies", []) + + # Format signal summaries + signal_summary = {} + for crypto, signal in signal_outputs.items(): + try: + # Try Pydantic model access + signal_summary[crypto] = { + "action": signal.signal.action, + "confidence": signal.signal.confidence, + "reasoning": signal.signal.reasoning + } + except AttributeError: + # It's a dict + signal_data = signal.get("signal", {}) + signal_summary[crypto] = { + "action": signal_data.get("action", "unknown"), + "confidence": signal_data.get("confidence", 0), + "reasoning": signal_data.get("reasoning", "") + } + + portfolio_str = json.dumps(portfolio, indent=2, default=str) if portfolio else "No portfolio data" + orders_str = json.dumps(orders, indent=2, default=str) if orders else "No orders" + + return f""" +# Evaluate Portfolio Risk + +## Portfolio Holdings +{portfolio_str} + +## Current Distribution (percentages) +{json.dumps(current_distribution, indent=2)} + +## Open Orders +{orders_str} + +## Tracked Cryptocurrencies +{json.dumps(cryptocurrencies, indent=2)} + +## Signal Outputs from Crypto Agents +{json.dumps(signal_summary, indent=2, default=str)} + +## Reference Market +{portfolio.get('reference_market', 'USD')} + +## Task +Evaluate the portfolio risk considering: +1. Concentration risk from any single asset dominating the portfolio +2. Volatility exposure based on the assets held +3. Liquidity risk from position sizes +4. Open orders that may affect risk profile +5. Signals suggesting increased volatility or directional moves + +Provide risk metrics, maximum allocation limits, and mitigation recommendations as JSON. +""" + + async def execute(self, input_data: typing.Any, ai_service) -> typing.Any: + """ + Execute risk assessment. + + Args: + input_data: The current agent state (AIAgentState). + ai_service: The AI service instance. + + Returns: + Dictionary with risk_output. + """ + state = input_data + self.logger.debug(f"Starting {self.name}...") + + try: + messages = [ + {"role": "system", "content": self.prompt}, + {"role": "user", "content": self._build_user_prompt(state)}, + ] + + # Uses RiskAIAgentChannel.OUTPUT_SCHEMA (RiskAssessmentOutput) by default + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + ) + + # Parse into model + risk_output = RiskAssessmentOutput(**response_data) + + self.logger.debug(f"{self.name} completed successfully.") + + return {"risk_output": risk_output} + + except Exception as e: + self.logger.exception(f"Error in {self.name}: {e}") + return {} + + +async def run_risk_agent(state: AIAgentState, ai_service, agent_id: str = "risk-agent") -> dict: + """ + Convenience function to run the risk agent. + + Args: + state: The current agent state. + ai_service: The AI service instance. + agent_id: Unique identifier for the agent instance. + + Returns: + State updates from the agent. + """ + risk_agent = RiskAIAgentProducer(channel=None) + return await risk_agent.execute(state, ai_service) diff --git a/packages/tentacles/Agent/sub_agents/risk_judge_agent/__init__.py b/packages/tentacles/Agent/sub_agents/risk_judge_agent/__init__.py new file mode 100644 index 0000000000..8cf5b36221 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_judge_agent/__init__.py @@ -0,0 +1 @@ +from .risk_judge_agent import RiskJudgeAIAgentProducer diff --git a/packages/tentacles/Agent/sub_agents/risk_judge_agent/metadata.json b/packages/tentacles/Agent/sub_agents/risk_judge_agent/metadata.json new file mode 100644 index 0000000000..badf9e8c2b --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_judge_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["RiskJudgeAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/risk_judge_agent/risk_judge_agent.py b/packages/tentacles/Agent/sub_agents/risk_judge_agent/risk_judge_agent.py new file mode 100644 index 0000000000..c791c371c6 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/risk_judge_agent/risk_judge_agent.py @@ -0,0 +1,124 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Risk Judge Agent. +Implements AIJudgeAgentProducer: evaluates risk debate history and decides continue or exit with summary. +""" +import typing + +import octobot_commons.logging as logging +import octobot_agents.models as agent_models +import octobot_agents.enums as agent_enums +import octobot_services.services.abstract_ai_service as abstract_ai_service + +from octobot_services.enums import AIModelPolicy +import octobot_agents.team.judge as agent_judge + + +class RiskJudgeAIAgentProducer(agent_judge.AIJudgeAgentProducer): + """ + Risk judge agent: evaluates debate history from risk debators (e.g. risky/safe/neutral) + and decides whether to continue the debate or exit with a risk synthesis summary. + """ + MODEL_POLICY = AIModelPolicy.REASONING + + def __init__( + self, + channel: typing.Optional[typing.Any] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens or 2000, + temperature=temperature if temperature is not None else 0.3, + **kwargs, + ) + self.logger = logging.get_logger(self.__class__.__name__) + + def _get_default_prompt(self) -> str: + return """You are a Risk Judge in a portfolio risk debate. +You receive a debate history: messages from debators (e.g. risky analyst, safe analyst, neutral analyst) arguing about portfolio risk. + +Your role: +1. Evaluate whether the debate has reached a clear conclusion or needs more rounds. +2. If views have converged or max useful exchange reached, decide "exit" and provide a short synthesis summary (risk level and key recommendations). +3. If important points are still unresolved, decide "continue" and briefly explain why. + +Output a JSON object with: +- "decision": exactly "continue" or "exit" +- "reasoning": short explanation for your decision +- "summary": when decision is "exit", a concise risk synthesis (overall risk level and top recommendations); when "continue", null or empty +""" + + async def execute( + self, + input_data: typing.Union[agent_models.JudgeInput, typing.Dict[str, typing.Any]], + ai_service: abstract_ai_service.AbstractAIService, + ) -> agent_models.JudgeDecision: + debate_history = input_data.get("debate_history", []) + debator_agent_names = input_data.get("debator_agent_names", []) + current_round = input_data.get("current_round", 1) + max_rounds = input_data.get("max_rounds", 3) + + if not debate_history: + return agent_models.JudgeDecision( + decision=agent_enums.JudgeDecisionType.EXIT.value, + reasoning="No debate history; exiting.", + summary="No risk debate content.", + ) + + debate_text = "\n\n".join( + f"[Round {e.get('round', '?')}] {e.get('agent_name', '?')}: {e.get('message', '')}" + for e in debate_history + ) + user_content = f"""Debate history (round {current_round} of {max_rounds}, debators: {debator_agent_names}): + +{debate_text} + +Decide: continue the debate or exit with a risk synthesis?""" + + messages = [ + {"role": "system", "content": self._get_default_prompt()}, + {"role": "user", "content": user_content}, + ] + + try: + response = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=agent_models.JudgeDecision, + ) + except Exception as e: + self.logger.exception(f"Risk judge LLM call failed: {e}") + return agent_models.JudgeDecision( + decision=agent_enums.JudgeDecisionType.EXIT.value, + reasoning=f"Error: {e}", + summary=None, + ) + + if isinstance(response, dict): + return agent_models.JudgeDecision( + decision=response.get("decision", agent_enums.JudgeDecisionType.EXIT.value), + reasoning=response.get("reasoning", ""), + summary=response.get("summary"), + ) + return response diff --git a/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/__init__.py b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/__init__.py new file mode 100644 index 0000000000..afead98bc8 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/__init__.py @@ -0,0 +1,6 @@ +from .sentiment_analysis_agent import ( + SentimentAnalysisAIAgentChannel, + SentimentAnalysisAIAgentConsumer, + SentimentAnalysisAIAgentProducer, +) +from .models import SentimentAnalysisOutput diff --git a/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/metadata.json b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/metadata.json new file mode 100644 index 0000000000..2e544c5642 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SentimentAnalysisAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/models.py b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/models.py new file mode 100644 index 0000000000..c997ac8850 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/models.py @@ -0,0 +1,77 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for sentiment analysis agent outputs. +""" +from typing import List, Optional +from pydantic import BaseModel, Field, field_validator + +import octobot_agents.models as agent_models + + +class SentimentMetrics(agent_models.AgentBaseModel): + __strict_json_schema__ = True + """Sentiment analysis metrics.""" + sentiment_score: float = Field( + ge=-1.0, + le=1.0, + description="Sentiment score from -1 (very negative) to 1 (very positive)." + ) + + @field_validator("sentiment_score") + def validate_score(cls, v: float) -> float: + return max(-1.0, min(1.0, v)) + + +class SentimentAnalysisOutput(agent_models.AgentBaseModel): + """Output from the sentiment analysis agent.""" + __strict_json_schema__ = True + + eval_note: float = Field( + ge=-1.0, + le=1.0, + description="Evaluation score from -1 (very negative) to 1 (very positive)." + ) + confidence: float = Field( + ge=0.0, + le=1.0, + description="Confidence level of the sentiment analysis (0-1)." + ) + description: str = Field( + description="Summary description of the sentiment analysis." + ) + sentiment_score: float = Field( + ge=-1.0, + le=1.0, + description="Detailed sentiment score from -1 to 1. Same as eval_note." + ) + sources_analyzed: Optional[List[str]] = Field( + default=None, + description="Data sources used for sentiment analysis. Leave empty if none identified." + ) + key_mentions: Optional[List[str]] = Field( + default=None, + description="Key mentions or topics driving sentiment. Leave empty if none." + ) + market_implications: Optional[str] = Field( + default=None, + description="Implications of sentiment for market direction. Leave empty if unclear." + ) + recommendations: Optional[List[str]] = Field( + default=None, + description="Trading recommendations based on sentiment. Leave empty if none." + ) diff --git a/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/sentiment_analysis_agent.py b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/sentiment_analysis_agent.py new file mode 100644 index 0000000000..5225052dd3 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/sentiment_analysis_agent/sentiment_analysis_agent.py @@ -0,0 +1,117 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels + +from .models import SentimentAnalysisOutput + + +class SentimentAnalysisAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = SentimentAnalysisOutput + + +class SentimentAnalysisAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class SentimentAnalysisAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """Producer specialized in sentiment analysis evaluation.""" + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = SentimentAnalysisAIAgentChannel + AGENT_CONSUMER = SentimentAnalysisAIAgentConsumer + + def __init__(self, channel, **kwargs): + super().__init__(channel, **kwargs) + + def _get_default_prompt(self) -> str: + return ( + "You are a Social Sentiment Analysis AI expert.\n\n" + "Follow these steps:\n" + "1. Review diverse social signals: news sentiment, market buzz, community discussions, global indicators\n" + "2. Assess overall market mood objectively: Determine if sentiment is bullish, bearish, or neutral\n" + "3. Consider signal sources: Institutional news (ETFs, regulations) outweighs social media noise\n" + "4. Evaluate sentiment strength and consistency across sources\n" + "5. Calculate eval_note using full range from -1 to 1: Most markets show neutral to mildly bullish/bearish sentiment\n" + "6. Assess confidence (0-1) based on data quality and signal clarity\n" + "7. Provide detailed analysis explaining the score\n\n" + "IMPORTANT: Positive regulatory developments are MAJOR bullish signals. Institutional news outweighs social media sentiment.\n" + "Markets are rarely extremely bullish or bearish - use extreme values (-1/1) only for very strong, consistent signals.\n\n" + "MANDATORY FIELDS (always include):\n" + "- eval_note: float between -1 (very bearish sentiment) to 1 (very bullish sentiment)\n" + "- confidence: float between 0 (low confidence) to 1 (high confidence)\n" + "- description: detailed explanation of the sentiment analysis\n" + "- sentiment_score: float between -1 to 1 for aggregate sentiment\n\n" + "OPTIONAL FIELDS (only include if available):\n" + "- sources_analyzed: list of sentiment sources examined (e.g., 'Twitter', 'News', 'Crypto Forums') - Leave empty if not clearly identified\n" + "- key_mentions: list of key topics/assets/events mentioned - Leave empty if none stand out\n" + "- market_implications: string describing sentiment impact on market - Leave empty if unclear\n" + "- recommendations: list of action recommendations based on sentiment - Leave empty if none\n\n" + "If you lack data for any optional field, omit it from the response (leave as null).\n" + "Output only valid JSON matching the SentimentAnalysisOutput schema." + ) + + async def execute(self, input_data, ai_service) -> dict: + """Evaluate aggregated sentiment analysis data.""" + aggregated_data = input_data + if not aggregated_data: + return { + "eval_note": 0, + "eval_note_description": "No sentiment analysis data available", + "confidence": 0, + } + + data_str = json.dumps(aggregated_data, indent=2) + + messages = [ + ai_service.create_message("system", self.prompt), + ai_service.create_message( + "user", + f"Sentiment analysis data:\n{data_str}\n\n" + "Provide evaluation as JSON matching the SentimentAnalysisOutput schema. " + "Include mandatory fields (eval_note, confidence, description, sentiment_score). " + "Include optional fields only if you have data for them.", + ), + ] + + try: + # Uses SentimentAnalysisAIAgentChannel.OUTPUT_SCHEMA by default + parsed = await self._call_llm( + messages, + ai_service, + json_output=True, + ) + eval_note = float(parsed.get("eval_note", 0)) + eval_note_description = parsed.get("description", "Sentiment analysis") + confidence = float(parsed.get("confidence", 0)) + + # Clamp values + eval_note = max(-1, min(1, eval_note)) + confidence = max(0, min(1, confidence)) + + return { + "eval_note": eval_note, + "eval_note_description": eval_note_description, + "confidence": int(confidence * 100), # Convert to 0-100 range + } + except Exception as e: + self.logger.error(f"Error in sentiment analysis: {e}") + return { + "eval_note": 0, + "eval_note_description": f"Error in sentiment analysis: {str(e)}", + "confidence": 0, + } diff --git a/packages/tentacles/Agent/sub_agents/signal_agent/__init__.py b/packages/tentacles/Agent/sub_agents/signal_agent/__init__.py new file mode 100644 index 0000000000..7609213f16 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/signal_agent/__init__.py @@ -0,0 +1,13 @@ +from .signal_agent import ( + SignalAIAgentChannel, + SignalAIAgentConsumer, + SignalAIAgentProducer, + run_signal_agent, +) +from .models import ( + SignalRecommendation, + CryptoSignalOutput, + SynthesizedSignal, + SignalSynthesisOutput, +) +from .state import AIAgentState diff --git a/packages/tentacles/Agent/sub_agents/signal_agent/metadata.json b/packages/tentacles/Agent/sub_agents/signal_agent/metadata.json new file mode 100644 index 0000000000..a77368a5f8 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/signal_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SignalAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/signal_agent/models.py b/packages/tentacles/Agent/sub_agents/signal_agent/models.py new file mode 100644 index 0000000000..5906259913 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/signal_agent/models.py @@ -0,0 +1,203 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for signal agent outputs. +""" +from enum import Enum +from typing import List, Literal +from pydantic import BaseModel, Field, AliasChoices, field_validator, model_validator + +import octobot_agents.models as agent_models + + +class SignalDirection(str, Enum): + BULLISH = "bullish" + BEARISH = "bearish" + NEUTRAL = "neutral" + + +class ConsensusLevel(str, Enum): + STRONG = "strong" + MODERATE = "moderate" + WEAK = "weak" + CONFLICTING = "conflicting" + + +class MarketOutlook(str, Enum): + BULLISH = "bullish" + BEARISH = "bearish" + NEUTRAL = "neutral" + MIXED = "mixed" + + +class SignalRecommendation(agent_models.AgentBaseModel): + """A trading signal recommendation for an asset.""" + __strict_json_schema__ = True + action: Literal["buy", "sell", "hold", "increase", "decrease"] = Field( + description="Trading action: 'buy', 'sell', 'hold', 'increase', 'decrease'." + ) + confidence: float = Field( + default=0.5, + ge=0.0, + le=1.0, + description="Confidence level of the signal (0 to 1)." + ) + reasoning: str = Field( + description="Explanation of why this signal was generated." + ) + + +class CryptoSignalOutput(agent_models.AgentBaseModel): + """Output from a cryptocurrency signal agent.""" + __strict_json_schema__ = True + + cryptocurrency: str = Field(description="The cryptocurrency being analyzed.") + signal: SignalRecommendation = Field(description="The trading signal for this cryptocurrency.") + market_context: str = Field(description="Brief description of current market context.") + key_factors: List[str] = Field( + default_factory=list, + description="Key factors influencing this signal." + ) + + +class SynthesizedSignal(agent_models.AgentBaseModel): + """A synthesized signal for an asset combining multiple signal sources. + + Strict schema enforcement: All fields are required with correct types. + The LLM must return the exact format specified. + """ + __strict_json_schema__ = True + asset: str = Field( + description="The asset symbol (e.g., 'BTC', 'ETH'). Must be a string.", + validation_alias=AliasChoices("asset", "symbol") + ) + direction: SignalDirection = Field( + description="Synthesized direction: 'bullish', 'bearish', or 'neutral'. Must be one of these exact values." + ) + strength: float = Field( + description="Signal strength as a number between 0.0 and 1.0. Must be a float, NOT a string like 'strong'.", + ge=0.0, + le=1.0 + ) + consensus_level: ConsensusLevel = Field( + description="Level of agreement between signals: 'strong', 'moderate', 'weak', or 'conflicting'. " + "This is different from 'strength' - do NOT confuse them." + ) + trading_instruction: str = Field( + description="Clear trading instruction derived from signals. Must be a descriptive string." + ) + + @field_validator("consensus_level", mode="before") + def normalize_consensus_level(cls, v): + try: + level = v.lower().strip() + if level in {"neutral", "none", "no", "null", "n/a", "na", "unknown"}: + level = ConsensusLevel.WEAK.value + if level not in {item.value for item in ConsensusLevel}: + raise ValueError(f"consensus_level must be one of {[item.value for item in ConsensusLevel]}") + return level + except AttributeError: + pass + return v + + @field_validator("direction", mode="before") + def normalize_direction(cls, v): + try: + direction = v.lower().strip() + except AttributeError: + return v + if direction in {"stable", "flat", "sideways", "range", "ranging"}: + direction = SignalDirection.NEUTRAL.value + if direction not in {item.value for item in SignalDirection}: + raise ValueError(f"direction must be one of {[item.value for item in SignalDirection]}") + return direction + + +class SignalSynthesisOutput(agent_models.AgentBaseModel): + """Output from the signal manager agent - synthesizes all signals.""" + __strict_json_schema__ = True + + synthesized_signals: List[SynthesizedSignal] = Field( + description="List of synthesized signals per asset.", + validation_alias=AliasChoices("synthesized_signals", "signals") + ) + market_outlook: MarketOutlook = Field( + description="Overall market outlook: 'bullish', 'bearish', 'neutral', 'mixed'." + ) + summary: str = Field( + description="Summary of the synthesized signals without making decisions." + ) + + @model_validator(mode="before") + @classmethod + def normalize_input(cls, data): + if isinstance(data, str): + recovered = cls.recover_json_from_error(data) + if recovered: + return recovered + if isinstance(data, dict): + if "error" in data: + recovered = cls.recover_json_from_error(data.get("error")) + if recovered: + return recovered + # Unwrap accidental wrapper payloads. + synthesis = data.get("synthesis") + if isinstance(synthesis, dict): + data = synthesis + # Some models return a single synthesized-signal object instead of + # the required synthesis wrapper. + if ( + "synthesized_signals" not in data + and "signals" not in data + and {"asset", "direction", "strength", "consensus_level", "trading_instruction"} <= set(data.keys()) + ): + return { + "synthesized_signals": [data], + "market_outlook": "neutral", + "summary": "Recovered from malformed synthesis output.", + } + return data + + @model_validator(mode="after") + def ensure_synthesis_present(self): + if not self.synthesized_signals: + raise ValueError("synthesized_signals must not be empty") + return self + + @field_validator("market_outlook", mode="before") + def normalize_market_outlook(cls, v): + try: + outlook = v.lower().strip() + except AttributeError: + return v + allowed = {item.value for item in MarketOutlook} + if outlook in allowed: + return outlook + for value in allowed: + if outlook.startswith(value): + return value + has_bullish = "bullish" in outlook + has_bearish = "bearish" in outlook + if has_bullish and has_bearish: + return MarketOutlook.MIXED.value + if "neutral" in outlook or "stable" in outlook or "sideways" in outlook: + return MarketOutlook.NEUTRAL.value + if has_bullish: + return MarketOutlook.BULLISH.value + if has_bearish: + return MarketOutlook.BEARISH.value + raise ValueError(f"market_outlook must be one of {[item.value for item in MarketOutlook]}") diff --git a/packages/tentacles/Agent/sub_agents/signal_agent/signal_agent.py b/packages/tentacles/Agent/sub_agents/signal_agent/signal_agent.py new file mode 100644 index 0000000000..2c27852621 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/signal_agent/signal_agent.py @@ -0,0 +1,361 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Signal Agent. +Analyzes all cryptocurrencies and generates both individual and synthesized signals. +Combines per-crypto analysis with overall market signal synthesis in a single agent. +""" +import json +import typing + +from pydantic import BaseModel, model_validator +from typing import List + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels +import octobot_agents.models as agent_models +import octobot_agents.utils.extractor as agent_extractor +from octobot_services.enums import AIModelPolicy + +from .state import AIAgentState +from .models import CryptoSignalOutput, SignalSynthesisOutput + + +class SignalAgentOutput(agent_models.AgentBaseModel): + """Output schema for SignalAIAgentProducer.""" + __strict_json_schema__ = True + + per_crypto_signals: List[CryptoSignalOutput] + synthesis: SignalSynthesisOutput + + @model_validator(mode="after") + def ensure_output_present(self): + if not self.per_crypto_signals: + raise ValueError("per_crypto_signals must not be empty") + if not self.synthesis: + raise ValueError("synthesis must not be empty") + return self + + +class SignalAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = SignalAgentOutput + + +class SignalAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class SignalAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """ + Signal agent producer that analyzes all cryptocurrencies and synthesizes signals. + + This agent: + 1. Analyzes each cryptocurrency against all available data + 2. Generates individual signals with confidence levels + 3. Synthesizes signals across all cryptos to identify market consensus + """ + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = SignalAIAgentChannel + AGENT_CONSUMER = SignalAIAgentConsumer + ENABLE_MEMORY = True + MODEL_POLICY = AIModelPolicy.FAST + + def __init__(self, channel, model=None, max_tokens=None, temperature=None, **kwargs): + """ + Initialize the signal agent producer. + + Args: + channel: The channel this producer is registered to. + model: LLM model to use. + max_tokens: Maximum tokens for response. + temperature: Temperature for LLM randomness. + """ + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + """Return the default system prompt.""" + return """ +You are a Comprehensive Signal Analysis Agent for cryptocurrency portfolio management. +Your task is to analyze all tracked cryptocurrencies and generate both individual trading signals and synthesized market signals. + +## Your Dual Role + +### Part 1: Per-Cryptocurrency Analysis +- Analyze each cryptocurrency provided +- Consider global market strategy data and crypto-specific data +- Consider current portfolio holdings and open orders +- Generate a clear trading signal with confidence level +- Identify key factors driving each signal + +### Part 2: Signal Synthesis (CRITICAL) +- Identify consensus across cryptocurrency signals +- Synthesize signals into clear trading instructions +- Provide overall market outlook +- Do NOT make allocation decisions - only synthesize + +## Per-Crypto Signal Actions (for "action" field only) +- "buy": Strong bullish signal, recommend increasing position +- "sell": Strong bearish signal, recommend decreasing position +- "hold": Neutral signal, recommend maintaining current position +- "increase": Moderate bullish, suggest gradual increase +- "decrease": Moderate bearish, suggest gradual decrease + +## CRITICAL: Synthesis Direction Values (for "direction" field ONLY) +When synthesizing signals, ALWAYS use ONLY these exact values for direction: +- "bullish": Positive market direction +- "bearish": Negative market direction +- "neutral": No clear direction + +DO NOT use "buy", "sell", "hold", "increase", or "decrease" in the direction field. ONLY use: bullish, bearish, or neutral. + +## Consensus Levels (for "consensus_level" field ONLY) +Must be EXACTLY one of: +- "strong": High agreement (>0.7 confidence) +- "moderate": Moderate agreement (0.5-0.7) +- "weak": Low agreement or mixed signals +- "conflicting": Opposing signals + +⚠️ CRITICAL: "neutral" is NOT valid for consensus_level - use "weak" instead. +⚠️ CRITICAL: "none", "null", "n/a", and "unknown" are NOT valid for consensus_level - use "weak" instead. + +If you would normally output "neutral", "none", "null", "n/a", or "unknown" for consensus_level, output "weak". + +## Market Outlook (for "market_outlook" field) +Must be EXACTLY one of: +- "bullish": Majority positive signals +- "bearish": Majority negative signals +- "neutral": Balanced or low conviction +- "mixed": Strong conflicting signals +CRITICAL: market_outlook MUST be a single word enum value only. +Do NOT output explanations like "neutral with bearish risks" in market_outlook. +Put all nuance in "summary" instead. + +## REQUIRED OUTPUT SCHEMA - STRICT ENFORCEMENT + +The "synthesis" object MUST include ALL of these REQUIRED fields: +- "synthesized_signals": Array where each object has ALL of these REQUIRED fields: + * "asset" (string): REQUIRED - Asset symbol like "BTC" or "ETH" + * "direction" (string): REQUIRED - EXACTLY "bullish", "bearish", or "neutral" + * "strength" (number): REQUIRED - Float between 0.0 and 1.0 (NOT a string like "strong") + * "consensus_level" (string): REQUIRED - EXACTLY "strong", "moderate", "weak", or "conflicting" + * "trading_instruction" (string): REQUIRED - Clear trading instruction text +- "market_outlook" (string): REQUIRED - EXACTLY "bullish", "bearish", "neutral", or "mixed" +- "summary" (string): REQUIRED - Summary text + +CRITICAL: Every field is REQUIRED. Do NOT omit any field. "strength" must be a NUMBER, NOT a string. +CRITICAL: "synthesized_signals" MUST be a non-empty array. If you have low confidence, still provide entries with "neutral"/"weak" and low strength. + +Be precise, data-driven, and base all recommendations ONLY on provided data. +""" + + def _format_strategy_data(self, data: dict) -> str: + """Format strategy data for the prompt.""" + if not data: + return "No data available" + return json.dumps(data, indent=2, default=str) + + def _build_user_prompt(self, state: AIAgentState) -> str: + """Build the user prompt with all available data.""" + global_strategy = state.get("global_strategy_data", {}) + crypto_strategy = state.get("crypto_strategy_data", {}) + cryptocurrencies = state.get("cryptocurrencies", []) + portfolio = state.get("portfolio", {}) + orders = state.get("orders", {}) + current_distribution = state.get("current_distribution", {}) + + global_filtered = dict(global_strategy) + crypto_filtered = dict(crypto_strategy) + try: + global_entries = global_strategy.get("STRATEGIES", []) + except Exception: + global_entries = [] + try: + crypto_entries = crypto_strategy.get("STRATEGIES", []) + except Exception: + crypto_entries = [] + + try: + global_filtered["STRATEGIES"] = [ + entry for entry in global_entries if entry.get("cryptocurrency") is None + ] + except Exception: + pass + + try: + crypto_filtered["STRATEGIES"] = [ + entry for entry in crypto_entries if entry.get("cryptocurrency") is not None + ] + except Exception: + pass + + portfolio_str = json.dumps(portfolio, indent=2, default=str) if portfolio else "No portfolio data" + orders_str = json.dumps(orders, indent=2, default=str) if orders else "No orders" + + return f""" +# Analyze All Cryptocurrencies and Synthesize Signals + +## Global Strategy Data +{self._format_strategy_data(global_filtered)} + +## Per-Cryptocurrency Strategy Data +{self._format_strategy_data(crypto_filtered)} + +## Tracked Cryptocurrencies +{json.dumps(cryptocurrencies, indent=2)} + +## Current Portfolio Context +{portfolio_str} + +## Current Distribution +{json.dumps(current_distribution, indent=2)} + +## Open Orders +{orders_str} + +## Reference Market +{portfolio.get('reference_market', 'USD')} + +## Task + +1. **Generate Individual Signals**: For each cryptocurrency, analyze all available data and generate: + - Trading signal (buy/sell/hold/increase/decrease) + - Confidence level (0-1) + - Reasoning based on strategy data + - Market context + - Key factors (max 5) + +2. **Synthesize Signals**: After analyzing all cryptos, synthesize them into: + - Synthesized signal for each cryptocurrency with direction and strength + - Consensus level for each asset + - Clear trading instructions (without specific percentages) + - Overall market outlook + - Summary of the synthesis + +## REQUIRED OUTPUT FORMAT - STRICT SCHEMA + +The "synthesis" object MUST contain: +- "synthesized_signals": Array of objects, each with ALL of these REQUIRED fields: + * "asset" (string): Asset symbol like "BTC" or "ETH" - REQUIRED + * "direction" (string): EXACTLY one of "bullish", "bearish", "neutral" - REQUIRED + * "strength" (number): Float between 0.0 and 1.0 - REQUIRED (NOT a string like "strong") + * "consensus_level" (string): EXACTLY one of "strong", "moderate", "weak", "conflicting" - REQUIRED + * "trading_instruction" (string): Clear trading instruction text - REQUIRED +- "market_outlook" (string): EXACTLY one of "bullish", "bearish", "neutral", "mixed" - REQUIRED +- "summary" (string): Summary text - REQUIRED + +CRITICAL: Every field above is REQUIRED. Do NOT omit any field. "strength" must be a NUMBER (0.0-1.0), NOT a string. + +Output a JSON object with TWO sections: +- "per_crypto_signals": Array of individual signals +- "synthesis": Overall signal synthesis (with ALL required fields above) + +Remember: Base ONLY on the provided data. Do not make allocation decisions - only synthesize. +""" + + async def execute(self, input_data: typing.Any, ai_service) -> typing.Any: + """ + Execute signal analysis and synthesis. + + Args: + input_data: The current agent state (AIAgentState). + ai_service: The AI service instance. + + Returns: + Dictionary with signal_outputs and signal_synthesis. + """ + state = input_data + self.logger.debug(f"Starting {self.name}...") + + try: + messages = [ + {"role": "system", "content": self.prompt}, + {"role": "user", "content": self._build_user_prompt(state)}, + ] + + # Uses SignalAIAgentChannel.OUTPUT_SCHEMA (SignalAgentOutput) by default + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + ) + + try: + response_data.get("synthesis", {}) + except AttributeError: + parsed = agent_extractor.extract_json_from_content(str(response_data)) + if parsed is None: + raise ValueError("Failed to parse JSON response.") + response_data = parsed + if not isinstance(response_data, dict): + raise ValueError("Signal agent response must be a JSON object.") + if "synthesis" not in response_data: + # If the model omitted the expected wrapper, treat the whole + # object as synthesis payload without creating recursive dicts. + response_data = { + "per_crypto_signals": response_data.get("per_crypto_signals", []), + "synthesis": dict(response_data), + } + + # Process per-crypto signals + signal_outputs = {"signals": {}} + per_crypto = response_data.get("per_crypto_signals", []) + + for signal_data in per_crypto: + crypto = signal_data.get("cryptocurrency", "") + if crypto: + signal_output = CryptoSignalOutput(**signal_data) + signal_outputs["signals"][crypto] = signal_output + + # Process synthesis + synthesis_data = response_data.get("synthesis", {}) + if synthesis_data: + synthesis_output = SignalSynthesisOutput(**synthesis_data) + else: + raise ValueError("Signal synthesis is missing or empty.") + + self.logger.debug(f"{self.name} completed successfully.") + + return { + "signal_outputs": signal_outputs, + "signal_synthesis": synthesis_output, + } + + except Exception as e: + self.logger.exception(f"Error in {self.name}: {e}") + raise + + +async def run_signal_agent(state: AIAgentState, ai_service, agent_id: str = "signal-agent") -> dict: + """ + Convenience function to run the signal agent. + + Args: + state: The current agent state. + ai_service: The AI service instance. + agent_id: Unique identifier for the agent instance. + + Returns: + State updates from the agent. + """ + signal_agent = SignalAIAgentProducer(channel=None) + return await signal_agent.execute(state, ai_service) diff --git a/packages/tentacles/Agent/sub_agents/signal_agent/state.py b/packages/tentacles/Agent/sub_agents/signal_agent/state.py new file mode 100644 index 0000000000..220e86fce8 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/signal_agent/state.py @@ -0,0 +1,84 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +AI Agent State definitions for the trading mode agents. +""" +from typing import Dict, List, Optional, Any +from typing_extensions import Annotated, TypedDict + + +def merge_dicts(a: dict, b: dict) -> dict: + """Merge two dictionaries, with b overwriting a.""" + return {**a, **b} + + +def replace_value(a: Any, b: Any) -> Any: + """Replace value a with value b.""" + return b + + +class PortfolioState(TypedDict, total=False): + """Current portfolio state from trading API.""" + holdings: Dict[str, float] # {asset: amount} + holdings_value: Dict[str, float] # {asset: value_in_reference_market} + total_value: float + reference_market: str + available_balance: float + + +class OrdersState(TypedDict, total=False): + """Current orders state from trading API.""" + open_orders: List[Dict[str, Any]] + pending_orders: List[Dict[str, Any]] + recent_trades: List[Dict[str, Any]] + + +class StrategyData(TypedDict, total=False): + """Strategy evaluation data.""" + eval_note: float + description: str + metadata: Dict[str, Any] + cryptocurrency: Optional[str] + symbol: Optional[str] + evaluation_type: str + + +class AIAgentState(TypedDict, total=False): + """ + Shared state for all AI trading agents. + Contains strategy data, portfolio info, and agent outputs. + """ + # Input data + global_strategy_data: Dict[str, List[Any]] + crypto_strategy_data: Dict[str, Dict[str, List[Any]]] # {cryptocurrency: strategy_data} + cryptocurrencies: List[str] + reference_market: str + + # Trading context + portfolio: Dict[str, Any] + orders: Dict[str, Any] + current_distribution: Dict[str, float] # Current portfolio distribution percentages + + # Agent outputs + signal_outputs: Dict[str, Any] + risk_output: Optional[Any] + signal_synthesis: Optional[Any] + distribution_output: Optional[Any] + + # Metadata + exchange_name: str + timestamp: str diff --git a/packages/tentacles/Agent/sub_agents/summarization_agent/__init__.py b/packages/tentacles/Agent/sub_agents/summarization_agent/__init__.py new file mode 100644 index 0000000000..fc90e4e789 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/summarization_agent/__init__.py @@ -0,0 +1,6 @@ +from .summarization_agent import ( + SummarizationAIAgentChannel, + SummarizationAIAgentConsumer, + SummarizationAIAgentProducer, +) +from .models import SummarizationOutput diff --git a/packages/tentacles/Agent/sub_agents/summarization_agent/metadata.json b/packages/tentacles/Agent/sub_agents/summarization_agent/metadata.json new file mode 100644 index 0000000000..ebf2924912 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/summarization_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SummarizationAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/summarization_agent/models.py b/packages/tentacles/Agent/sub_agents/summarization_agent/models.py new file mode 100644 index 0000000000..f4f4c0e804 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/summarization_agent/models.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for summarization agent outputs. +""" +from pydantic import BaseModel, Field + +import octobot_agents.models as agent_models + + +class SummarizationOutput(agent_models.AgentBaseModel): + __strict_json_schema__ = True + """Output from the summarization agent.""" + eval_note: float = Field( + ge=-1.0, + le=1.0, + description="Final evaluation score from -1 (strong sell) to 1 (strong buy)." + ) + confidence: float = Field( + ge=0.0, + le=1.0, + description="Confidence level of the final analysis (0-1)." + ) + description: str = Field( + description="Comprehensive summary of market analysis including key consensus points, overall outlook (bullish/bearish/mixed), key recommendations, and identified risks." + ) diff --git a/packages/tentacles/Agent/sub_agents/summarization_agent/summarization_agent.py b/packages/tentacles/Agent/sub_agents/summarization_agent/summarization_agent.py new file mode 100644 index 0000000000..40b7388e91 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/summarization_agent/summarization_agent.py @@ -0,0 +1,262 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import typing + +import octobot_commons.constants as common_constants +import octobot_agents.constants as agent_constants +import octobot_agents.agent.channels.ai_agent as ai_agent_channels + +from .models import SummarizationOutput + + +class AgentResult(typing.TypedDict, total=False): + """Type definition for agent evaluation results.""" + eval_note: float | str | None + eval_note_description: str + confidence: float + error: str + agent_name: str + agent_id: str + result: dict # Nested result from team execution + + +# Input can be either a dict mapping agent names to results, or a list of results +AgentResultsDict = dict[str, AgentResult] +AgentResultsList = list[AgentResult] +AgentResultsInput = AgentResultsDict | AgentResultsList + + +class SummarizationAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = SummarizationOutput + + +class SummarizationAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class SummarizationAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """Producer specialized in combining multiple evaluations into a final recommendation.""" + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = SummarizationAIAgentChannel + AGENT_CONSUMER = SummarizationAIAgentConsumer + ENABLE_MEMORY = False + DEFAULT_CONFIDENCE = 50 # Default confidence value (0-100 scale) + + def __init__(self, channel, synthesis_method: str = "weighted", **kwargs): + super().__init__(channel, **kwargs) + self.synthesis_method = synthesis_method + + def _get_default_prompt(self) -> str: + return ( + "You are a Market Analysis Synthesis AI expert. Your task is to combine multiple specialized AI agent evaluations " + "into a single, coherent, and actionable trading recommendation.\n\n" + "Follow these steps:\n" + "1. Review all agent evaluations comprehensively: technical, sentiment, and real-time analysis\n" + "2. Assess overall market direction: Determine if signals indicate strong bullish, bearish, neutral, or mixed conditions\n" + "3. Consider different perspectives: Weight technical signals, social sentiment, and real-time momentum appropriately\n" + "4. Evaluate signal convergence/divergence: Look for confirmation across agents vs. conflicting signals\n" + "5. Calculate balanced final eval_note: Use the full range from -1 (strong sell) to 1 (strong buy), but most syntheses result in neutral to mildly bullish/bearish recommendations\n" + "6. Consider confidence levels (0-1): Higher confidence for consistent signals; lower for conflicting or weak data\n" + "7. Provide detailed reasoning in description: Explain key consensus points, overall outlook, recommendations, and identified risks\n\n" + "Important: Markets are rarely extremely bullish or bearish. Use extreme values (-1/1) only for very strong, consistent signals across all agents.\n\n" + "MANDATORY FIELDS:\n" + "- eval_note: final score from -1 (strong sell) to 1 (strong buy)\n" + "- confidence: confidence level (0-1)\n" + "- description: comprehensive summary including key points, outlook (bullish/bearish/mixed), recommendations, and risks\n\n" + "Output only valid JSON matching the SummarizationOutput schema with these three fields." + ) + + def _collect_failure_details( + self, + agent_results_dict: AgentResultsDict | None, + agent_results_list: AgentResultsList, + ) -> str: + """Collect detailed information about why agent evaluations failed.""" + failure_reasons: list[str] = [] + + # Use dict with agent names if available, otherwise use list + if agent_results_dict is not None: + for agent_name, result in agent_results_dict.items(): + reason = self._get_failure_reason(result, agent_name) + if reason: + failure_reasons.append(reason) + else: + for i, result in enumerate(agent_results_list): + agent_name = result.get("agent_name", f"Agent_{i}") + reason = self._get_failure_reason(result, agent_name) + if reason: + failure_reasons.append(reason) + + if not failure_reasons: + return f"Received {len(agent_results_list)} result(s) but all had eval_note=None" + + return "Failures: " + "; ".join(failure_reasons) + + def _get_failure_reason(self, result: AgentResult, agent_name: str) -> str: + """Extract the failure reason from a single agent result.""" + # Check for explicit error field + if error := result.get("error"): + return f"{agent_name}: {error}" + + # Check for error in description + if result.get("eval_note") is None: + if desc := result.get("eval_note_description"): + # Truncate long descriptions + if len(desc) > 100: + desc = desc[:100] + "..." + return f"{agent_name}: {desc}" + + # Try to find any useful info in the result + available_keys = [k for k in result.keys() if result[k] is not None] + if available_keys: + return f"{agent_name}: eval_note=None (available: {', '.join(available_keys)})" + return f"{agent_name}: eval_note=None (empty result)" + + return "" + + def _unwrap_agent_result(self, result: AgentResult) -> AgentResult: + """ + Unwrap nested result from team execution. + + Team passes results as: {"agent_name": ..., "agent_id": ..., "result": {...}} + We need to extract the inner result dict which contains eval_note, etc. + """ + result_key = agent_constants.RESULT_KEY + agent_name_key = agent_constants.AGENT_NAME_KEY + + try: + inner_result = result[result_key] + if isinstance(inner_result, dict): + # Preserve agent_name if available + if agent_name_key not in inner_result and agent_name_key in result: + inner_result[agent_name_key] = result[agent_name_key] + return typing.cast(AgentResult, inner_result) + except (KeyError, TypeError): + pass + return result + + async def execute( + self, + input_data: AgentResultsInput, + ai_service, + context_info: dict | None = None, + ) -> tuple[float | str, str]: + """Combine multiple agent results into final evaluation.""" + if not input_data: + return common_constants.START_PENDING_EVAL_NOTE, agent_constants.DEFAULT_AGENT_RESULT + + # Convert input to list, preserving dict for failure details if needed + agent_results_dict: AgentResultsDict | None = None + agent_results_list: AgentResultsList + try: + # Try dict access + agent_results_dict = typing.cast(AgentResultsDict, input_data) + agent_results_list = list(agent_results_dict.values()) + except (TypeError, AttributeError): + # List input + agent_results_list = typing.cast(AgentResultsList, input_data) + + # Unwrap nested results from team execution + agent_results_list = [self._unwrap_agent_result(r) for r in agent_results_list] + + # Filter out empty/error results + valid_results = [r for r in agent_results_list if r.get("eval_note") is not None] + + if not valid_results: + # Collect failure details for better debugging + failure_details = self._collect_failure_details(agent_results_dict, agent_results_list) + return common_constants.START_PENDING_EVAL_NOTE, f"All agent evaluations failed. {failure_details}" + + # If only one result, use it directly + if len(valid_results) == 1: + result = valid_results[0] + # eval_note is guaranteed non-None due to valid_results filter + eval_note = result.get("eval_note") or common_constants.INIT_EVAL_NOTE + return float(eval_note), result.get("eval_note_description", "") + + # Prepare summarization data + summary_data = {} + for i, result in enumerate(valid_results): + summary_data[f"agent_{i}"] = { + "eval_note": result.get("eval_note", common_constants.INIT_EVAL_NOTE), + "description": result.get("eval_note_description", ""), + "confidence": result.get("confidence", self.DEFAULT_CONFIDENCE), + } + + # Add context about data completeness + context_str = "" + if context_info: + missing = context_info.get("missing_data_types", []) + available = context_info.get("available_data_types", []) + total = context_info.get("total_expected_types", []) + if missing: + context_str = f"\n\nNote: Analysis is based on incomplete data. Missing evaluator types: {missing}. Available: {available}. Expected total: {total}." + + messages = [ + ai_service.create_message("system", self.prompt), + ai_service.create_message( + "user", + f"Agent evaluations to synthesize:\n{json.dumps(summary_data, indent=2)}{context_str}\n\n" + "Provide final evaluation as JSON matching the SummarizationOutput schema with three fields: eval_note, confidence, and description.", + ), + ] + + try: + # Uses SummarizationAIAgentChannel.OUTPUT_SCHEMA by default + parsed = await self._call_llm( + messages, + ai_service, + json_output=True, + ) + final_eval_note = float(parsed.get("eval_note", common_constants.INIT_EVAL_NOTE)) + final_eval_note_description = parsed.get("description", "AI synthesis") + confidence = float(parsed.get("confidence", self.DEFAULT_CONFIDENCE)) + + # Clamp eval_note + final_eval_note = max(-1, min(1, final_eval_note)) + + # Include confidence in description + final_eval_note_description = f"{final_eval_note_description} (Confidence: {confidence:.1%})" + + return final_eval_note, final_eval_note_description + except Exception as e: + self.logger.error(f"Error in summarization: {e}") + # Fallback: weighted average of agent results + total_weight = 0.0 + weighted_sum = 0.0 + descriptions: list[str] = [] + + for result in valid_results: + confidence = float(result.get("confidence", self.DEFAULT_CONFIDENCE)) / 100.0 # Normalize to 0-1 + eval_note = float(result.get("eval_note") or common_constants.INIT_EVAL_NOTE) + weighted_sum += eval_note * confidence + total_weight += confidence + descriptions.append(result.get("eval_note_description", "")) + + if total_weight > 0: + final_eval_note = weighted_sum / total_weight + else: + final_eval_note = sum( + float(r.get("eval_note") or common_constants.INIT_EVAL_NOTE) for r in valid_results + ) / len(valid_results) + + final_eval_note_description = ( + " | ".join(descriptions) if descriptions else "Fallback synthesis" + ) + + return max(-1, min(1, final_eval_note)), final_eval_note_description diff --git a/packages/tentacles/Agent/sub_agents/technical_analysis_agent/__init__.py b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/__init__.py new file mode 100644 index 0000000000..c0ddb3f27f --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/__init__.py @@ -0,0 +1,6 @@ +from .technical_analysis_agent import ( + TechnicalAnalysisAIAgentChannel, + TechnicalAnalysisAIAgentConsumer, + TechnicalAnalysisAIAgentProducer, +) +from .models import TechnicalAnalysisOutput diff --git a/packages/tentacles/Agent/sub_agents/technical_analysis_agent/metadata.json b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/metadata.json new file mode 100644 index 0000000000..412a7b7a46 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["TechnicalAnalysisAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/sub_agents/technical_analysis_agent/models.py b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/models.py new file mode 100644 index 0000000000..a2f4afd397 --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/models.py @@ -0,0 +1,71 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Pydantic models for technical analysis agent outputs. +""" +from typing import List, Optional +from pydantic import BaseModel, Field, field_validator + +import octobot_agents.models as agent_models + + +class TechnicalAnalysisOutput(agent_models.AgentBaseModel): + __strict_json_schema__ = True + """Output from the technical analysis agent.""" + eval_note: float = Field( + ge=-1.0, + le=1.0, + description="Evaluation score from -1 (strong sell) to 1 (strong buy)." + ) + confidence: float = Field( + ge=0.0, + le=1.0, + description="Confidence level of the analysis (0-1)." + ) + description: str = Field( + description="Summary description of the technical analysis." + ) + trend: Optional[str] = Field( + default=None, + description="Current market trend: 'uptrend', 'downtrend', 'sideways'. Leave empty if unclear." + ) + support_level: Optional[float] = Field( + default=None, + description="Identified support level price. Leave empty if no clear support identified." + ) + resistance_level: Optional[float] = Field( + default=None, + description="Identified resistance level price. Leave empty if no clear resistance identified." + ) + key_indicators: Optional[List[str]] = Field( + default=None, + description="Key technical indicators analyzed. Leave empty if no relevant indicators." + ) + recommendations: Optional[List[str]] = Field( + default=None, + description="Trading recommendations based on technical analysis. Leave empty if no specific recommendations." + ) + + @field_validator("trend") + def validate_trend(cls, v: str) -> str: + if v is None: + return None + allowed_trends = ["uptrend", "downtrend", "sideways"] + v_lower = v.lower() + if v_lower not in allowed_trends: + raise ValueError(f"Trend must be one of {allowed_trends}") + return v_lower diff --git a/packages/tentacles/Agent/sub_agents/technical_analysis_agent/technical_analysis_agent.py b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/technical_analysis_agent.py new file mode 100644 index 0000000000..0b5df5d62e --- /dev/null +++ b/packages/tentacles/Agent/sub_agents/technical_analysis_agent/technical_analysis_agent.py @@ -0,0 +1,115 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import octobot_agents.agent.channels.ai_agent as ai_agent_channels + +from .models import TechnicalAnalysisOutput + + +class TechnicalAnalysisAIAgentChannel(ai_agent_channels.AbstractAIAgentChannel): + OUTPUT_SCHEMA = TechnicalAnalysisOutput + + +class TechnicalAnalysisAIAgentConsumer(ai_agent_channels.AbstractAIAgentChannelConsumer): + pass + + +class TechnicalAnalysisAIAgentProducer(ai_agent_channels.AbstractAIAgentChannelProducer): + """Producer specialized in technical analysis evaluation.""" + + AGENT_VERSION = "1.0.0" + AGENT_CHANNEL = TechnicalAnalysisAIAgentChannel + AGENT_CONSUMER = TechnicalAnalysisAIAgentConsumer + + def __init__(self, channel, **kwargs): + super().__init__(channel, **kwargs) + + def _get_default_prompt(self) -> str: + return ( + "You are a Technical Analysis AI expert. Follow these steps to analyze the provided technical evaluator signals:\n" + "1. Examine TA signals comprehensively: Review RSI, MACD, moving averages, Bollinger Bands, volume patterns, and price action.\n" + "2. Assess trend strength and direction: Determine if signals indicate strong bullish, bearish, neutral, or mixed conditions.\n" + "3. Consider timeframe context: Different timeframes may show different trends - longer timeframes are generally more significant.\n" + "4. Evaluate indicator convergence/divergence: Look for confirmation across multiple indicators vs. conflicting signals.\n" + "5. Calculate balanced eval_note: Use the full range from -1 (strong sell) to 1 (strong buy), but most markets show neutral to mildly bullish/bearish signals.\n" + "6. Assess confidence realistically: Base confidence on signal strength, agreement (0-1 range), and data quality.\n" + "7. Provide detailed description: Explain key indicators, their significance, and potential market implications.\n\n" + "MANDATORY FIELDS (always include):\n" + "- eval_note: float between -1 (strong sell) to 1 (strong buy)\n" + "- confidence: float between 0 (low confidence) to 1 (high confidence)\n" + "- description: detailed explanation of the analysis\n\n" + "OPTIONAL FIELDS (only include if available):\n" + "- trend: string like 'uptrend', 'downtrend', 'ranging' - Leave empty if unclear\n" + "- support_level: float for identified support price level - Leave empty if not identified\n" + "- resistance_level: float for identified resistance price level - Leave empty if not identified\n" + "- key_indicators: list of important technical indicators and their signals - Leave empty if none clearly identified\n" + "- recommendations: list of trading recommendations - Leave empty if none\n\n" + "Important: Markets are rarely extremely bullish or bearish. Use extreme values (-1/1) only for very strong, consistent signals across multiple timeframes. Avoid bias toward negative signals.\n" + "If you lack data for any optional field, omit it from the response (leave as null).\n" + "Output only valid JSON matching the TechnicalAnalysisOutput schema." + ) + + async def execute(self, input_data, ai_service) -> dict: + """Evaluate aggregated technical analysis data.""" + aggregated_data = input_data + if not aggregated_data: + return { + "eval_note": 0, + "eval_note_description": "No technical analysis data available", + "confidence": 0, + } + + data_str = json.dumps(aggregated_data, indent=2) + + messages = [ + ai_service.create_message("system", self.prompt), + ai_service.create_message( + "user", + f"Technical analysis data:\n{data_str}\n\n" + "Provide evaluation as JSON matching the TechnicalAnalysisOutput schema. " + "Include mandatory fields (eval_note, confidence, description). " + "Include optional fields only if you have data for them.", + ), + ] + + try: + # Uses TechnicalAnalysisAIAgentChannel.OUTPUT_SCHEMA by default + parsed = await self._call_llm( + messages, + ai_service, + json_output=True, + ) + eval_note = float(parsed.get("eval_note", 0)) + eval_note_description = parsed.get("description", "Technical analysis") + confidence = float(parsed.get("confidence", 0)) + + # Clamp values + eval_note = max(-1, min(1, eval_note)) + confidence = max(0, min(1, confidence)) + + return { + "eval_note": eval_note, + "eval_note_description": eval_note_description, + "confidence": int(confidence * 100), # Convert to 0-100 range + } + except Exception as e: + self.logger.error(f"Error in technical analysis: {e}") + return { + "eval_note": 0, + "eval_note_description": f"Error in technical analysis: {str(e)}", + "confidence": 0, + } diff --git a/packages/tentacles/Agent/teams/default_manager_agent/__init__.py b/packages/tentacles/Agent/teams/default_manager_agent/__init__.py new file mode 100644 index 0000000000..e59a8cc6ba --- /dev/null +++ b/packages/tentacles/Agent/teams/default_manager_agent/__init__.py @@ -0,0 +1,15 @@ +from .default_manager_agent import ( + DefaultTeamManagerAgentProducer, + DefaultTeamManagerAgentChannel, + DefaultTeamManagerAgentConsumer, +) +from .ai_plan_manager_agent import ( + AIPlanTeamManagerAgentProducer, + AIPlanTeamManagerAgentChannel, + AIPlanTeamManagerAgentConsumer, +) +from .ai_tools_manager_agent import ( + AIToolsTeamManagerAgentProducer, + AIToolsTeamManagerAgentChannel, + AIToolsTeamManagerAgentConsumer, +) diff --git a/packages/tentacles/Agent/teams/default_manager_agent/ai_plan_manager_agent.py b/packages/tentacles/Agent/teams/default_manager_agent/ai_plan_manager_agent.py new file mode 100644 index 0000000000..b32e66dacf --- /dev/null +++ b/packages/tentacles/Agent/teams/default_manager_agent/ai_plan_manager_agent.py @@ -0,0 +1,237 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +AI team manager agent - uses LLM to decide execution flow. +""" +import typing + +import pydantic + +import octobot_agents.team.manager as agent_manager +import octobot_agents.models as agent_models + +class AIPlanTeamManagerAgentChannel(agent_manager.AIPlanManagerAgentChannel): + pass + + +class AIPlanTeamManagerAgentConsumer(agent_manager.AIPlanManagerAgentConsumer): + pass + + +class AIPlanTeamManagerAgentProducer(agent_manager.AIPlanManagerAgentProducer): + """ + AI plan team manager agent - uses LLM to decide execution flow. + + Inherits from AIPlanManagerAgentProducer. Has Channel, Producer, Consumer components (as all AI agents do). + """ + + AGENT_CHANNEL: typing.Type[agent_manager.AIPlanManagerAgentChannel] = AIPlanTeamManagerAgentChannel + AGENT_CONSUMER: typing.Type[agent_manager.AIPlanManagerAgentConsumer] = AIPlanTeamManagerAgentConsumer + + def __init__( + self, + channel: typing.Optional[AIPlanTeamManagerAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + """ + Return the default prompt for the AI team manager. + + Returns: + The default system prompt string. + """ + return """You are a team execution manager for an agent team system. +Your role is to analyze the team structure, current state, and any instructions, +then create an execution plan. The plan can contain two kinds of steps: + +1. Agent steps (step_type "agent" or omit): run a single agent. + - agent_name: name of the agent to run + - instructions (optional): list of instructions to send before execution + - wait_for (optional): agent names that must complete before this step + - skip (optional): set true to skip this step in this iteration + +2. Debate steps (step_type "debate"): run a debate phase (debators take turns, then judge decides continue or exit). + - debate_config: object with debator_agent_names (list of agent names that debate, e.g. Bull, Bear), + judge_agent_name (name of the judge agent), max_rounds (max debate rounds, e.g. 3) + - For debate steps, agent_name can be a placeholder (e.g. "debate_1") for logging. + +You may include zero, one, or multiple debate steps in the plan. Debate steps run debators in rounds until the judge decides exit or max_rounds is reached. Order and instructions for agent steps, and whether to loop execution, should optimize for the team's goals while respecting dependencies. + +Critical requirements: +- Every agent step MUST include a non-empty agent_name. +- agent_name MUST be one of the provided agent names in the context. Do NOT invent new names. +- Output ONLY valid JSON matching the ExecutionPlan schema. No markdown or extra text.""" + + def _repair_execution_plan(self, response_data: typing.Any) -> typing.Optional[agent_models.ExecutionPlan]: + if not isinstance(response_data, dict): + return None + steps = response_data.get("steps") + if not isinstance(steps, list): + return None + repaired_steps = [] + for step in steps: + if not isinstance(step, dict): + continue + step_type = step.get("step_type") + agent_name = step.get("agent_name") + if not agent_name and step_type in (None, "agent"): + agent_name = step.get("name") or step.get("agent") + if agent_name: + step = {**step, "agent_name": agent_name} + if step_type in (None, "agent") and not step.get("agent_name"): + continue + repaired_steps.append(step) + if not repaired_steps: + return None + repaired = {**response_data, "steps": repaired_steps} + try: + return agent_models.ExecutionPlan.model_validate(repaired) + except pydantic.ValidationError: + return None + + async def execute( + self, + input_data: typing.Union[agent_models.ManagerInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any # AbstractAIService - type not available at runtime + ) -> agent_models.ExecutionPlan: + """ + Build execution plan using LLM. + + Args: + input_data: Contains {"team_producer": team_producer, "initial_data": initial_data, "instructions": instructions} + ai_service: The AI service instance for LLM calls + + Returns: + ExecutionPlan from LLM + """ + team_producer = input_data.get("team_producer") + initial_data = input_data.get("initial_data", {}) + instructions = input_data.get("instructions") + + if team_producer is None: + raise ValueError("team_producer is required in input_data") + + # Build context + agents_info = [] + for agent in team_producer.agents: + agents_info.append({ + "name": agent.name, + "channel": agent.AGENT_CHANNEL.__name__ if agent.AGENT_CHANNEL else None, + }) + + relations_info = [] + for source_channel, target_channel in team_producer.relations: + relations_info.append({ + "source": source_channel.__name__, + "target": target_channel.__name__, + }) + + context = { + "team_name": team_producer.team_name, + "agents": agents_info, + "relations": relations_info, + "initial_data": initial_data, + "instructions": instructions, + } + + # Build messages for LLM + messages = [ + {"role": "system", "content": self.prompt}, + { + "role": "user", + "content": f"""Analyze the following team structure and create an execution plan: + +Team: {team_producer.team_name} +Agents: {self.format_data(agents_info)} +Relations: {self.format_data(relations_info)} +Initial Data: {self.format_data(initial_data)} +Instructions: {self.format_data(instructions) if instructions else "None"} + +Create an execution plan. Use agent steps (step_type "agent" or omit) for single-agent steps and debate steps (step_type "debate" with debate_config) when you want debators to argue and a judge to decide; you can include multiple debate steps if needed. + +CRITICAL: agent_name MUST be exactly one of the provided agent names. Do NOT invent names.""" + }, + ] + + # Call LLM with ExecutionPlan as response schema + response_data = await self._call_llm( + messages, + ai_service, + json_output=True, + response_schema=agent_models.ExecutionPlan, + ) + allowed_agent_names = [agent["name"] for agent in agents_info] + try: + execution_plan = agent_models.ExecutionPlan.model_validate_with_agent_names( + response_data, + allowed_agent_names, + ) + except (pydantic.ValidationError, ValueError) as e: + repaired = self._repair_execution_plan(response_data) + if repaired is not None: + self.logger.warning("Recovered invalid execution plan by repairing steps.") + return repaired + self.logger.warning(f"Invalid execution plan. Retrying once. Error: {e}") + retry_messages = [ + {"role": "system", "content": self.prompt}, + { + "role": "user", + "content": f"""Analyze the following team structure and create an execution plan: + +Team: {team_producer.team_name} +Agents: {self.format_data(agents_info)} +Relations: {self.format_data(relations_info)} +Initial Data: {self.format_data(initial_data)} +Instructions: {self.format_data(instructions) if instructions else "None"} + +CRITICAL: Every agent step MUST include agent_name (non-empty string). +Create an execution plan. Use agent steps (step_type "agent" or omit) for single-agent steps and debate steps (step_type "debate" with debate_config) when you want debators to argue and a judge to decide; you can include multiple debate steps if needed.""" + }, + ] + response_data = await self._call_llm( + retry_messages, + ai_service, + json_output=True, + response_schema=agent_models.ExecutionPlan, + ) + try: + execution_plan = agent_models.ExecutionPlan.model_validate_with_agent_names( + response_data, + allowed_agent_names, + ) + except (pydantic.ValidationError, ValueError): + repaired = self._repair_execution_plan(response_data) + if repaired is not None: + self.logger.warning("Recovered invalid execution plan by repairing steps after retry.") + return repaired + raise + + # Debate step normalization is handled in the team executor + + self.logger.debug(f"Generated execution plan with {len(execution_plan.steps)} steps") + + return execution_plan diff --git a/packages/tentacles/Agent/teams/default_manager_agent/ai_tools_manager_agent.py b/packages/tentacles/Agent/teams/default_manager_agent/ai_tools_manager_agent.py new file mode 100644 index 0000000000..09765f91f8 --- /dev/null +++ b/packages/tentacles/Agent/teams/default_manager_agent/ai_tools_manager_agent.py @@ -0,0 +1,115 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +AI tools team manager agent - uses LLM with tools to decide execution flow. +""" +import typing + +import octobot_agents.team.manager as agent_manager +from tentacles.Agent.teams.default_manager_agent.ai_plan_manager_agent import ( + AIPlanTeamManagerAgentProducer, +) +import octobot_agents.models as agent_models + + +class AIToolsTeamManagerAgentChannel(agent_manager.AIToolsManagerAgentChannel): + pass + + +class AIToolsTeamManagerAgentConsumer(agent_manager.AIToolsManagerAgentConsumer): + pass + + +class AIToolsTeamManagerAgentProducer(agent_manager.AIToolsManagerAgentProducer): + """ + AI tools team manager agent - uses LLM with tools to decide execution flow. + + Inherits from AIToolsManagerAgentProducer. Has Channel, Producer, Consumer components (as all AI agents do). + """ + + AGENT_CHANNEL: typing.Type[agent_manager.AIToolsManagerAgentChannel] = AIToolsTeamManagerAgentChannel + AGENT_CONSUMER: typing.Type[agent_manager.AIToolsManagerAgentConsumer] = AIToolsTeamManagerAgentConsumer + + def __init__( + self, + channel: typing.Optional[AIToolsTeamManagerAgentChannel] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + max_tool_calls: typing.Optional[int] = None, + **kwargs, + ): + super().__init__( + channel=channel, + model=model, + max_tokens=max_tokens, + temperature=temperature, + max_tool_calls=max_tool_calls, + **kwargs, + ) + + def _get_default_prompt(self) -> str: + """ + Return the default prompt for the AI tools team manager. + + Returns: + The default system prompt string. + """ + return """You are a tools-driven team execution manager for an agent team system. +Your role is to analyze the team structure, current state, and any instructions, +then coordinate execution using available tools. You have access to tools to run agents and debates. + +Available tools: +- run_agent: Execute a specific agent by name +- run_debate: Run a debate between multiple agents with a judge +- finish: Complete execution and return current results + +Use these tools to coordinate the team execution. Call finish when you have sufficient results. + +Important: +- Do NOT respond with plain text. You MUST respond with a tool call. +- If unsure, call finish with empty arguments. +""" + + async def execute( + self, + input_data: typing.Union[agent_models.ManagerInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any, # AbstractAIService - type not available at runtime + ): + if not ai_service.supports_call_json_output(): + self.logger.warning( + "tool-call-json-output is disabled. Switching to plan-based manager." + ) + plan_manager = AIPlanTeamManagerAgentProducer( + channel=None, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + ) + return await plan_manager.execute(input_data, ai_service) + try: + return await super().execute(input_data, ai_service) + except Exception as e: + self.logger.warning( + f"Tools-driven manager failed. Switching to plan-based manager. Error: {e}" + ) + plan_manager = AIPlanTeamManagerAgentProducer( + channel=None, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + ) + return await plan_manager.execute(input_data, ai_service) diff --git a/packages/tentacles/Agent/teams/default_manager_agent/default_manager_agent.py b/packages/tentacles/Agent/teams/default_manager_agent/default_manager_agent.py new file mode 100644 index 0000000000..239905c3c3 --- /dev/null +++ b/packages/tentacles/Agent/teams/default_manager_agent/default_manager_agent.py @@ -0,0 +1,119 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +""" +Default team manager agent - simple agent that executes in topological order. +""" +import typing +from typing import List, Optional + +import octobot_agents.team.manager as agent_manager +import octobot_agents.enums as agent_enums +import octobot_agents.models as agent_models + + +class DefaultTeamManagerAgentChannel(agent_manager.ManagerAgentChannel): + pass + + +class DefaultTeamManagerAgentConsumer(agent_manager.ManagerAgentConsumer): + pass + + +class DefaultTeamManagerAgentProducer(agent_manager.ManagerAgentProducer): + """ + Default team manager agent - simple agent that executes in topological order. + + Inherits from ManagerAgentProducer. Has Channel, Producer, Consumer components (as all agents do). + """ + + AGENT_CHANNEL: typing.Type[agent_manager.ManagerAgentChannel] = DefaultTeamManagerAgentChannel + AGENT_CONSUMER: typing.Type[agent_manager.ManagerAgentConsumer] = DefaultTeamManagerAgentConsumer + + def __init__( + self, + channel: typing.Optional[DefaultTeamManagerAgentChannel] = None, + ): + super().__init__(channel=channel) + + async def execute( + self, + input_data: typing.Union[agent_models.ManagerInput, typing.Dict[str, typing.Any]], + ai_service: typing.Any # AbstractAIService - type not available at runtime + ) -> agent_models.ExecutionPlan: + """ + Build execution plan from topological sort. + + Args: + input_data: Contains {"team_producer": team_producer, "initial_data": initial_data, "instructions": instructions} + ai_service: Not used by default manager + + Returns: + ExecutionPlan with steps in topological order + """ + team_producer = input_data.get("team_producer") + if team_producer is None: + raise ValueError("team_producer is required in input_data") + + # Get execution order (topological sort) + execution_order = team_producer._get_execution_order() + incoming_edges, _ = team_producer._build_dag() + + # Build ExecutionPlan + steps: List[agent_models.ExecutionStep] = [] + for agent in execution_order: + # Get predecessors for wait_for + channel_type = agent.AGENT_CHANNEL + if channel_type is None: + continue + + predecessors = incoming_edges.get(channel_type, []) + wait_for: Optional[List[str]] = None + if predecessors: + wait_for = [] + for pred_channel in predecessors: + pred_agent = team_producer._producer_by_channel.get(pred_channel) + if pred_agent: + wait_for.append(pred_agent.name) + + step = agent_models.ExecutionStep( + agent_name=agent.name, + instructions=None, # No instructions by default + wait_for=wait_for, + skip=False, + ) + steps.append(step) + + # Optional: inject debate steps from initial_data.debate_phases + initial_data = input_data.get("initial_data") or {} + debate_phases = initial_data.get("debate_phases") if isinstance(initial_data, dict) else None + if isinstance(debate_phases, list) and debate_phases: + for idx, phase in enumerate(debate_phases): + config = agent_models.DebatePhaseConfig.model_validate(phase) if not isinstance(phase, agent_models.DebatePhaseConfig) else phase + steps.append( + agent_models.ExecutionStep( + agent_name=f"debate_{idx + 1}", + step_type=agent_enums.StepType.DEBATE.value, + debate_config=config, + skip=False, + ) + ) + + return agent_models.ExecutionPlan( + steps=steps, + loop=False, + loop_condition=None, + max_iterations=None, + ) diff --git a/packages/tentacles/Agent/teams/default_manager_agent/metadata.json b/packages/tentacles/Agent/teams/default_manager_agent/metadata.json new file mode 100644 index 0000000000..f3dba2bb92 --- /dev/null +++ b/packages/tentacles/Agent/teams/default_manager_agent/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DefaultTeamManagerAgent", "AIPlanTeamManagerAgent", "AIToolsTeamManagerAgent"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/__init__.py b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/__init__.py new file mode 100644 index 0000000000..904de34dad --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/__init__.py @@ -0,0 +1,16 @@ +from .simple_ai_evaluator_agents_team import ( + SimpleAIEvaluatorAgentsTeamChannel, + SimpleAIEvaluatorAgentsTeamConsumer, + SimpleAIEvaluatorAgentsTeam, +) +from .deep_agent_evaluator_team import ( + DeepAgentEvaluatorTeamChannel, + DeepAgentEvaluatorTeamConsumer, + DeepAgentEvaluatorTeam, + create_evaluator_team, + TECHNICAL_ANALYSIS_INSTRUCTIONS, + SENTIMENT_ANALYSIS_INSTRUCTIONS, + REALTIME_ANALYSIS_INSTRUCTIONS, + SUMMARIZATION_INSTRUCTIONS, + MANAGER_INSTRUCTIONS, +) diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/deep_agent_evaluator_team.py b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/deep_agent_evaluator_team.py new file mode 100644 index 0000000000..33b422237c --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/deep_agent_evaluator_team.py @@ -0,0 +1,443 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Deep Agent Evaluator Team. + +Uses LangChain Deep Agents with the supervisor pattern: +- Manager agent orchestrates worker agents +- Workers: TA, Sentiment, RealTime analysis +- Summarization as final synthesis +- Long-term memory via /memories/ path + +Inherits from AbstractDeepAgentsTeamChannelProducer for proper integration. +""" +from __future__ import annotations + +import typing +import json +import logging +import os + +import octobot_commons.constants as common_constants + +import octobot_agents.team.channels.deep_agents_team as agent_deep_team +import octobot_agents.utils.extractor as agent_extractor +import octobot_services.services.abstract_ai_service as abstract_ai_service + +from . import models + +logger = logging.getLogger(__name__) + + +# Worker agent prompts +TECHNICAL_ANALYSIS_INSTRUCTIONS = """ +You are a Technical Analysis AI expert. Analyze technical evaluator signals: + +1. Examine TA signals: RSI, MACD, moving averages, Bollinger Bands, volume patterns, price action +2. Assess trend strength and direction +3. Consider timeframe context (longer timeframes more significant) +4. Evaluate indicator convergence/divergence + +Output your analysis as JSON with: +- eval_note: float -1 (strong sell) to 1 (strong buy) +- confidence: float 0-1 +- description: detailed explanation +- trend: "uptrend"/"downtrend"/"ranging" if clear +- key_indicators: list of important indicators +""" + +SENTIMENT_ANALYSIS_INSTRUCTIONS = """ +You are a Sentiment Analysis AI expert. Analyze market sentiment signals: + +1. Review news sentiment, social media trends, fear/greed indicators +2. Assess overall market mood +3. Consider sentiment extremes and reversals +4. Evaluate consensus vs contrarian signals + +Output your analysis as JSON with: +- eval_note: float -1 (very bearish) to 1 (very bullish) +- confidence: float 0-1 +- description: detailed explanation +- sentiment_summary: overall market mood +""" + +REALTIME_ANALYSIS_INSTRUCTIONS = """ +You are a Real-Time Market Analysis AI expert. Analyze live market data: + +1. Review order book imbalances, recent trades +2. Assess immediate price momentum +3. Consider liquidity conditions +4. Evaluate short-term price drivers + +Output your analysis as JSON with: +- eval_note: float -1 (bearish momentum) to 1 (bullish momentum) +- confidence: float 0-1 +- description: detailed explanation +- momentum: "strong"/"moderate"/"weak" +""" + +SUMMARIZATION_INSTRUCTIONS = """ +You are a Market Analysis Summarizer. Synthesize analyses from TA, Sentiment, and RealTime agents. + +1. Weigh each analysis by confidence and relevance +2. Resolve conflicting signals with clear reasoning +3. Produce a final consensus recommendation + +IMPORTANT: Output ONLY valid JSON in this EXACT structure (no extra fields): +```json +{ + "eval_note": , + "eval_note_description": "", + "confidence": , + "trend": "uptrend" or "downtrend" or "ranging" or null, + "risk_level": "low" or "medium" or "high" or null, + "key_factors": ["factor1", "factor2"] +} +``` + +Do NOT add extra fields like 'evaluation', 'recommendation', 'market_insights', etc. +""" + +MANAGER_INSTRUCTIONS = """ +You are the Evaluator Team Manager. Coordinate market analysis agents. + +Your team: +- technical_analysis: Analyzes technical indicators +- sentiment_analysis: Analyzes market sentiment +- realtime_analysis: Analyzes live market data +- summarization: Synthesizes all analyses + +Workflow: +1. Use write_todos to plan your approach +2. Delegate to technical_analysis, sentiment_analysis, realtime_analysis (can run in parallel concept) +3. After all three complete, send their outputs to summarization +4. Return ONLY the final JSON result from summarization (no modifications) + +CRITICAL: Your final output must be the exact JSON from summarization with this structure: +{ + "eval_note": , + "eval_note_description": "", + "confidence": , + "trend": "uptrend"|"downtrend"|"ranging"|null, + "risk_level": "low"|"medium"|"high"|null, + "key_factors": ["..."] +} + +Remember to save important insights to /memories/ for future reference. +""" + + +class DeepAgentEvaluatorTeamChannel(agent_deep_team.AbstractDeepAgentsTeamChannel): + pass + + +class DeepAgentEvaluatorTeamConsumer(agent_deep_team.AbstractDeepAgentsTeamChannelConsumer): + pass + + +class DeepAgentEvaluatorTeam(agent_deep_team.AbstractDeepAgentsTeamChannelProducer): + """ + Evaluator team using LangChain Deep Agents with supervisor pattern. + + Inherits from AbstractDeepAgentsTeamChannelProducer which handles: + - Deep Agent creation with supervisor pattern + - Worker subagent orchestration via SubAgentMiddleware + - Task planning via TodoListMiddleware + - Long-term memory via CompositeBackend (/memories/) + - Streaming support for real-time updates + - Debug logging for agent operations + + Usage: + team = DeepAgentEvaluatorTeam(ai_service=llm_service) + result = await team.run(aggregated_data) + eval_note, description = team.parse_evaluator_result(result) + """ + + TEAM_NAME = "DeepAgentEvaluatorTeam" + TEAM_CHANNEL = DeepAgentEvaluatorTeamChannel + TEAM_CONSUMER = DeepAgentEvaluatorTeamConsumer + + MAX_ITERATIONS = 10 + ENABLE_DEBATE = False + ENABLE_STREAMING = False # Enable streaming for real-time debug logs + + def __init__( + self, + ai_service: typing.Optional[abstract_ai_service.AbstractAIService] = None, + model: typing.Optional[str] = None, + max_tokens: typing.Optional[int] = None, + temperature: typing.Optional[float] = None, + channel: typing.Optional[DeepAgentEvaluatorTeamChannel] = None, + team_id: typing.Optional[str] = None, + include_ta: bool = True, + include_sentiment: bool = True, + include_realtime: bool = True, + enable_debate: bool = False, + enable_streaming: bool = False, + ): + """ + Initialize the Deep Agent evaluator team. + + Args: + ai_service: The LLM service instance. + model: LLM model to use. + max_tokens: Maximum tokens for LLM responses. + temperature: Temperature for LLM randomness. + channel: Optional output channel. + team_id: Unique identifier for this team instance. + include_ta: Include technical analysis worker. + include_sentiment: Include sentiment analysis worker. + include_realtime: Include realtime analysis worker. + enable_debate: Enable debate workflow with critic. + enable_streaming: Enable streaming for real-time debug logs. + """ + self.include_ta = include_ta + self.include_sentiment = include_sentiment + self.include_realtime = include_realtime + self.ENABLE_DEBATE = enable_debate + + super().__init__( + channel=channel, + ai_service=ai_service, + model=model, + max_tokens=max_tokens, + temperature=temperature, + team_name=self.TEAM_NAME, + team_id=team_id, + enable_streaming=enable_streaming, + ) + + def get_worker_definitions(self) -> list[dict[str, typing.Any]]: + """Get worker subagent definitions for the evaluator team.""" + workers = [] + + if self.include_ta: + workers.append({ + "name": "technical_analysis", + "instructions": TECHNICAL_ANALYSIS_INSTRUCTIONS, + }) + + if self.include_sentiment: + workers.append({ + "name": "sentiment_analysis", + "instructions": SENTIMENT_ANALYSIS_INSTRUCTIONS, + }) + + if self.include_realtime: + workers.append({ + "name": "realtime_analysis", + "instructions": REALTIME_ANALYSIS_INSTRUCTIONS, + }) + + # Always include summarization + workers.append({ + "name": "summarization", + "instructions": SUMMARIZATION_INSTRUCTIONS, + }) + + return workers + + def get_manager_instructions(self) -> str: + return MANAGER_INSTRUCTIONS + + def get_skills_resources_dir(self) -> str | None: + return os.path.join(os.path.dirname(__file__), "resources", "skills") + + def get_agent_skills(self, agent_name: str) -> list[str] | None: + """ + Get skills for specific worker agents. + Each agent gets its own specialized skill. + """ + skills_dir = self.get_skills_resources_dir() + if not skills_dir: + return None + + agent_skill_dir = os.path.join(skills_dir, agent_name) + if os.path.isdir(agent_skill_dir): + # Check if SKILL.md exists + skill_file = os.path.join(agent_skill_dir, "SKILL.md") + if os.path.isfile(skill_file): + return [f"./{agent_name}/"] + + return None + + def get_critic_config(self) -> dict[str, typing.Any] | None: + """Get critic configuration for debate mode.""" + if not self.ENABLE_DEBATE: + return None + return { + "name": "critic", + "instructions": ( + "Critique the analysis, identify weaknesses in the reasoning, " + "check for confirmation bias, and suggest improvements." + ), + } + + def _build_input_message(self, initial_data: typing.Dict[str, typing.Any]) -> str: + """Build the input message for the supervisor.""" + aggregated_data = initial_data.get("aggregated_data", initial_data) + missing_data_types = initial_data.get("missing_data_types", []) + + data_str = json.dumps(aggregated_data, indent=2, default=str) + + message = f""" +Analyze the following market data and provide a trading recommendation. + +Market Data: +{data_str} +""" + + if missing_data_types: + message += f"\nNote: Missing data types: {', '.join(missing_data_types)}" + + message += """ + +Coordinate with your team: +1. Send relevant data to technical_analysis, sentiment_analysis, realtime_analysis +2. Collect their analyses +3. Send all analyses to summarization for final synthesis +4. Return the final recommendation as JSON with eval_note and eval_note_description + +Save any important market insights to /memories/market_insights/ for future reference. +""" + return message + + def _parse_result(self, result: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: + """ + Parse and validate the Deep Agent result using DeepAgentEvaluationResult model. + + Extracts JSON from the response, validates structure, and returns consistent output. + """ + try: + messages = result.get("messages", []) + if not messages: + validated = models.DeepAgentEvaluationResult( + eval_note=common_constants.START_PENDING_EVAL_NOTE, + eval_note_description="No response from agent", + ) + return validated.model_dump() + + # Get the last assistant message + last_message = messages[-1] + # Extract content from LangChain message object (has .content attribute) + # or dict (has "content" key), or convert to string as last resort + if hasattr(last_message, "content"): + content = str(last_message.content) + elif isinstance(last_message, dict): + content = str(last_message.get("content", "")) + else: + content = str(last_message) + + # Try to parse JSON from content + parsed_data = agent_extractor.extract_json_from_content(content) + + if parsed_data: + try: + # Validate against the model - this enforces structure + validated = models.DeepAgentEvaluationResult.model_validate(parsed_data) + return validated.model_dump(exclude_none=False) + except Exception as validation_error: + logger.warning( + f"Validation error for extracted JSON: {validation_error}. " + f"Extracted data: {parsed_data}" + ) + # Fall through to create minimal valid response + + # Fallback: create minimal valid response + validated = models.DeepAgentEvaluationResult( + eval_note=common_constants.START_PENDING_EVAL_NOTE, + eval_note_description=content[:500] if content else "Unable to parse response", + ) + return validated.model_dump() + + except Exception as e: + logger.error(f"Error parsing result: {e}", exc_info=True) + try: + fallback = models.DeepAgentEvaluationResult( + eval_note=common_constants.START_PENDING_EVAL_NOTE, + eval_note_description=f"Error: {str(e)}", + ) + return fallback.model_dump() + except Exception as fallback_error: + logger.error(f"Error creating fallback result: {fallback_error}") + return { + "eval_note": common_constants.START_PENDING_EVAL_NOTE, + "eval_note_description": f"Error: {str(e)}", + "confidence": 0.0, + } + + async def run_with_data( + self, + aggregated_data: dict, + missing_data_types: list | None = None, + ) -> tuple[float | str, str]: + """ + Convenience method to run the team with aggregated evaluator data. + + Args: + aggregated_data: Dict mapping evaluator type to evaluations. + missing_data_types: Optional list of missing data types. + + Returns: + Tuple of (eval_note, eval_note_description). + """ + initial_data = { + "aggregated_data": aggregated_data, + "missing_data_types": missing_data_types or [], + } + + result = await self.run(initial_data) + + eval_note = result.get("eval_note", common_constants.START_PENDING_EVAL_NOTE) + description = result.get("eval_note_description", "") + + return eval_note, description + + +def create_evaluator_team( + ai_service: typing.Optional[abstract_ai_service.AbstractAIService] = None, + model: typing.Optional[str] = None, + include_ta: bool = True, + include_sentiment: bool = True, + include_realtime: bool = True, + enable_debate: bool = False, + enable_streaming: bool = False, +) -> DeepAgentEvaluatorTeam: + """ + Factory function to create a Deep Agent evaluator team. + + Args: + ai_service: The LLM service instance. + model: LLM model to use. + include_ta: Include technical analysis worker. + include_sentiment: Include sentiment analysis worker. + include_realtime: Include realtime analysis worker. + enable_debate: Enable debate workflow. + enable_streaming: Enable streaming for real-time debug logs. + + Returns: + Configured DeepAgentEvaluatorTeam instance. + """ + return DeepAgentEvaluatorTeam( + ai_service=ai_service, + model=model, + include_ta=include_ta, + include_sentiment=include_sentiment, + include_realtime=include_realtime, + enable_debate=enable_debate, + enable_streaming=enable_streaming, + ) diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/metadata.json b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/metadata.json new file mode 100644 index 0000000000..090ae9e8df --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/metadata.json @@ -0,0 +1,11 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SimpleAIEvaluatorAgentsTeam"], + "tentacles-requirements": [ + "TechnicalAnalysisAgent", + "SentimentAnalysisAgent", + "RealTimeAnalysisAgent", + "SummarizationAgent" + ] +} diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/models.py b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/models.py new file mode 100644 index 0000000000..33e6298b7e --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/models.py @@ -0,0 +1,88 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Models for simple AI evaluator agents team. + +These models are specific to the simple_ai_evaluator_agents_team tentacle +and define the validated output structure for deep agent evaluation results. +""" + +from typing import Any, List, Optional, Union +import pydantic +from pydantic import BaseModel, ConfigDict, Field + + +class DeepAgentEvaluationResult(BaseModel): + """Validated output structure for Deep Agent evaluation results. + + Enforces consistent output format across all deep agent evaluations. + Used for post-processing validation (not request-time schema binding). + + This model is specific to simple_ai_evaluator_agents_team and ensures + that agent outputs always conform to the expected structure. + """ + model_config = ConfigDict(extra="forbid") + + eval_note: Union[int, float] = Field( + ..., + ge=-1.0, + le=1.0, + description="Evaluation note from -1 (strong sell) to 1 (strong buy)" + ) + eval_note_description: str = Field( + ..., + description="Detailed explanation of the evaluation" + ) + confidence: Optional[float] = Field( + default=0.5, + ge=0.0, + le=1.0, + description="Confidence in the evaluation (0-1)" + ) + trend: Optional[str] = Field( + default=None, + description="Market trend assessment (e.g., 'uptrend', 'downtrend', 'ranging')" + ) + risk_level: Optional[str] = Field( + default=None, + description="Risk assessment (e.g., 'low', 'medium', 'high')" + ) + key_factors: Optional[List[str]] = Field( + default=None, + description="Key factors influencing the evaluation" + ) + recommendation: Optional[str] = Field( + default=None, + description="Trading recommendation or action items" + ) + + @pydantic.model_validator(mode="before") + @classmethod + def normalize_eval_note(cls, data: Any) -> Any: + """Normalize eval_note to valid float range. + + Ensures eval_note is always within [-1.0, 1.0] range. + """ + if isinstance(data, dict) and "eval_note" in data: + eval_note = data["eval_note"] + if eval_note is not None: + try: + eval_note = float(eval_note) + data["eval_note"] = max(-1.0, min(1.0, eval_note)) + except (ValueError, TypeError): + data["eval_note"] = 0.0 + return data diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/README.md b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/sentiment_analysis/SKILL.md b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/sentiment_analysis/SKILL.md new file mode 100644 index 0000000000..947e1fd8a7 --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/sentiment_analysis/SKILL.md @@ -0,0 +1,135 @@ +--- +name: market-sentiment +description: Use this skill for analyzing market sentiment from news, social media, fear/greed indices, and overall market mood to assess bullish or bearish conditions. +--- + +# market-sentiment + +## Overview + +This skill provides guidance on interpreting market sentiment signals to understand investor psychology and potential market direction. Use this when analyzing sentiment data or gauging market mood. + +## Instructions + +### 1. Understand Sentiment Indicators + +**Fear & Greed Index** +- 0-25: Extreme Fear (potential buy opportunity) +- 25-45: Fear (cautious bullish) +- 45-55: Neutral +- 55-75: Greed (cautious bearish) +- 75-100: Extreme Greed (potential sell opportunity) + +**News Sentiment** +- Positive news flow: Bullish sentiment +- Negative news flow: Bearish sentiment +- News saturation: Often marks extremes (contrarian signal) + +**Social Media Sentiment** +- High positive sentiment: Bullish, but watch for euphoria +- High negative sentiment: Bearish, but watch for capitulation +- Sentiment spikes often precede reversals + +### 2. Identify Sentiment Extremes + +Extreme sentiment levels are contrarian indicators: +- **Extreme Fear**: Often marks market bottoms (buy opportunity) +- **Extreme Greed**: Often marks market tops (sell opportunity) +- **Consensus bullishness**: Market may be overbought +- **Consensus bearishness**: Market may be oversold + +### 3. Analyze Sentiment Trends + +- **Rising bullish sentiment**: Market gaining confidence +- **Declining bearish sentiment**: Market recovering +- **Rising bearish sentiment**: Market losing confidence +- **Declining bullish sentiment**: Market weakening + +### 4. Evaluate News Impact + +**High Impact News** +- Regulatory announcements +- Major adoption news +- Security breaches +- Economic data releases +- Central bank decisions + +**Transient vs Structural News** +- Short-term noise: Price reactions fade quickly +- Fundamental shifts: Long-term impact on valuations + +### 5. Social Media Analysis + +**Red Flags (Potential Top)** +- Excessive euphoria +- "Get rich quick" narratives spreading +- Retail FOMO (Fear of Missing Out) +- Influencers all bullish + +**Green Flags (Potential Bottom)** +- Widespread despair and capitulation +- "Project is dead" narratives +- Retail giving up +- Contrarian voices emerging + +### 6. Correlation with Price Action + +- **Sentiment leads price**: Strong predictor +- **Sentiment lags price**: Less useful, reaction not prediction +- **Sentiment diverges from price**: Potential reversal signal + +## Output Format + +When analyzing market sentiment, provide: +```json +{ + "eval_note": , + "confidence": , + "sentiment_summary": "Overall market mood description", + "key_factors": ["Fear & Greed at 15 (Extreme Fear)", "News sentiment turned positive", "Social media showing capitulation"], + "description": "Detailed sentiment analysis with reasoning" +} +``` + +## Sentiment Patterns + +**Bullish Sentiment Signals** +- Fear index in extreme fear zone +- Negative news priced in (no reaction to bad news) +- Social media capitulation (everyone giving up) +- Contrarian buying emerging +- Positive news starting to surface + +**Bearish Sentiment Signals** +- Greed index in extreme greed zone +- Good news not moving price higher +- Social media euphoria (everyone bullish) +- Mainstream media excitement +- Negative news having outsized impact + +## Best Practices + +1. Use sentiment as a contrarian indicator at extremes +2. Combine with technical and fundamental analysis +3. Watch for sentiment/price divergences +4. Consider the time horizon (short-term noise vs long-term trend) +5. Distinguish between retail and institutional sentiment +6. Be aware of sentiment manipulation and false narratives +7. Track sentiment changes over time, not just snapshots +8. Consider broader market sentiment, not just asset-specific + +## Warning Signs + +**Extreme Bull Market Top Indicators** +- "This time is different" narratives +- Parabolic price moves with euphoric sentiment +- Mainstream coverage going viral +- Everyone you know is talking about it +- Extreme leverage and speculation + +**Extreme Bear Market Bottom Indicators** +- "It's going to zero" narratives +- Capitulation selling with despair +- Media declaring market dead +- Nobody wants to talk about it anymore +- Forced deleveraging complete diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/summarization/SKILL.md b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/summarization/SKILL.md new file mode 100644 index 0000000000..ccb9c40533 --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/summarization/SKILL.md @@ -0,0 +1,202 @@ +--- +name: trading-strategy +description: Use this skill for developing and evaluating trading strategies by synthesizing technical analysis, market sentiment, and risk management principles into actionable trading recommendations. +--- + +# trading-strategy + +## Overview + +This skill provides comprehensive guidance on synthesizing multiple analyses into coherent trading strategies and actionable recommendations. Use this for final decision-making and trade structuring. + +## Instructions + +### 1. Synthesize Multiple Signals + +Integrate analysis from different sources: +- **Technical Analysis**: Trend, momentum, support/resistance +- **Sentiment Analysis**: Market mood, fear/greed levels +- **Real-Time Data**: Order flow, liquidity, recent price action + +Weight each component by: +- Confidence level (higher confidence = higher weight) +- Timeframe alignment (multiple timeframes agreeing) +- Signal strength (strong vs weak signals) + +### 2. Resolve Conflicting Signals + +When analyses disagree: +- **Technical bullish, Sentiment bearish**: Often means "buy the dip" if technical is strong +- **Technical bearish, Sentiment bullish**: Often means "sell the rally" if technical is strong +- **Mixed timeframes**: Defer to higher timeframe for trend direction +- **Low confidence all around**: Recommend staying out or reducing position size + +### 3. Generate eval_note (-1 to 1 scale) + +**Strong Buy Signals (0.6 to 1.0)** +- Multiple timeframes aligned bullish +- Technical indicators show strong uptrend +- Sentiment at extreme fear (contrarian buy) +- High confidence from all analyses + +**Moderate Buy (0.3 to 0.6)** +- Majority of signals bullish +- Some conflicting indicators +- Medium confidence +- Trend supports upside + +**Neutral (−0.3 to 0.3)** +- Mixed signals across analyses +- Ranging market conditions +- Low confidence or uncertainty +- Wait for clearer setup + +**Moderate Sell (−0.6 to −0.3)** +- Majority of signals bearish +- Some conflicting indicators +- Medium confidence +- Trend supports downside + +**Strong Sell (−1.0 to −0.6)** +- Multiple timeframes aligned bearish +- Technical indicators show strong downtrend +- Sentiment at extreme greed (contrarian sell) +- High confidence from all analyses + +### 4. Risk Assessment + +Consider before making recommendations: +- **Volatility**: High volatility = reduce position size +- **Liquidity**: Low liquidity = widen stops, smaller size +- **Time in market**: Newer trends are riskier +- **Correlation**: Diversification reduces risk +- **Black swan risk**: Extreme events possible + +### 5. Position Sizing Guidelines + +Based on confidence and volatility: +- **High confidence, Low volatility**: Larger positions (up to max allowed) +- **High confidence, High volatility**: Medium positions +- **Medium confidence, Any volatility**: Smaller positions +- **Low confidence**: Minimal or no position + +### 6. Entry and Exit Strategy + +**Entry Points** +- Strong signals: Enter on market orders or limit near current price +- Medium signals: Enter on pullbacks to support +- Weak signals: Scale in gradually + +**Stop Loss Placement** +- Below recent swing low (for longs) +- Above recent swing high (for shorts) +- Below key moving averages +- Account for volatility (wider stops in volatile markets) + +**Take Profit Targets** +- First target: 1-2 risk/reward ratio +- Second target: Major resistance/support level +- Final target: Trailing stop to ride trend + +### 7. Timeframe Considerations + +**Short-term trading (minutes to hours)** +- Focus on 1H and below timeframes +- More sensitive to real-time data and sentiment shifts +- Tighter stops and quicker exits + +**Medium-term trading (days to weeks)** +- Focus on 4H and daily timeframes +- Align with daily trend direction +- Wider stops, let positions breathe + +**Long-term trading (weeks to months)** +- Focus on daily and weekly timeframes +- Strong fundamental and macro support needed +- Very wide stops, focus on major trend + +### 8. Market Regime Adaptation + +**Trending Markets** +- Follow the trend direction +- Use pullbacks for entries +- Trail stops to ride trends +- Ignore minor counter-trend signals + +**Ranging Markets** +- Fade extremes (sell highs, buy lows) +- Tighter profit targets +- Avoid trend-following strategies +- Reduce position sizes + +**Volatile Markets** +- Wider stops to avoid whipsaws +- Smaller positions +- Quick profit taking +- Consider staying out if too chaotic + +## Output Format + +When synthesizing into a trading recommendation, provide: +```json +{ + "eval_note": , + "confidence": , + "eval_note_description": "Comprehensive synthesis of all analyses with clear reasoning", + "key_factors": [ + "Technical: Strong uptrend confirmed", + "Sentiment: Extreme fear (contrarian buy)", + "Real-time: Strong buying pressure" + ], + "recommended_action": "BUY" | "SELL" | "HOLD", + "position_size": "FULL" | "MEDIUM" | "SMALL" | "NONE", + "entry_strategy": "Market order | Limit at X | Scale in on pullback", + "stop_loss": "Below X at Y", + "take_profit": ["First: X at R:R 1.5", "Second: Y at resistance", "Final: Trailing stop"] +} +``` + +## Decision Framework + +### Strong Conviction Trade Checklist +- [ ] Multiple timeframes aligned +- [ ] 2+ independent confirmations +- [ ] Clear support/resistance levels identified +- [ ] Sentiment supports the move (or contrarian extreme) +- [ ] Risk/reward ratio > 2:1 +- [ ] Market structure supportive +- [ ] Liquidity adequate + +### Trade Rejection Criteria +- Mixed signals with no clear edge +- Low confidence across analyses +- Highly uncertain market conditions +- Risk/reward ratio < 1.5:1 +- Counter to strong higher timeframe trend +- Illiquid or highly manipulated market + +## Best Practices + +1. **Never force trades**: Wait for high-probability setups +2. **Quality over quantity**: Fewer high-quality trades beat many mediocre trades +3. **Always know your exit**: Plan stop loss and take profit before entry +4. **Manage risk first**: Protect capital over maximizing gains +5. **Adapt to changing conditions**: What works in trending markets fails in ranging markets +6. **Keep it simple**: Complex strategies with many rules often underperform +7. **Document reasoning**: Track what works and what doesn't +8. **Stay disciplined**: Stick to your strategy and don't chase +9. **Cut losses quickly**: Don't hope losing trades will reverse +10. **Let winners run**: Trail stops on profitable positions + +## Common Mistakes to Avoid + +- Over-trading in low-conviction setups +- Revenge trading after losses +- Moving stops further away (hoping for reversal) +- Adding to losing positions +- Taking profits too early on winning trades +- Ignoring risk management rules +- Trading against the higher timeframe trend +- Fighting the tape (market momentum) +- Letting emotions override analysis +- Failing to adapt strategy to market regime diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/technical_analysis/SKILL.md b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/technical_analysis/SKILL.md new file mode 100644 index 0000000000..b48ac26e3e --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/resources/skills/technical_analysis/SKILL.md @@ -0,0 +1,110 @@ +--- +name: technical-analysis +description: Use this skill for analyzing technical indicators like RSI, MACD, Moving Averages, Bollinger Bands, and price action patterns to assess market trends and momentum. +--- + +# technical-analysis + +## Overview + +This skill provides comprehensive guidance on analyzing technical indicators to evaluate market conditions and generate trading signals. Use this for any technical analysis-related questions or when you need to interpret indicator values. + +## Instructions + +### 1. Understand Key Technical Indicators + +**RSI (Relative Strength Index)** +- Range: 0-100 +- Oversold: < 30 (potential buy signal) +- Overbought: > 70 (potential sell signal) +- Divergence: Price makes new high/low but RSI doesn't (reversal signal) + +**MACD (Moving Average Convergence Divergence)** +- Components: MACD line, Signal line, Histogram +- Bullish crossover: MACD crosses above signal line +- Bearish crossover: MACD crosses below signal line +- Histogram: Shows momentum strength + +**Moving Averages** +- Short-term (SMA/EMA 20): Recent trend +- Medium-term (SMA/EMA 50): Intermediate trend +- Long-term (SMA/EMA 200): Major trend +- Golden cross: 50 MA crosses above 200 MA (bullish) +- Death cross: 50 MA crosses below 200 MA (bearish) + +**Bollinger Bands** +- Upper/Lower bands: Volatility boundaries +- Price near upper band: Potentially overbought +- Price near lower band: Potentially oversold +- Band squeeze: Low volatility (potential breakout coming) +- Band expansion: High volatility (trend in motion) + +### 2. Analyze Multiple Timeframes + +When analyzing indicators, consider multiple timeframes: +- **1H/4H**: Short-term trading signals +- **1D**: Medium-term trend confirmation +- **1W**: Long-term trend direction + +Higher timeframes carry more weight for trend direction. + +### 3. Look for Indicator Convergence + +Strong signals occur when multiple indicators agree: +- RSI oversold + MACD bullish crossover + price above 50 MA = Strong buy +- RSI overbought + MACD bearish crossover + price below 50 MA = Strong sell + +### 4. Assess Trend Strength + +Determine if the market is: +- **Strong uptrend**: Price above all MAs, RSI 50-70, MACD positive and rising +- **Strong downtrend**: Price below all MAs, RSI 30-50, MACD negative and falling +- **Ranging/Consolidation**: Price oscillating around MAs, RSI 40-60, MACD near zero + +### 5. Identify Support and Resistance + +- Previous highs/lows +- Moving averages acting as dynamic support/resistance +- Bollinger Bands as volatility-based support/resistance +- Round numbers (psychological levels) + +### 6. Volume Analysis + +- Rising volume on breakouts confirms strength +- Declining volume on rallies suggests weakness +- Volume spikes often precede reversals + +## Output Format + +When analyzing technical indicators, provide: +```json +{ + "eval_note": , + "confidence": , + "trend": "uptrend" | "downtrend" | "ranging", + "key_indicators": ["RSI oversold at 28", "MACD bullish crossover", "Price above 50 MA"], + "description": "Detailed analysis explaining the reasoning" +} +``` + +## Common Patterns + +**Bullish Reversal Signs** +- RSI divergence (price lower low, RSI higher low) +- MACD histogram turning positive +- Price bouncing off support +- Volume spike on upward move + +**Bearish Reversal Signs** +- RSI divergence (price higher high, RSI lower high) +- MACD histogram turning negative +- Price rejected at resistance +- Volume spike on downward move + +## Best Practices + +1. Never rely on a single indicator - use multiple confirmations +2. Consider the broader market context and trend +3. Account for timeframe - align trade direction with higher timeframe trend +4. Adjust interpretation for different market conditions (trending vs ranging) +5. Be aware of false signals in choppy, low-volume markets diff --git a/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/simple_ai_evaluator_agents_team.py b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/simple_ai_evaluator_agents_team.py new file mode 100644 index 0000000000..1fc74d25ca --- /dev/null +++ b/packages/tentacles/Agent/teams/simple_ai_evaluator_agents_team/simple_ai_evaluator_agents_team.py @@ -0,0 +1,241 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +""" +Simple AI Evaluator Agent Team. +Orchestrates TA, Sentiment, and RealTime agents feeding into a Summarization agent. + +DAG Structure: + TechnicalAnalysis ──┐ + SentimentAnalysis ──┼──> Summarization + RealTimeAnalysis ───┘ +""" +import typing + +import octobot_commons.constants as common_constants + +import octobot_agents.team.channels.agents_team as agents_team +import octobot_agents.team.channels.ai_agents_team as ai_agents_team +import octobot_agents.agent.channels.ai_agent as ai_agent_channels + +from tentacles.Agent.sub_agents.technical_analysis_agent import ( + TechnicalAnalysisAIAgentChannel, + TechnicalAnalysisAIAgentProducer, +) +from tentacles.Agent.sub_agents.sentiment_analysis_agent import ( + SentimentAnalysisAIAgentChannel, + SentimentAnalysisAIAgentProducer, +) +from tentacles.Agent.sub_agents.real_time_analysis_agent import ( + RealTimeAnalysisAIAgentChannel, + RealTimeAnalysisAIAgentProducer, +) +from tentacles.Agent.sub_agents.summarization_agent import ( + SummarizationAIAgentChannel, + SummarizationAIAgentProducer, +) +from tentacles.Agent.sub_agents.default_critic_agent import DefaultAICriticAgentProducer +from tentacles.Agent.teams.default_manager_agent import AIPlanTeamManagerAgentProducer +from tentacles.Agent.sub_agents.default_memory_agent import DefaultAIMemoryAgentProducer + + +class SimpleAIEvaluatorAgentsTeamChannel(agents_team.AbstractAgentsTeamChannel): + pass + + +class SimpleAIEvaluatorAgentsTeamConsumer(agents_team.AbstractAgentsTeamChannelConsumer): + pass + + +class SimpleAIEvaluatorAgentsTeam(ai_agents_team.AbstractSyncAgentsTeamChannelProducer): + """ + Sync team that orchestrates evaluator agents. + + Execution flow: + 1. TechnicalAnalysis, SentimentAnalysis, RealTimeAnalysis run in parallel + 2. Their outputs feed into Summarization + 3. Summarization produces final eval_note and description + + Usage: + team = SimpleAIEvaluatorAgentsTeam(ai_service=llm_service) + results = await team.run(aggregated_data) + # results["SummarizationAgent"] contains the final output + """ + + TEAM_NAME = "SimpleAIEvaluatorAgentsTeam" + TEAM_CHANNEL = SimpleAIEvaluatorAgentsTeamChannel + TEAM_CONSUMER = SimpleAIEvaluatorAgentsTeamConsumer + + CriticAgentClass = DefaultAICriticAgentProducer + MemoryAgentClass = DefaultAIMemoryAgentProducer + ManagerAgentClass = AIPlanTeamManagerAgentProducer + + def __init__( + self, + ai_service: typing.Any, + model: str | None = None, + max_tokens: int | None = None, + temperature: float | None = None, + channel: SimpleAIEvaluatorAgentsTeamChannel | None = None, + team_id: str | None = None, + include_ta: bool = True, + include_sentiment: bool = True, + include_realtime: bool = True, + ): + """ + Initialize the evaluator agent team. + + Args: + ai_service: The LLM service instance. + model: LLM model to use for all agents. + max_tokens: Maximum tokens for LLM responses. + temperature: Temperature for LLM randomness. + channel: Optional output channel for team results. + team_id: Unique identifier for this team instance. + include_ta: Whether to include TechnicalAnalysis agent. + include_sentiment: Whether to include SentimentAnalysis agent. + include_realtime: Whether to include RealTimeAnalysis agent. + """ + agents = [] + relations = [] + + if include_ta: + ta_producer = TechnicalAnalysisAIAgentProducer( + channel=None, + model=model, + max_tokens=max_tokens, + temperature=temperature, + ) + agents.append(ta_producer) + relations.append((TechnicalAnalysisAIAgentChannel, SummarizationAIAgentChannel)) + + if include_sentiment: + sentiment_producer = SentimentAnalysisAIAgentProducer( + channel=None, + model=model, + max_tokens=max_tokens, + temperature=temperature, + ) + agents.append(sentiment_producer) + relations.append((SentimentAnalysisAIAgentChannel, SummarizationAIAgentChannel)) + + if include_realtime: + realtime_producer = RealTimeAnalysisAIAgentProducer( + channel=None, + model=model, + max_tokens=max_tokens, + temperature=temperature, + ) + agents.append(realtime_producer) + relations.append((RealTimeAnalysisAIAgentChannel, SummarizationAIAgentChannel)) + + # Always include summarization as the terminal agent + summarization_producer = SummarizationAIAgentProducer( + channel=None, + model=model, + max_tokens=max_tokens, + temperature=temperature, + ) + agents.append(summarization_producer) + # Store reference for result lookup + self.summarization_producer = summarization_producer + + super().__init__( + channel=channel, + agents=agents, + relations=relations, + ai_service=ai_service, + team_name=self.TEAM_NAME, + team_id=team_id, + self_improving=True, + ) + + async def run_with_data( + self, + aggregated_data: dict, + missing_data_types: list | None = None, + ) -> tuple[float | str, str]: + """ + Convenience method to run the team with aggregated evaluator data. + + Uses Deep Agent file system for context management between agents. + Analysis results are saved to /analysis/* for cross-agent access. + + Args: + aggregated_data: Dict mapping evaluator type to list of evaluations. + missing_data_types: Optional list of missing evaluator types. + + Returns: + Tuple of (eval_note, eval_note_description). + """ + self.clear_transient_files() + + initial_data = { + "aggregated_data": aggregated_data, + "missing_data_types": missing_data_types or [], + } + + # Run the team + results = await self.run(initial_data) + + # Save analysis results to file system for debugging/audit + for agent_name, result in results.items(): + self.save_analysis(agent_name.lower(), result) + + # Extract summarization result using the actual agent name + summarization_result = results.get(self.summarization_producer.name) + if summarization_result is None: + # Retry summarization only once if output is missing + self.logger.warning( + "Summarization agent did not produce output. Retrying summarization only." + ) + try: + available_types = list(aggregated_data.keys()) + missing_types = missing_data_types or [] + total_expected = list(dict.fromkeys(available_types + missing_types)) + context_info = { + "missing_data_types": missing_types, + "available_data_types": available_types, + "total_expected_types": total_expected, + } + return await self.summarization_producer.execute( + results, + ai_service=self.ai_service, + context_info=context_info, + ) + except Exception as e: + self.logger.error(f"Summarization retry failed: {e}") + return common_constants.START_PENDING_EVAL_NOTE, "Error: Summarization agent did not produce output" + + # Unwrap team result wrapper if present + if isinstance(summarization_result, dict) and "result" in summarization_result: + summarization_result = summarization_result.get("result") + + # Handle tuple result from SummarizationAIAgentProducer + if isinstance(summarization_result, tuple): + return summarization_result + + # Handle dict result + try: + eval_note = summarization_result.get("eval_note", common_constants.START_PENDING_EVAL_NOTE) + description = summarization_result.get( + "eval_note_description", + summarization_result.get("description", ""), + ) + return eval_note, description + except AttributeError: + # Not a dict, return as-is + return common_constants.START_PENDING_EVAL_NOTE, "Error: Unexpected result format from summarization agent" diff --git a/packages/tentacles/Automation/actions/cancel_open_order_action/__init__.py b/packages/tentacles/Automation/actions/cancel_open_order_action/__init__.py new file mode 100644 index 0000000000..9e588df6a8 --- /dev/null +++ b/packages/tentacles/Automation/actions/cancel_open_order_action/__init__.py @@ -0,0 +1 @@ +from .cancel_open_orders import CancelOpenOrders \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/cancel_open_order_action/cancel_open_orders.py b/packages/tentacles/Automation/actions/cancel_open_order_action/cancel_open_orders.py new file mode 100644 index 0000000000..b3affb5585 --- /dev/null +++ b/packages/tentacles/Automation/actions/cancel_open_order_action/cancel_open_orders.py @@ -0,0 +1,44 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import asyncio + +import octobot_commons.configuration as configuration +import octobot_trading.api as trading_api +import octobot.automation.bases.abstract_action as abstract_action +import octobot.automation.bases.execution_details as execution_details + + +class CancelOpenOrders(abstract_action.AbstractAction): + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: + exchange_managers = trading_api.get_exchange_managers_from_exchange_ids(trading_api.get_exchange_ids()) + await asyncio.gather(*( + trading_api.cancel_all_open_orders(exchange_manager) + for exchange_manager in exchange_managers + )) + return True + + @staticmethod + def get_description() -> str: + return "Cancel all OctoBot-managed open orders on each exchange." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + return {} + + def apply_config(self, config): + # no config + pass diff --git a/packages/tentacles/Automation/actions/cancel_open_order_action/metadata.json b/packages/tentacles/Automation/actions/cancel_open_order_action/metadata.json new file mode 100644 index 0000000000..fb528b9ca9 --- /dev/null +++ b/packages/tentacles/Automation/actions/cancel_open_order_action/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["CancelOpenOrders"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/sell_all_currencies_action/__init__.py b/packages/tentacles/Automation/actions/sell_all_currencies_action/__init__.py new file mode 100644 index 0000000000..6eabe8c3b7 --- /dev/null +++ b/packages/tentacles/Automation/actions/sell_all_currencies_action/__init__.py @@ -0,0 +1 @@ +from .sell_all_currencies import SellAllCurrencies \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/sell_all_currencies_action/metadata.json b/packages/tentacles/Automation/actions/sell_all_currencies_action/metadata.json new file mode 100644 index 0000000000..db48f68893 --- /dev/null +++ b/packages/tentacles/Automation/actions/sell_all_currencies_action/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SellAllCurrencies"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/sell_all_currencies_action/sell_all_currencies.py b/packages/tentacles/Automation/actions/sell_all_currencies_action/sell_all_currencies.py new file mode 100644 index 0000000000..73f677a8f6 --- /dev/null +++ b/packages/tentacles/Automation/actions/sell_all_currencies_action/sell_all_currencies.py @@ -0,0 +1,44 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import asyncio + +import octobot_commons.configuration as configuration +import octobot_trading.api as trading_api +import octobot.automation.bases.abstract_action as abstract_action +import octobot.automation.bases.execution_details as execution_details + + +class SellAllCurrencies(abstract_action.AbstractAction): + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: + exchange_managers = trading_api.get_exchange_managers_from_exchange_ids(trading_api.get_exchange_ids()) + await asyncio.gather(*( + trading_api.sell_all_everything_for_reference_market(exchange_manager) + for exchange_manager in exchange_managers + )) + return True + + @staticmethod + def get_description() -> str: + return "Market sell each currency for the reference market on each exchange." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + return {} + + def apply_config(self, config): + # no config + pass diff --git a/packages/tentacles/Automation/actions/send_notification_action/__init__.py b/packages/tentacles/Automation/actions/send_notification_action/__init__.py new file mode 100644 index 0000000000..75864eb01f --- /dev/null +++ b/packages/tentacles/Automation/actions/send_notification_action/__init__.py @@ -0,0 +1 @@ +from .send_notification import SendNotification \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/send_notification_action/metadata.json b/packages/tentacles/Automation/actions/send_notification_action/metadata.json new file mode 100644 index 0000000000..36cc3fdeff --- /dev/null +++ b/packages/tentacles/Automation/actions/send_notification_action/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SendNotification"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/send_notification_action/send_notification.py b/packages/tentacles/Automation/actions/send_notification_action/send_notification.py new file mode 100644 index 0000000000..88b7810428 --- /dev/null +++ b/packages/tentacles/Automation/actions/send_notification_action/send_notification.py @@ -0,0 +1,58 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import octobot_commons.enums as commons_enums +import octobot_commons.configuration as configuration +import octobot_services.enums as services_enums +import octobot_services.api as services_api +import octobot.automation.bases.abstract_action as abstract_action +import octobot.automation.bases.execution_details as execution_details + + +class SendNotification(abstract_action.AbstractAction): + MESSAGE = "message" + + def __init__(self): + super().__init__() + self.notification_message = None + + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: + await services_api.send_notification( + services_api.create_notification( + self.notification_message, + category=services_enums.NotificationCategory.OTHER + ) + ) + return True + + @staticmethod + def get_description() -> str: + return f"Sends the configured message. " \ + f"Configure notification channels in the 'Accounts' tab. " \ + f"The notification type is '{services_enums.NotificationCategory.OTHER.value.capitalize()}'." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + return { + self.MESSAGE: UI.user_input( + self.MESSAGE, commons_enums.UserInputTypes.TEXT, "Your notification triggered", inputs, + title="Message to include in your notification.", + parent_input_name=step_name, + ) + } + + def apply_config(self, config): + self.notification_message = config[self.MESSAGE] diff --git a/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/__init__.py b/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/__init__.py new file mode 100644 index 0000000000..66ce183a76 --- /dev/null +++ b/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/__init__.py @@ -0,0 +1 @@ +from .stop_strategies_and_pause_trader import StopStrategiesAndPauseTrader \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/metadata.json b/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/metadata.json new file mode 100644 index 0000000000..73aa9e708b --- /dev/null +++ b/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["StopStrategiesAndPauseTrader"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/stop_strategies_and_pause_trader.py b/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/stop_strategies_and_pause_trader.py new file mode 100644 index 0000000000..8f0834a6b2 --- /dev/null +++ b/packages/tentacles/Automation/actions/stop_strategies_and_pause_trader_action/stop_strategies_and_pause_trader.py @@ -0,0 +1,57 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import octobot_services.interfaces.util as interfaces_util +import octobot_commons.configuration as configuration +import octobot_commons.enums as commons_enums +import octobot.automation.bases.abstract_action as abstract_action +import octobot.automation.bases.execution_details as execution_details + + +class StopStrategiesAndPauseTrader(abstract_action.AbstractAction): + STOP_REASON = "stop_reason" + + def __init__(self): + super().__init__() + self.stop_reason = commons_enums.StopReason.STOP_CONDITION_TRIGGERED + + @staticmethod + def get_description() -> str: + return "Stop all strategies, clear their state and pause traders." + + def get_user_inputs( + self, UI: configuration.UserInputFactory, inputs: dict, step_name: str + ) -> dict: + return { + self.STOP_REASON: UI.user_input( + self.STOP_REASON, commons_enums.UserInputTypes.OPTIONS, commons_enums.StopReason.STOP_CONDITION_TRIGGERED.value, inputs, + options=[stop_reason.value for stop_reason in commons_enums.StopReason], + title="Stop reason: the reason for stopping the strategies and pausing the traders.", + parent_input_name=step_name, + ) + } + + def apply_config(self, config: dict): + self.stop_reason = commons_enums.StopReason(config.get(self.STOP_REASON, commons_enums.StopReason.STOP_CONDITION_TRIGGERED.value)) + + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: + await interfaces_util.get_bot_api().stop_all_trading_modes_and_pause_traders( + self.stop_reason, + execution_details=execution_details.get_initial_execution_details(), + schedule_bot_stop=False, + ) + return True diff --git a/packages/tentacles/Automation/actions/stop_trading_action/__init__.py b/packages/tentacles/Automation/actions/stop_trading_action/__init__.py new file mode 100644 index 0000000000..2ca9ef09a8 --- /dev/null +++ b/packages/tentacles/Automation/actions/stop_trading_action/__init__.py @@ -0,0 +1 @@ +from .stop_trading import StopTrading \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/stop_trading_action/metadata.json b/packages/tentacles/Automation/actions/stop_trading_action/metadata.json new file mode 100644 index 0000000000..3f31330872 --- /dev/null +++ b/packages/tentacles/Automation/actions/stop_trading_action/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["StopTrading"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/actions/stop_trading_action/stop_trading.py b/packages/tentacles/Automation/actions/stop_trading_action/stop_trading.py new file mode 100644 index 0000000000..4a57271b2f --- /dev/null +++ b/packages/tentacles/Automation/actions/stop_trading_action/stop_trading.py @@ -0,0 +1,41 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import octobot_commons.constants as commons_constants +import octobot_services.interfaces.util as interfaces_util +import tentacles.Automation.actions.cancel_open_order_action as cancel_open_orders +import octobot.automation.bases.execution_details as execution_details + + +class StopTrading(cancel_open_orders.CancelOpenOrders): + PROFILE_ID = commons_constants.DEFAULT_PROFILE # non trading profile + + async def process( + self, execution_details: execution_details.ExecutionDetails + ) -> bool: + # cancel all open orders + await super().process(execution_details) + # select non trading profile + config = interfaces_util.get_edited_config(dict_only=False) + config.select_profile(self.PROFILE_ID) + config.save() + # reboot + interfaces_util.get_bot_api().restart_bot() + return True + + @staticmethod + def get_description() -> str: + return "Cancel all OctoBot-managed open orders on each exchange, switch to the Non-Trading profile " \ + "and restart OctoBot." diff --git a/packages/tentacles/Automation/conditions/no_condition_condition/__init__.py b/packages/tentacles/Automation/conditions/no_condition_condition/__init__.py new file mode 100644 index 0000000000..3ad0602d8a --- /dev/null +++ b/packages/tentacles/Automation/conditions/no_condition_condition/__init__.py @@ -0,0 +1 @@ +from .no_condition import NoCondition \ No newline at end of file diff --git a/packages/tentacles/Automation/conditions/no_condition_condition/metadata.json b/packages/tentacles/Automation/conditions/no_condition_condition/metadata.json new file mode 100644 index 0000000000..573202b27b --- /dev/null +++ b/packages/tentacles/Automation/conditions/no_condition_condition/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["NoCondition"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/conditions/no_condition_condition/no_condition.py b/packages/tentacles/Automation/conditions/no_condition_condition/no_condition.py new file mode 100644 index 0000000000..a279c49ca3 --- /dev/null +++ b/packages/tentacles/Automation/conditions/no_condition_condition/no_condition.py @@ -0,0 +1,34 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import octobot_commons.configuration as configuration +import octobot.automation.bases.abstract_condition as abstract_condition +import octobot.automation.bases.execution_details as execution_details + + +class NoCondition(abstract_condition.AbstractCondition): + async def process(self, execution_details: execution_details.ExecutionDetails) -> bool: + return True + + @staticmethod + def get_description() -> str: + return "Is always passing." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + return {} + + def apply_config(self, config): + # no config + pass diff --git a/packages/tentacles/Automation/conditions/scripted_condition/__init__.py b/packages/tentacles/Automation/conditions/scripted_condition/__init__.py new file mode 100644 index 0000000000..5e810dc24a --- /dev/null +++ b/packages/tentacles/Automation/conditions/scripted_condition/__init__.py @@ -0,0 +1 @@ +from .scripted_condition import ScriptedCondition \ No newline at end of file diff --git a/packages/tentacles/Automation/conditions/scripted_condition/metadata.json b/packages/tentacles/Automation/conditions/scripted_condition/metadata.json new file mode 100644 index 0000000000..1854039b1f --- /dev/null +++ b/packages/tentacles/Automation/conditions/scripted_condition/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["ScriptedCondition"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/conditions/scripted_condition/scripted_condition.py b/packages/tentacles/Automation/conditions/scripted_condition/scripted_condition.py new file mode 100644 index 0000000000..38f5e21fe9 --- /dev/null +++ b/packages/tentacles/Automation/conditions/scripted_condition/scripted_condition.py @@ -0,0 +1,105 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import typing + +import octobot_commons.configuration as configuration +import octobot_trading.api as trading_api +import octobot_commons.enums as commons_enums +import octobot.automation.bases.abstract_condition as abstract_condition +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot.errors as errors +import tentacles.Meta.DSL_operators as dsl_operators +import octobot.automation.bases.execution_details as execution_details + + + +class ScriptedCondition(abstract_condition.AbstractCondition): + SCRIPT = "script" + EXCHANGE = "exchange" + + def __init__(self): + super().__init__() + self.script: str = "" + self.exchange_name: str = "" + + self._dsl_interpreter: typing.Optional[dsl_interpreter.Interpreter] = None + + async def process(self, execution_details: execution_details.ExecutionDetails) -> bool: + if self._dsl_interpreter: + script_result = await self._dsl_interpreter.interprete(self.script) + return bool(script_result) + raise errors.InvalidAutomationConfigError("Scripted condition is not properly configured, the script is likely invalid.", self.get_name()) + + @staticmethod + def get_description() -> str: + return "Evaluates a scripted condition using the OctoBot DSL." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + exchanges = list(trading_api.get_exchange_names()) + return { + self.SCRIPT: UI.user_input( + self.SCRIPT, commons_enums.UserInputTypes.TEXT, "", inputs, + title="Scripted condition: the OctoBot DSL expression to evaluate (more info in automation details). Its return value will be converted to a boolean using \"bool()\" to determine if the condition is met.", + parent_input_name=step_name, + ), + self.EXCHANGE: UI.user_input( + self.EXCHANGE, commons_enums.UserInputTypes.OPTIONS, exchanges[0] if exchanges[0] else "binance", inputs, + options=exchanges, + title="Exchange: the name of the exchange to use for the condition.", + parent_input_name=step_name, + ) + } + + def apply_config(self, config): + self.script = config[self.SCRIPT] + self.exchange_name = config[self.EXCHANGE] + if self.script and self.exchange_name: + self._dsl_interpreter = self._create_dsl_interpreter() + self._validate_script() + else: + self._dsl_interpreter = None + + def _validate_script(self): + try: + self._dsl_interpreter.prepare(self.script) + self.logger.info( + f"Formula interpreter successfully prepared \"{self.script}\" condition" + ) + except Exception as e: + self.logger.error(f"Error when parsing condition {self.script}: {e}") + raise e + + def _create_dsl_interpreter(self): + exchange_manager = self._get_exchange_manager() + ohlcv_operators = [] + portfolio_operators = [] + if exchange_manager is not None: + ohlcv_operators = dsl_operators.exchange_operators.create_ohlcv_operators( + exchange_manager, None, None + ) + portfolio_operators = dsl_operators.exchange_operators.create_portfolio_operators( + exchange_manager + ) + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + ohlcv_operators + portfolio_operators + ) + + def _get_exchange_manager(self): + for exchange_id in trading_api.get_exchange_ids(): + exchange_manager = trading_api.get_exchange_manager_from_exchange_id(exchange_id) + if exchange_manager.exchange_name == self.exchange_name and exchange_manager.is_backtesting == False: + return exchange_manager + raise errors.InvalidAutomationConfigError(f"No exchange manager found for exchange name: {self.exchange_name}", self.get_name()) diff --git a/packages/tentacles/Automation/trigger_events/holding_threshold_event/__init__.py b/packages/tentacles/Automation/trigger_events/holding_threshold_event/__init__.py new file mode 100644 index 0000000000..80b1bff61c --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/holding_threshold_event/__init__.py @@ -0,0 +1 @@ +from .holding_threshold import HoldingThreshold \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/holding_threshold_event/holding_threshold.py b/packages/tentacles/Automation/trigger_events/holding_threshold_event/holding_threshold.py new file mode 100644 index 0000000000..0301248402 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/holding_threshold_event/holding_threshold.py @@ -0,0 +1,154 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import decimal +import typing +import asyncio + +import async_channel +import async_channel.enums as channel_enums +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.configuration as configuration +import octobot_commons.channels_name as channels_name +import octobot_trading.api as trading_api +import octobot_trading.exchange_channel as exchanges_channel +import octobot_trading.constants as trading_constants +import octobot_trading.util as trading_util +import octobot.automation.bases.abstract_channel_based_trigger_event as abstract_channel_based_trigger_event +import octobot.errors as errors + +if typing.TYPE_CHECKING: + import octobot_trading.exchanges as trading_exchanges + + +INITIALIZATION_TIMEOUT = 3 * commons_constants.MINUTE_TO_SECONDS + + +class HoldingThreshold(abstract_channel_based_trigger_event.AbstractChannelBasedTriggerEvent): + ASSET_NAME = "asset_name" + AMOUNT = "amount" + STOP_ON_INFERIOR = "stop_on_inferior" + EXCHANGE = "exchange" + + def __init__(self): + super().__init__() + # config + self.asset_name: str = None # type: ignore + self.amount: decimal.Decimal = trading_constants.ZERO + self.stop_on_inferior: bool = False + + @staticmethod + def get_description() -> str: + return ( + "Will trigger when the holdings of the given asset reach the given amount." \ + "Example: a Amount of 0.01 will trigger the automation if your OctoBot holdings of BTC are 0.01 or bellow ." + ) + + def get_user_inputs( + self, UI: configuration.UserInputFactory, inputs: dict, step_name: str + ) -> dict: + return { + self.EXCHANGE: UI.user_input( + self.EXCHANGE, commons_enums.UserInputTypes.TEXT, "binance", inputs, + title="Exchange: exchange to watch price on. Example: binance. Leave empty to enable on all exchanges.", + parent_input_name=step_name, + ), + self.ASSET_NAME: UI.user_input( + self.ASSET_NAME, commons_enums.UserInputTypes.TEXT, "BTC", inputs, + title="Asset name: asset to watch holdings on. Example: BTC", + parent_input_name=step_name, + other_schema_values={"minLength": 1} + ), + self.AMOUNT: UI.user_input( + self.AMOUNT, commons_enums.UserInputTypes.FLOAT, 0.0, inputs, + title="Amount: amount of the asset to watch holdings on. Example: 0.01", + parent_input_name=step_name, + min_val=0, + other_schema_values={"exclusiveMinimum": True} + ), + self.STOP_ON_INFERIOR: UI.user_input( + self.STOP_ON_INFERIOR, commons_enums.UserInputTypes.BOOLEAN, True, inputs, + title="Stop on inferior: stop the automation if the holdings are inferior to the amount", + parent_input_name=step_name, + ), + } + + def apply_config(self, config: dict) -> None: + self.clear_future() + self.exchange = config[self.EXCHANGE] or None + self.asset_name = config[self.ASSET_NAME] + self.amount = decimal.Decimal(str(config[self.AMOUNT])) + self.stop_on_inferior = config[self.STOP_ON_INFERIOR] + if not self.exchange or not self.asset_name or not self.amount: + raise errors.InvalidAutomationConfigError("Exchange, asset name and amount must be set", self.get_name()) + + def _check_threshold( + self, exchange_manager: "trading_exchanges.ExchangeManager" + ) -> tuple[bool, typing.Optional[str]]: + holdings = exchange_manager.exchange_personal_data.portfolio_manager.portfolio.get_currency_portfolio(self.asset_name) + if self.stop_on_inferior: + if holdings.total <= self.amount: + return True, self._get_reason(holdings) + else: + if holdings.total >= self.amount: + return True, self._get_reason(holdings) + return False, None + + def _get_reason(self, holdings) -> str: + return ( + f"Current {self.asset_name} holdings of {float(holdings.total)} are " + f"{'lower' if self.stop_on_inferior else 'higher'} than the {float(self.amount)} threshold." + ) + + async def register_consumers(self, exchange_id: str) -> list[async_channel.Consumer]: + return [ + await exchanges_channel.get_chan( + channels_name.OctoBotTradingChannelsName.BALANCE_CHANNEL.value, exchange_id + ).new_consumer( + self.balance_callback, + priority_level=channel_enums.ChannelConsumerPriorityLevels.HIGH.value, + ) + ] + + async def balance_callback(self, exchange: str, exchange_id: str, balance): + if self.should_stop: + # do not go any further if the action has been stopped + return + await self.perform_check(exchange_id) + + async def check_initial_event(self): + exchange_manager_ids = [trading_api.get_exchange_manager_id( + trading_api.get_exchange_managers_from_exchange_name(self.exchange)[0] + )] if self.exchange else trading_api.get_exchange_ids() + for exchange_id in exchange_manager_ids: + exchange_manager = trading_api.get_exchange_manager_from_exchange_name_and_id( + self.exchange, exchange_id + ) if self.exchange else trading_api.get_exchange_manager_from_exchange_id(exchange_id) + try: + await trading_util.wait_for_topic_init( + exchange_manager, INITIALIZATION_TIMEOUT, commons_enums.InitializationEventExchangeTopics.BALANCE.value + ) + await self.perform_check(exchange_id=exchange_id) + except asyncio.TimeoutError: + self.logger.error(f"Initialization of balance for {exchange_manager.exchange_name} took more than {INITIALIZATION_TIMEOUT} seconds, skipping initial check") + + async def perform_check(self, exchange_id: str): + exchange_manager = trading_api.get_exchange_manager_from_exchange_name_and_id( + self.exchange, exchange_id + ) if self.exchange else trading_api.get_exchange_manager_from_exchange_id(exchange_id) + is_threshold_met, reason = self._check_threshold(exchange_manager) + if is_threshold_met: + self.trigger(description=reason) diff --git a/packages/tentacles/Automation/trigger_events/holding_threshold_event/metadata.json b/packages/tentacles/Automation/trigger_events/holding_threshold_event/metadata.json new file mode 100644 index 0000000000..a19ed71ccf --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/holding_threshold_event/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["HoldingThreshold"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/holding_threshold_event/tests/__init__.py b/packages/tentacles/Automation/trigger_events/holding_threshold_event/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/holding_threshold_event/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Automation/trigger_events/holding_threshold_event/tests/test_holding_threshold.py b/packages/tentacles/Automation/trigger_events/holding_threshold_event/tests/test_holding_threshold.py new file mode 100644 index 0000000000..c9907b8695 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/holding_threshold_event/tests/test_holding_threshold.py @@ -0,0 +1,236 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import asyncio + +import mock +import decimal +import pytest + +import octobot_commons.enums as commons_enums +import octobot_trading.personal_data + +import tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold as holding_threshold + + +class TestHoldingThreshold: + """Tests for HoldingThreshold""" + + def _create_trigger(self, asset_name="BTC", amount=10.0, stop_on_inferior=True, exchange="binance"): + """Create and configure a HoldingThreshold instance.""" + trigger = holding_threshold.HoldingThreshold() + trigger.apply_config({ + holding_threshold.HoldingThreshold.EXCHANGE: exchange, + holding_threshold.HoldingThreshold.ASSET_NAME: asset_name, + holding_threshold.HoldingThreshold.AMOUNT: amount, + holding_threshold.HoldingThreshold.STOP_ON_INFERIOR: stop_on_inferior, + }) + return trigger + + def test_initialization(self): + """Test HoldingThreshold initialization via apply_config""" + trigger = self._create_trigger(asset_name="BTC", amount=10.5, stop_on_inferior=True) + assert trigger.asset_name == "BTC" + assert trigger.amount == decimal.Decimal("10.5") + assert trigger.stop_on_inferior is True + + def test_initialization_with_decimal(self): + """Test HoldingThreshold initialization with Decimal amount""" + trigger = self._create_trigger(asset_name="ETH", amount=5.25, stop_on_inferior=False) + assert trigger.amount == decimal.Decimal("5.25") + + def test_check_threshold_inferior_true(self): + """Test _check_threshold when stop_on_inferior is True and condition is met""" + trigger = self._create_trigger(asset_name="BTC", amount=10.0, stop_on_inferior=True) + + exchange_manager = mock.Mock() + portfolio_currency = octobot_trading.personal_data.SpotAsset( + name="BTC", + available=decimal.Decimal("5.0"), + total=decimal.Decimal("5.0") # Less than 10.0 + ) + with mock.patch.object( + exchange_manager.exchange_personal_data.portfolio_manager.portfolio, + "get_currency_portfolio", + return_value=portfolio_currency + ): + is_met, reason = trigger._check_threshold(exchange_manager) + assert is_met is True + assert reason == "Current BTC holdings of 5.0 are lower than the 10.0 threshold." + + def test_check_threshold_inferior_false(self): + """Test _check_threshold when stop_on_inferior is True and condition is not met""" + trigger = self._create_trigger(asset_name="BTC", amount=10.0, stop_on_inferior=True) + + exchange_manager = mock.Mock() + portfolio_currency = octobot_trading.personal_data.SpotAsset( + name="BTC", + available=decimal.Decimal("5.0"), + total=decimal.Decimal("15.0") # More than 10.0 + ) + with mock.patch.object( + exchange_manager.exchange_personal_data.portfolio_manager.portfolio, + "get_currency_portfolio", + return_value=portfolio_currency + ): + is_met, reason = trigger._check_threshold(exchange_manager) + assert is_met is False + assert reason is None + + def test_check_threshold_superior_true(self): + """Test _check_threshold when stop_on_inferior is False and condition is met""" + trigger = self._create_trigger(asset_name="ETH", amount=10.0, stop_on_inferior=False) + + exchange_manager = mock.Mock() + portfolio_currency = octobot_trading.personal_data.SpotAsset( + name="ETH", + available=decimal.Decimal("5.0"), + total=decimal.Decimal("15.0") # More than 10.0 + ) + with mock.patch.object( + exchange_manager.exchange_personal_data.portfolio_manager.portfolio, + "get_currency_portfolio", + return_value=portfolio_currency + ): + is_met, reason = trigger._check_threshold(exchange_manager) + assert is_met is True + assert reason == "Current ETH holdings of 15.0 are higher than the 10.0 threshold." + + def test_check_threshold_superior_false(self): + """Test _check_threshold when stop_on_inferior is False and condition is not met""" + trigger = self._create_trigger(asset_name="ETH", amount=10.0, stop_on_inferior=False) + + exchange_manager = mock.Mock() + portfolio_currency = octobot_trading.personal_data.SpotAsset( + name="ETH", + available=decimal.Decimal("5.0"), + total=decimal.Decimal("5.0") # Less than 10.0 + ) + with mock.patch.object( + exchange_manager.exchange_personal_data.portfolio_manager.portfolio, + "get_currency_portfolio", + return_value=portfolio_currency + ): + is_met, reason = trigger._check_threshold(exchange_manager) + assert is_met is False + assert reason is None + + def test_check_threshold_exact_amount_inferior(self): + """Test _check_threshold when holdings equal amount and stop_on_inferior is True""" + trigger = self._create_trigger(asset_name="BTC", amount=10.0, stop_on_inferior=True) + + exchange_manager = mock.Mock() + portfolio_currency = octobot_trading.personal_data.SpotAsset( + name="BTC", + available=decimal.Decimal("5.0"), + total=decimal.Decimal("10.0") # Equal to 10.0 + ) + with mock.patch.object( + exchange_manager.exchange_personal_data.portfolio_manager.portfolio, + "get_currency_portfolio", + return_value=portfolio_currency + ): + is_met, reason = trigger._check_threshold(exchange_manager) + assert is_met is True + assert reason == "Current BTC holdings of 10.0 are lower than the 10.0 threshold." + + def test_check_threshold_returns_reason_in_tuple(self): + """Test _check_threshold returns the reason as second element of tuple when met""" + trigger = self._create_trigger(asset_name="BTC", amount=10.0, stop_on_inferior=True) + + exchange_manager = mock.Mock() + portfolio_currency = octobot_trading.personal_data.SpotAsset( + name="BTC", + available=decimal.Decimal("5.0"), + total=decimal.Decimal("5.0") + ) + with mock.patch.object( + exchange_manager.exchange_personal_data.portfolio_manager.portfolio, + "get_currency_portfolio", + return_value=portfolio_currency + ): + is_met, reason = trigger._check_threshold(exchange_manager) + assert is_met is True + assert reason == "Current BTC holdings of 5.0 are lower than the 10.0 threshold." + + @pytest.mark.asyncio + async def test_check_initial_event_with_exchange_calls_perform_check(self): + """When exchange is set, check_initial_event resolves the exchange and calls perform_check.""" + trigger = self._create_trigger(exchange="binance") + mock_exchange_manager = mock.Mock() + + with mock.patch("tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold.trading_api") as mock_trading_api: + with mock.patch("tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold.trading_util") as mock_trading_util: + mock_trading_api.get_exchange_managers_from_exchange_name.return_value = [mock_exchange_manager] + mock_trading_api.get_exchange_manager_id.return_value = "exchange_1" + mock_trading_api.get_exchange_manager_from_exchange_name_and_id.return_value = mock_exchange_manager + mock_trading_util.wait_for_topic_init = mock.AsyncMock() + + with mock.patch.object(trigger, "perform_check", new_callable=mock.AsyncMock) as mock_perform_check: + await trigger.check_initial_event() + + mock_trading_api.get_exchange_managers_from_exchange_name.assert_called_once_with("binance") + mock_trading_util.wait_for_topic_init.assert_awaited_once_with( + mock_exchange_manager, + holding_threshold.INITIALIZATION_TIMEOUT, + commons_enums.InitializationEventExchangeTopics.BALANCE.value, + ) + mock_perform_check.assert_awaited_once_with(exchange_id="exchange_1") + + @pytest.mark.asyncio + async def test_check_initial_event_without_exchange_iterates_all(self): + """When exchange is None, check_initial_event iterates all exchange IDs.""" + trigger = holding_threshold.HoldingThreshold() + trigger.exchange = None + trigger.asset_name = "BTC" + trigger.amount = decimal.Decimal("10.0") + trigger.stop_on_inferior = True + + mock_manager_1 = mock.Mock() + mock_manager_2 = mock.Mock() + + with mock.patch("tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold.trading_api") as mock_trading_api: + with mock.patch("tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold.trading_util") as mock_trading_util: + mock_trading_api.get_exchange_ids.return_value = ["id_1", "id_2"] + mock_trading_api.get_exchange_manager_from_exchange_id.side_effect = [mock_manager_1, mock_manager_2] + mock_trading_util.wait_for_topic_init = mock.AsyncMock() + + with mock.patch.object(trigger, "perform_check", new_callable=mock.AsyncMock) as mock_perform_check: + await trigger.check_initial_event() + + mock_trading_api.get_exchange_ids.assert_called_once() + assert mock_trading_util.wait_for_topic_init.call_count == 2 + assert mock_perform_check.await_count == 2 + mock_perform_check.assert_any_await(exchange_id="id_1") + mock_perform_check.assert_any_await(exchange_id="id_2") + + @pytest.mark.asyncio + async def test_check_initial_event_timeout_skips_perform_check(self): + """When wait_for_topic_init times out, perform_check is not called.""" + trigger = self._create_trigger(exchange="binance") + mock_exchange_manager = mock.Mock() + mock_exchange_manager.exchange_name = "binance" + + with mock.patch("tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold.trading_api") as mock_trading_api: + with mock.patch("tentacles.Automation.trigger_events.holding_threshold_event.holding_threshold.trading_util") as mock_trading_util: + mock_trading_api.get_exchange_managers_from_exchange_name.return_value = [mock_exchange_manager] + mock_trading_api.get_exchange_manager_id.return_value = "exchange_1" + mock_trading_api.get_exchange_manager_from_exchange_name_and_id.return_value = mock_exchange_manager + mock_trading_util.wait_for_topic_init = mock.AsyncMock(side_effect=asyncio.TimeoutError) + + with mock.patch.object(trigger, "perform_check", new_callable=mock.AsyncMock) as mock_perform_check: + await trigger.check_initial_event() + + mock_perform_check.assert_not_awaited() diff --git a/packages/tentacles/Automation/trigger_events/period_check_event/__init__.py b/packages/tentacles/Automation/trigger_events/period_check_event/__init__.py new file mode 100644 index 0000000000..b4d1fb3dce --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/period_check_event/__init__.py @@ -0,0 +1 @@ +from .period_check import PeriodicCheck \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/period_check_event/metadata.json b/packages/tentacles/Automation/trigger_events/period_check_event/metadata.json new file mode 100644 index 0000000000..d0d357980d --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/period_check_event/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["PeriodicCheck"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/period_check_event/period_check.py b/packages/tentacles/Automation/trigger_events/period_check_event/period_check.py new file mode 100644 index 0000000000..73d60a99b3 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/period_check_event/period_check.py @@ -0,0 +1,57 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import asyncio +import typing + +import octobot_commons.enums as commons_enums +import octobot_commons.configuration as configuration +import octobot.automation.bases.abstract_trigger_event as abstract_trigger_event + + +class PeriodicCheck(abstract_trigger_event.AbstractTriggerEvent): + UPDATE_PERIOD = "update_period" + + def __init__(self): + super().__init__() + self.waiter_task = None + self.waiting_time = None + + async def stop(self): + await super().stop() + if self.waiter_task is not None and not self.waiter_task.done(): + self.waiter_task.cancel() + + async def _get_next_event(self) -> typing.Optional[str]: + if self.should_stop: + raise StopIteration + self.waiter_task = asyncio.create_task(asyncio.sleep(self.waiting_time)) + await self.waiter_task + + @staticmethod + def get_description() -> str: + return "Will trigger periodically, at the specified update period." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + return { + self.UPDATE_PERIOD: UI.user_input( + self.UPDATE_PERIOD, commons_enums.UserInputTypes.FLOAT, 300, inputs, + title="Update period: number of seconds to wait between each update.", + parent_input_name=step_name, + ) + } + + def apply_config(self, config): + self.waiting_time = config[self.UPDATE_PERIOD] diff --git a/packages/tentacles/Automation/trigger_events/price_threshold_event/__init__.py b/packages/tentacles/Automation/trigger_events/price_threshold_event/__init__.py new file mode 100644 index 0000000000..ae15a3f65b --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/price_threshold_event/__init__.py @@ -0,0 +1 @@ +from .price_threshold import PriceThreshold \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/price_threshold_event/metadata.json b/packages/tentacles/Automation/trigger_events/price_threshold_event/metadata.json new file mode 100644 index 0000000000..228cd87217 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/price_threshold_event/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["PriceThreshold"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/price_threshold_event/price_threshold.py b/packages/tentacles/Automation/trigger_events/price_threshold_event/price_threshold.py new file mode 100644 index 0000000000..a73c0978db --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/price_threshold_event/price_threshold.py @@ -0,0 +1,105 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import decimal + +import async_channel +import async_channel.enums as channel_enums +import octobot_commons.enums as commons_enums +import octobot_commons.configuration as configuration +import octobot_commons.channels_name as channels_name +import octobot_trading.exchange_channel as exchanges_channel +import octobot.automation.bases.abstract_channel_based_trigger_event as abstract_channel_based_trigger_event + +class PriceThreshold(abstract_channel_based_trigger_event.AbstractChannelBasedTriggerEvent): + TARGET_PRICE = "target_price" + SYMBOL = "symbol" + TRIGGER_ONLY_ONCE = "trigger_only_once" + MAX_TRIGGER_FREQUENCY = "max_trigger_frequency" + + def __init__(self): + super().__init__() + # config + self.target_price: decimal.Decimal = None # type: ignore + self.last_price: decimal.Decimal = None # type: ignore + + async def register_consumers(self, exchange_id: str) -> list[async_channel.Consumer]: + return [ + await exchanges_channel.get_chan( + channels_name.OctoBotTradingChannelsName.MARK_PRICE_CHANNEL.value, exchange_id + ).new_consumer( + self.mark_price_callback, + priority_level=channel_enums.ChannelConsumerPriorityLevels.HIGH.value, + ) + ] + + async def mark_price_callback( + self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, mark_price + ): + if self.should_stop: + # do not go any further if the action has been stopped + return + self._check_threshold(mark_price) + self._update_last_price(mark_price) + + def _update_last_price(self, mark_price): + self.last_price = mark_price + + def _check_threshold(self, mark_price): + if self.last_price is None: + return + if mark_price >= self.target_price > self.last_price or mark_price <= self.target_price < self.last_price: + # mark_price crossed self.target_price threshold + self.trigger(description=f"Price crossed {self.target_price} threshold") + + @staticmethod + def get_description() -> str: + return "Will trigger when the price of the given symbol crosses the given price." + + def get_user_inputs( + self, UI: configuration.UserInputFactory, inputs: dict, step_name: str + ) -> dict: + return { + self.SYMBOL: UI.user_input( + self.SYMBOL, commons_enums.UserInputTypes.TEXT, "BTC/USDT", inputs, + title="Symbol: symbol to watch price on. Example: ETH/BTC or BTC/USDT:USDT", + parent_input_name=step_name, + ), + self.TARGET_PRICE: UI.user_input( + self.TARGET_PRICE, commons_enums.UserInputTypes.FLOAT, 300, inputs, + title="Target price: price triggering the event.", + parent_input_name=step_name, + ), + self.MAX_TRIGGER_FREQUENCY: UI.user_input( + self.MAX_TRIGGER_FREQUENCY, commons_enums.UserInputTypes.FLOAT, 0.0, inputs, + title="Maximum trigger frequency: required time between each trigger. In seconds. " + "Useful to avoid spamming in certain situations.", + parent_input_name=step_name, + ), + self.TRIGGER_ONLY_ONCE: UI.user_input( + self.TRIGGER_ONLY_ONCE, commons_enums.UserInputTypes.BOOLEAN, False, inputs, + title="Trigger only once: can only trigger once until OctoBot restart or " + "the automation configuration changes.", + parent_input_name=step_name, + ), + } + + def apply_config(self, config): + self.clear_future() + self.last_price = None # type: ignore + self.symbol = config[self.SYMBOL] + self.target_price = decimal.Decimal(str(config[self.TARGET_PRICE])) + self.trigger_only_once = config[self.TRIGGER_ONLY_ONCE] + self.max_trigger_frequency = config[self.MAX_TRIGGER_FREQUENCY] diff --git a/packages/tentacles/Automation/trigger_events/profitability_threshold_event/__init__.py b/packages/tentacles/Automation/trigger_events/profitability_threshold_event/__init__.py new file mode 100644 index 0000000000..6934daa2ed --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/profitability_threshold_event/__init__.py @@ -0,0 +1 @@ +from .profitability_threshold import ProfitabilityThreshold \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/profitability_threshold_event/metadata.json b/packages/tentacles/Automation/trigger_events/profitability_threshold_event/metadata.json new file mode 100644 index 0000000000..fe45f65ee5 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/profitability_threshold_event/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["ProfitabilityThreshold"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/profitability_threshold_event/profitability_threshold.py b/packages/tentacles/Automation/trigger_events/profitability_threshold_event/profitability_threshold.py new file mode 100644 index 0000000000..9dcde95a53 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/profitability_threshold_event/profitability_threshold.py @@ -0,0 +1,123 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import decimal +import time +import sortedcontainers + +import async_channel +import async_channel.enums as channel_enums +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_commons.configuration as configuration +import octobot_commons.channels_name as channels_name +import octobot_trading.constants as trading_constants +import octobot_trading.exchange_channel as exchanges_channel +import octobot.automation.bases.abstract_channel_based_trigger_event as abstract_channel_based_trigger_event + + +class ProfitabilityThreshold(abstract_channel_based_trigger_event.AbstractChannelBasedTriggerEvent): + PERCENT_CHANGE = "percent_change" + TIME_PERIOD = "time_period" + TRIGGER_ONLY_ONCE = "trigger_only_once" + MAX_TRIGGER_FREQUENCY = "max_trigger_frequency" + + def __init__(self): + super().__init__() + self.percent_change: decimal.Decimal = None # type: ignore + self.time_period: int = None # type: ignore + self.profitability_by_time: sortedcontainers.SortedDict = None # type: ignore + + async def register_consumers(self, exchange_id: str) -> list[async_channel.Consumer]: + return [ + await exchanges_channel.get_chan( + channels_name.OctoBotTradingChannelsName.BALANCE_PROFITABILITY_CHANNEL.value, exchange_id + ).new_consumer( + self.profitability_callback, + priority_level=channel_enums.ChannelConsumerPriorityLevels.HIGH.value, + ) + ] + + async def profitability_callback( + self, + exchange: str, + exchange_id: str, + profitability, + profitability_percent, + market_profitability_percent, + initial_portfolio_current_profitability, + ): + if self.should_stop: + # do not go any further if the action has been stopped + return + self._update_profitability_by_time(profitability_percent) + self._check_threshold(profitability_percent) + + def _update_profitability_by_time(self, profitability_percent): + self.profitability_by_time[int(time.time())] = profitability_percent + current_time = time.time() + for profitability_time in list(self.profitability_by_time): + if profitability_time - current_time > self.time_period: + self.profitability_by_time.pop(profitability_time) + + def _check_threshold(self, profitability_percent): + oldest_compared_profitability = next(iter(self.profitability_by_time.values())) + if trading_constants.ZERO < self.percent_change <= profitability_percent - oldest_compared_profitability: + # profitability_percent reached or when above self.percent_change + self.trigger(description=f"Profitability reached {self.percent_change}%") + if trading_constants.ZERO > self.percent_change >= profitability_percent - oldest_compared_profitability: + # profitability_percent reached or when bellow self.percent_change + self.trigger(description=f"Profitability reached {self.percent_change}%") + + @staticmethod + def get_description() -> str: + return "Will trigger when profitability reaches the given % change on the given time window. " \ + "Example: a Percent change of 10 will trigger the automation if your OctoBot profitability " \ + "changes from 0 to 10 or from 30 to 40." + + def get_user_inputs(self, UI: configuration.UserInputFactory, inputs: dict, step_name: str) -> dict: + return { + self.PERCENT_CHANGE: UI.user_input( + self.PERCENT_CHANGE, commons_enums.UserInputTypes.FLOAT, 35, inputs, + title="Percent change: minimum change of % profitability to trigger the automation. " + "Can be negative to trigger on losses.", + parent_input_name=step_name, + ), + self.TIME_PERIOD: UI.user_input( + self.TIME_PERIOD, commons_enums.UserInputTypes.FLOAT, 300, inputs, + title="Time period: maximum time to consider to compute profitability changes. In minutes.", + parent_input_name=step_name, + ), + self.MAX_TRIGGER_FREQUENCY: UI.user_input( + self.MAX_TRIGGER_FREQUENCY, commons_enums.UserInputTypes.FLOAT, 0.0, inputs, + title="Maximum trigger frequency: required time between each trigger. In seconds. " + "Useful to avoid spamming in certain situations.", + parent_input_name=step_name, + ), + self.TRIGGER_ONLY_ONCE: UI.user_input( + self.TRIGGER_ONLY_ONCE, commons_enums.UserInputTypes.BOOLEAN, False, inputs, + title="Trigger only once: can only trigger once until OctoBot restart or " + "the automation configuration changes.", + parent_input_name=step_name, + ), + } + + def apply_config(self, config): + self.clear_future() + self.profitability_by_time = sortedcontainers.SortedDict() + self.percent_change = decimal.Decimal(str(config[self.PERCENT_CHANGE])) + self.time_period = config[self.TIME_PERIOD] * commons_constants.MINUTE_TO_SECONDS + self.trigger_only_once = config[self.TRIGGER_ONLY_ONCE] + self.max_trigger_frequency = config[self.MAX_TRIGGER_FREQUENCY] diff --git a/packages/tentacles/Automation/trigger_events/volatility_threshold_event/__init__.py b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/__init__.py new file mode 100644 index 0000000000..e0bd3fb98c --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/__init__.py @@ -0,0 +1 @@ +from .volatility_threshold import VolatilityThreshold \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/volatility_threshold_event/metadata.json b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/metadata.json new file mode 100644 index 0000000000..b5ecf1d057 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["VolatilityThreshold"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Automation/trigger_events/volatility_threshold_event/tests/__init__.py b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Automation/trigger_events/volatility_threshold_event/tests/test_volatility_threshold.py b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/tests/test_volatility_threshold.py new file mode 100644 index 0000000000..a4aee25967 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/tests/test_volatility_threshold.py @@ -0,0 +1,579 @@ +# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot) +# Copyright (c) 2023 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import mock +import decimal +import pytest + +import octobot.errors as errors +import octobot_trading.constants as trading_constants + +import tentacles.Automation.trigger_events.volatility_threshold_event.volatility_threshold as volatility_threshold + + +class TestHistoricalMinAndMaxPrice: + """Tests for HistoricalMinAndMaxPrice""" + + def test_initialization(self): + """Test HistoricalMinAndMaxPrice initialization""" + hist_price = volatility_threshold.HistoricalMinAndMaxPrice( + minute_ts=12345, + min_price=decimal.Decimal("100.0"), + max_price=decimal.Decimal("110.0") + ) + assert hist_price.minute_ts == 12345 + assert hist_price.min_price == decimal.Decimal("100.0") + assert hist_price.max_price == decimal.Decimal("110.0") + + def test_update_with_new_min(self): + """Test update method with new minimum price""" + hist_price = volatility_threshold.HistoricalMinAndMaxPrice( + minute_ts=12345, + min_price=decimal.Decimal("100.0"), + max_price=decimal.Decimal("110.0") + ) + hist_price.update(decimal.Decimal("95.0")) + assert hist_price.min_price == decimal.Decimal("95.0") + assert hist_price.max_price == decimal.Decimal("110.0") + + def test_update_with_new_max(self): + """Test update method with new maximum price""" + hist_price = volatility_threshold.HistoricalMinAndMaxPrice( + minute_ts=12345, + min_price=decimal.Decimal("100.0"), + max_price=decimal.Decimal("110.0") + ) + hist_price.update(decimal.Decimal("115.0")) + assert hist_price.min_price == decimal.Decimal("100.0") + assert hist_price.max_price == decimal.Decimal("115.0") + + def test_update_with_middle_price(self): + """Test update method with price between min and max""" + hist_price = volatility_threshold.HistoricalMinAndMaxPrice( + minute_ts=12345, + min_price=decimal.Decimal("100.0"), + max_price=decimal.Decimal("110.0") + ) + hist_price.update(decimal.Decimal("105.0")) + assert hist_price.min_price == decimal.Decimal("100.0") + assert hist_price.max_price == decimal.Decimal("110.0") + + def test_update_multiple_times(self): + """Test multiple updates""" + hist_price = volatility_threshold.HistoricalMinAndMaxPrice( + minute_ts=12345, + min_price=decimal.Decimal("100.0"), + max_price=decimal.Decimal("100.0") + ) + hist_price.update(decimal.Decimal("95.0")) + hist_price.update(decimal.Decimal("120.0")) + hist_price.update(decimal.Decimal("90.0")) + hist_price.update(decimal.Decimal("125.0")) + assert hist_price.min_price == decimal.Decimal("90.0") + assert hist_price.max_price == decimal.Decimal("125.0") + + +class TestVolatilityThresholdChecker: + """Tests for VolatilityThresholdChecker""" + + def _create_checker( + self, + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0, + ): + """Create a VolatilityThresholdChecker instance with computed ratios. + + Ratios are computed manually to allow testing with zero percentage + values (validate_config rejects zero values). + """ + checker = volatility_threshold.VolatilityThresholdChecker( + symbol=symbol, + period_in_minutes=period_in_minutes, + max_allowed_positive_percentage_change=decimal.Decimal(str(max_allowed_positive_percentage_change)), + max_allowed_negative_percentage_change=decimal.Decimal(str(max_allowed_negative_percentage_change)), + ) + checker._max_positive_ratio = ( + trading_constants.ONE + checker.max_allowed_positive_percentage_change / decimal.Decimal(100) + ) + checker._max_negative_ratio = ( + trading_constants.ONE - checker.max_allowed_negative_percentage_change / decimal.Decimal(100) + ) + return checker + + def test_initialization(self): + """Test VolatilityThresholdChecker initialization""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + assert checker.symbol == "BTC/USDT" + assert checker.period_in_minutes == 10 + assert checker.max_allowed_positive_percentage_change == decimal.Decimal("5.0") + assert checker.max_allowed_negative_percentage_change == decimal.Decimal("3.0") + assert checker._max_positive_ratio == decimal.Decimal("1.05") + assert checker._max_negative_ratio == decimal.Decimal("0.97") + + def test_initialization_with_decimals(self): + """Test VolatilityThresholdChecker initialization with Decimal values""" + checker = self._create_checker( + symbol="ETH/USDT", + period_in_minutes=5, + max_allowed_positive_percentage_change=10.5, + max_allowed_negative_percentage_change=5.5 + ) + assert checker._max_positive_ratio == decimal.Decimal("1.105") + assert checker._max_negative_ratio == decimal.Decimal("0.945") + + def test_validate_config_valid(self): + """Test validate_config succeeds and sets ratios for valid config""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=decimal.Decimal("5.0"), + max_allowed_negative_percentage_change=decimal.Decimal("3.0"), + ) + checker.validate_config() + assert checker._max_positive_ratio == decimal.Decimal("1.05") + assert checker._max_negative_ratio == decimal.Decimal("0.97") + + def test_validate_config_missing_symbol(self): + """Test validate_config raises when symbol is not set""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol=None, + period_in_minutes=10, + max_allowed_positive_percentage_change=decimal.Decimal("5.0"), + max_allowed_negative_percentage_change=decimal.Decimal("3.0"), + ) + with pytest.raises(errors.InvalidAutomationConfigError, match="symbol and period in minutes must be set"): + checker.validate_config() + + def test_validate_config_missing_period(self): + """Test validate_config raises when period_in_minutes is not set""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol="BTC/USDT", + period_in_minutes=0, + max_allowed_positive_percentage_change=decimal.Decimal("5.0"), + max_allowed_negative_percentage_change=decimal.Decimal("3.0"), + ) + with pytest.raises(errors.InvalidAutomationConfigError, match="symbol and period in minutes must be set"): + checker.validate_config() + + def test_validate_config_zero_positive_percentage(self): + """Test validate_config raises when max_allowed_positive_percentage_change is zero""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=trading_constants.ZERO, + max_allowed_negative_percentage_change=decimal.Decimal("3.0"), + ) + with pytest.raises(errors.InvalidAutomationConfigError, match="max allowed positive percentage change must be > 0"): + checker.validate_config() + + def test_validate_config_negative_positive_percentage(self): + """Test validate_config raises when max_allowed_positive_percentage_change is negative""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=decimal.Decimal("-1.0"), + max_allowed_negative_percentage_change=decimal.Decimal("3.0"), + ) + with pytest.raises(errors.InvalidAutomationConfigError, match="max allowed positive percentage change must be > 0"): + checker.validate_config() + + def test_validate_config_zero_negative_percentage(self): + """Test validate_config raises when max_allowed_negative_percentage_change is zero""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=decimal.Decimal("5.0"), + max_allowed_negative_percentage_change=trading_constants.ZERO, + ) + with pytest.raises(errors.InvalidAutomationConfigError, match="max allowed negative percentage change must be > 0"): + checker.validate_config() + + def test_validate_config_negative_negative_percentage(self): + """Test validate_config raises when max_allowed_negative_percentage_change is negative""" + checker = volatility_threshold.VolatilityThresholdChecker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=decimal.Decimal("5.0"), + max_allowed_negative_percentage_change=decimal.Decimal("-2.0"), + ) + with pytest.raises(errors.InvalidAutomationConfigError, match="max allowed negative percentage change must be > 0"): + checker.validate_config() + + def test_check_threshold_not_enough_data(self): + """Test _check_threshold returns False when not enough historical data""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + # No data + is_met, reason = checker._check_threshold() + assert is_met is False + assert reason is None + + # Only one data point + checker._historical_min_and_max_price_by_minute_ts.append( + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")) + ) + is_met, reason = checker._check_threshold() + assert is_met is False + assert reason is None + + def test_check_threshold_positive_volatility_exceeded(self): + """Test _check_threshold when positive volatility threshold is exceeded""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + # Add historical data + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute with high max price (106 > 100 * 1.05) + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("100"), decimal.Decimal("106")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is True + assert reason is not None + assert "BTC/USDT reference price of 106.0 is above the 2 minutes average high value of 100.0 +5.0%." in reason + + def test_check_threshold_negative_volatility_exceeded(self): + """Test _check_threshold when negative volatility threshold is exceeded""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + # Add historical data + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute with low min price (96 < 100 * 0.97) + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("96"), decimal.Decimal("100")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is True + assert reason is not None + assert "BTC/USDT reference price of 96.0 is bellow the 2 minutes average low value of 100.0 -3.0%." in reason + + def test_check_threshold_within_threshold(self): + """Test _check_threshold when volatility is within threshold""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + # Add historical data within threshold + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute within threshold + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("98"), decimal.Decimal("104")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is False + assert reason is None + + def test_check_threshold_ignores_positive_when_zero(self): + """Test _check_threshold ignores positive volatility check when max_allowed_positive_percentage_change is ZERO""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=trading_constants.ZERO, + max_allowed_negative_percentage_change=3.0 + ) + + # Add historical data with extreme positive volatility + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute with very high max price (200% increase) + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("100"), decimal.Decimal("200")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is False + assert reason is None + + def test_check_threshold_ignores_negative_when_zero(self): + """Test _check_threshold ignores negative volatility check when max_allowed_negative_percentage_change is ZERO""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=0 + ) + + # Add historical data with extreme negative volatility + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute with very low min price (50% decrease) + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("50"), decimal.Decimal("100")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is False + assert reason is None + + def test_check_threshold_both_zero_never_triggers(self): + """Test _check_threshold never triggers when both percentage changes are ZERO""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=0, + max_allowed_negative_percentage_change=0 + ) + + # Add historical data with extreme volatility in both directions + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute with extreme volatility + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("10"), decimal.Decimal("1000")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is False + assert reason is None + + def test_check_threshold_negative_zero_positive_triggers(self): + """Test _check_threshold can still trigger on positive when negative is ZERO""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=trading_constants.ZERO + ) + + # Add historical data that exceeds positive threshold + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute exceeds positive threshold (106 > 100 * 1.05) + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("100"), decimal.Decimal("106")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is True + assert reason is not None + assert "BTC/USDT reference price of 106.0 is above the 2 minutes average high value of 100.0 +5.0%." in reason + + def test_check_threshold_positive_zero_negative_triggers(self): + """Test _check_threshold can still trigger on negative when positive is ZERO""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=trading_constants.ZERO, + max_allowed_negative_percentage_change=3.0 + ) + + # Add historical data that exceeds negative threshold + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + # Current minute exceeds negative threshold (96 < 100 * 0.97) + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("96"), decimal.Decimal("100")), + ] + + is_met, reason = checker._check_threshold() + # Should trigger on negative volatility + assert is_met is True + assert reason is not None + assert "BTC/USDT reference price of 96.0 is bellow the 2 minutes average low value of 100.0 -3.0%." in reason + + def test_on_new_price_creates_new_minute(self): + """Test on_new_price creates a new minute entry""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=5, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + with mock.patch('time.time', return_value=120.0): # 2 minutes + checker.on_new_price(decimal.Decimal("100.0")) + + assert len(checker._historical_min_and_max_price_by_minute_ts) == 1 + assert checker._historical_min_and_max_price_by_minute_ts[0].min_price == decimal.Decimal("100.0") + assert checker._historical_min_and_max_price_by_minute_ts[0].max_price == decimal.Decimal("100.0") + + def test_on_new_price_updates_existing_minute(self): + """Test on_new_price updates existing minute when called multiple times""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=5, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + # Add multiple prices in the same minute + with mock.patch('time.time', return_value=120.0): + checker.on_new_price(decimal.Decimal("100.0")) + checker.on_new_price(decimal.Decimal("95.0")) + checker.on_new_price(decimal.Decimal("105.0")) + + assert len(checker._historical_min_and_max_price_by_minute_ts) == 1 + assert checker._historical_min_and_max_price_by_minute_ts[0].min_price == decimal.Decimal("95.0") + assert checker._historical_min_and_max_price_by_minute_ts[0].max_price == decimal.Decimal("105.0") + + def test_on_new_price_limits_history_size(self): + """Test on_new_price limits history to period_in_minutes + 1""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=3, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + # Add prices for 6 different minutes (should keep only last 4) + for minute in range(6): + with mock.patch('time.time', return_value=float(minute * 60)): + checker.on_new_price(decimal.Decimal("100.0")) + + # Should have at most period_in_minutes + 1 entries + assert len(checker._historical_min_and_max_price_by_minute_ts) == 4 + + def test_update_last_historical_min_and_max_price_new_minute(self): + """Test _update_last_historical_min_and_max_price with new minute""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=5, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + checker._update_last_historical_min_and_max_price(1, decimal.Decimal("100.0")) + assert len(checker._historical_min_and_max_price_by_minute_ts) == 1 + + checker._update_last_historical_min_and_max_price(2, decimal.Decimal("105.0")) + assert len(checker._historical_min_and_max_price_by_minute_ts) == 2 + + def test_update_last_historical_min_and_max_price_same_minute(self): + """Test _update_last_historical_min_and_max_price with same minute""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=5, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + checker._update_last_historical_min_and_max_price(1, decimal.Decimal("100.0")) + checker._update_last_historical_min_and_max_price(1, decimal.Decimal("95.0")) + checker._update_last_historical_min_and_max_price(1, decimal.Decimal("110.0")) + + assert len(checker._historical_min_and_max_price_by_minute_ts) == 1 + assert checker._historical_min_and_max_price_by_minute_ts[0].min_price == decimal.Decimal("95.0") + assert checker._historical_min_and_max_price_by_minute_ts[0].max_price == decimal.Decimal("110.0") + + def test_check_threshold_returns_reason_in_tuple(self): + """Test _check_threshold returns the reason as second element of tuple when met""" + checker = self._create_checker( + symbol="BTC/USDT", + period_in_minutes=2, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + + checker._historical_min_and_max_price_by_minute_ts = [ + volatility_threshold.HistoricalMinAndMaxPrice(1, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(2, decimal.Decimal("100"), decimal.Decimal("100")), + volatility_threshold.HistoricalMinAndMaxPrice(3, decimal.Decimal("100"), decimal.Decimal("106")), + ] + + is_met, reason = checker._check_threshold() + assert is_met is True + assert reason is not None + assert "BTC/USDT reference price of 106.0 is above the 2 minutes average high value of 100.0 +5.0%." in reason + + +class TestVolatilityThreshold: + """Tests for VolatilityThreshold""" + + def _create_trigger( + self, + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0, + exchange="binance" + ): + """Create and configure a VolatilityThreshold instance.""" + trigger = volatility_threshold.VolatilityThreshold() + trigger.apply_config({ + volatility_threshold.VolatilityThreshold.EXCHANGE: exchange, + volatility_threshold.VolatilityThreshold.SYMBOL: symbol, + volatility_threshold.VolatilityThreshold.PERIOD_IN_MINUTES: period_in_minutes, + volatility_threshold.VolatilityThreshold.MAX_ALLOWED_POSITIVE_PERCENTAGE_CHANGE: max_allowed_positive_percentage_change, + volatility_threshold.VolatilityThreshold.MAX_ALLOWED_NEGATIVE_PERCENTAGE_CHANGE: max_allowed_negative_percentage_change, + }) + return trigger + + def test_apply_config_sets_exchange(self): + """Test apply_config sets exchange on the trigger""" + trigger = self._create_trigger(exchange="binance") + assert trigger.exchange == "binance" + + def test_apply_config_sets_exchange_none_when_empty(self): + """Test apply_config sets exchange to None when empty string""" + trigger = self._create_trigger(exchange="") + assert trigger.exchange is None + + def test_apply_config_populates_checker(self): + """Test apply_config correctly configures the internal VolatilityThresholdChecker""" + trigger = self._create_trigger( + symbol="BTC/USDT", + period_in_minutes=10, + max_allowed_positive_percentage_change=5.0, + max_allowed_negative_percentage_change=3.0 + ) + checker = trigger.volatility_threshold_checker + assert checker.symbol == "BTC/USDT" + assert checker.period_in_minutes == 10 + assert checker.max_allowed_positive_percentage_change == decimal.Decimal("5.0") + assert checker.max_allowed_negative_percentage_change == decimal.Decimal("3.0") + assert checker._max_positive_ratio == decimal.Decimal("1.05") + assert checker._max_negative_ratio == decimal.Decimal("0.97") + + def test_apply_config_populates_checker_with_decimals(self): + """Test apply_config correctly computes ratios with decimal values""" + trigger = self._create_trigger( + symbol="ETH/USDT", + period_in_minutes=5, + max_allowed_positive_percentage_change=10.5, + max_allowed_negative_percentage_change=5.5 + ) + checker = trigger.volatility_threshold_checker + assert checker._max_positive_ratio == decimal.Decimal("1.105") + assert checker._max_negative_ratio == decimal.Decimal("0.945") diff --git a/packages/tentacles/Automation/trigger_events/volatility_threshold_event/volatility_threshold.py b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/volatility_threshold.py new file mode 100644 index 0000000000..fea6d053c7 --- /dev/null +++ b/packages/tentacles/Automation/trigger_events/volatility_threshold_event/volatility_threshold.py @@ -0,0 +1,223 @@ +# Drakkar-Software OctoBot-Trading +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import dataclasses +import decimal +import time +import typing + +import async_channel +import async_channel.enums as channel_enums +import octobot_commons.enums as commons_enums +import octobot_commons.configuration as configuration +import octobot_commons.constants as commons_constants +import octobot_commons.channels_name as channels_name +import octobot_commons.data_util as commons_data_util +import octobot.automation.bases.abstract_channel_based_trigger_event as abstract_channel_based_trigger_event +import octobot_trading.constants as trading_constants +import octobot_trading.exchange_channel as exchanges_channel +import octobot.errors as errors + + +@dataclasses.dataclass +class HistoricalMinAndMaxPrice: + minute_ts: int + min_price: decimal.Decimal + max_price: decimal.Decimal + + def update(self, price: decimal.Decimal): + self.min_price = min(self.min_price, price) + self.max_price = max(self.max_price, price) + + +class VolatilityThresholdChecker: + # extracted to be used in other tentacles if needed + def __init__( + self, + symbol: str, + period_in_minutes: float, + max_allowed_positive_percentage_change: decimal.Decimal, + max_allowed_negative_percentage_change: decimal.Decimal, + ): + self.symbol: str = symbol + self.period_in_minutes: float = period_in_minutes + self.max_allowed_positive_percentage_change = max_allowed_positive_percentage_change + self.max_allowed_negative_percentage_change = max_allowed_negative_percentage_change + + self._historical_min_and_max_price_by_minute_ts: list[HistoricalMinAndMaxPrice] = [] + self._max_positive_ratio: decimal.Decimal = trading_constants.ZERO + self._max_negative_ratio: decimal.Decimal = trading_constants.ZERO + self._update_ratios() + + def _update_ratios(self): + if self.max_allowed_positive_percentage_change: + self._max_positive_ratio = trading_constants.ONE + self.max_allowed_positive_percentage_change / trading_constants.ONE_HUNDRED + if self.max_allowed_negative_percentage_change: + self._max_negative_ratio = trading_constants.ONE - self.max_allowed_negative_percentage_change / trading_constants.ONE_HUNDRED + + def validate_config(self): + if not self.symbol or not self.period_in_minutes: + raise errors.InvalidAutomationConfigError("symbol and period in minutes must be set", VolatilityThreshold.get_name()) + if self.max_allowed_positive_percentage_change <= trading_constants.ZERO: + raise errors.InvalidAutomationConfigError("max allowed positive percentage change must be > 0", VolatilityThreshold.get_name()) + if self.max_allowed_negative_percentage_change <= trading_constants.ZERO: + raise errors.InvalidAutomationConfigError("max allowed negative percentage change must be > 0", VolatilityThreshold.get_name()) + + self._update_ratios() + + def _check_threshold(self) -> tuple[bool, typing.Optional[str]]: + if len(self._historical_min_and_max_price_by_minute_ts) < 2: + # need at least the current minute's price and the previous minute's price + return False, None + current_minute_price = self._historical_min_and_max_price_by_minute_ts[-1] + if self.max_allowed_positive_percentage_change > trading_constants.ZERO: + historical_average_max_price = commons_data_util.mean([ + historical_min_and_max_price.max_price + for historical_min_and_max_price in self._historical_min_and_max_price_by_minute_ts[:-1] + ]) + if current_minute_price.max_price > historical_average_max_price * self._max_positive_ratio: # type: ignore + return True, self._get_reason(historical_average_max_price, True) + if self.max_allowed_negative_percentage_change > trading_constants.ZERO: + historical_average_min_price = commons_data_util.mean([ + historical_min_and_max_price.min_price + for historical_min_and_max_price in self._historical_min_and_max_price_by_minute_ts[:-1] + ]) + if current_minute_price.min_price < historical_average_min_price * self._max_negative_ratio: # type: ignore + return True, self._get_reason(historical_average_min_price, False) + return False, None + + def _get_reason(self, historical_average_price: decimal.Decimal, is_superior: bool) -> str: + current_minute_price = self._historical_min_and_max_price_by_minute_ts[-1] + current_value = current_minute_price.max_price if is_superior else current_minute_price.min_price + return ( + f"{self.symbol} reference price of {float(current_value)} is {'above' if is_superior else 'bellow'} " + f"the {self.period_in_minutes} minutes average {'high' if is_superior else 'low'} " + f"value of {float(historical_average_price)} {'+' if is_superior else '-'}" + f"{float(self.max_allowed_positive_percentage_change if is_superior else self.max_allowed_negative_percentage_change)}%." + ) + + def _update_last_historical_min_and_max_price(self, minute_ts: int, price: decimal.Decimal): + if not self._historical_min_and_max_price_by_minute_ts or self._historical_min_and_max_price_by_minute_ts[-1].minute_ts != minute_ts: + self._historical_min_and_max_price_by_minute_ts.append(HistoricalMinAndMaxPrice(minute_ts, price, price)) + else: + self._historical_min_and_max_price_by_minute_ts[-1].update(price) + + def on_new_price(self, price: decimal.Decimal) -> tuple[bool, typing.Optional[str]]: + current_time = time.time() + current_minute_ts = int(current_time - (current_time // 60)) + self._update_last_historical_min_and_max_price(current_minute_ts, price) + # ensure history doesn't grow forever + # +1 because we need to keep the current minute's price in the history as well + if len(self._historical_min_and_max_price_by_minute_ts) > (self.period_in_minutes + 1): + self._historical_min_and_max_price_by_minute_ts.pop(0) + return self._check_threshold() + + +class VolatilityThreshold(abstract_channel_based_trigger_event.AbstractChannelBasedTriggerEvent): + EXCHANGE = "exchange" + SYMBOL = "symbol" + PERIOD_IN_MINUTES = "period_in_minutes" + MAX_ALLOWED_POSITIVE_PERCENTAGE_CHANGE = "max_allowed_positive_percentage_change" + MAX_ALLOWED_NEGATIVE_PERCENTAGE_CHANGE = "max_allowed_negative_percentage_change" + + def __init__(self): + super().__init__() + # config + self.volatility_threshold_checker: VolatilityThresholdChecker = VolatilityThresholdChecker( + symbol=None, # type: ignore + period_in_minutes=None, # type: ignore + max_allowed_positive_percentage_change=None, # type: ignore + max_allowed_negative_percentage_change=None, # type: ignore + ) + + @staticmethod + def get_description() -> str: + return ( + "Will trigger when the price of the given symbol reaches a certain percentage change from the average price of the given period." \ + "Example: a Period of 1440 and a Max allowed positive percentage change of 1 will trigger the automation if the price of ETH/USDT reaches 1% above the average price by minutes over the past 1440 minutes." + ) + + def get_user_inputs( + self, UI: configuration.UserInputFactory, inputs: dict, step_name: str + ) -> dict: + return { + self.EXCHANGE: UI.user_input( + self.EXCHANGE, commons_enums.UserInputTypes.TEXT, "binance", inputs, + title="Exchange: exchange to watch price on. Example: binance. Leave empty to enable on all exchanges.", + parent_input_name=step_name, + other_schema_values={"minLength": 1} + ), + self.SYMBOL: UI.user_input( + self.SYMBOL, commons_enums.UserInputTypes.TEXT, "BTC/USDT", inputs, + title="Symbol: symbol to watch price on. Example: ETH/USDT. The symbol should be a configured trading pair of the exchange.", + parent_input_name=step_name, + other_schema_values={"minLength": 3, "pattern": commons_constants.TRADING_SYMBOL_REGEX} + ), + self.PERIOD_IN_MINUTES: UI.user_input( + self.PERIOD_IN_MINUTES, commons_enums.UserInputTypes.FLOAT, 60, inputs, + title="Period in minutes: period to watch price on. Example: 1440 for 1 day", + parent_input_name=step_name, + min_val=0, + other_schema_values={"exclusiveMinimum": True} + ), + self.MAX_ALLOWED_POSITIVE_PERCENTAGE_CHANGE: UI.user_input( + self.MAX_ALLOWED_POSITIVE_PERCENTAGE_CHANGE, commons_enums.UserInputTypes.FLOAT, 1.0, inputs, + title="Max allowed positive percentage change. Leave 0 to disable. Example: 1 for 1%", + parent_input_name=step_name, + min_val=0, + ), + self.MAX_ALLOWED_NEGATIVE_PERCENTAGE_CHANGE: UI.user_input( + self.MAX_ALLOWED_NEGATIVE_PERCENTAGE_CHANGE, commons_enums.UserInputTypes.FLOAT, 1.0, inputs, + title="Max allowed negative percentage change. Leave 0 to disable. Example: 1 for -1%", + parent_input_name=step_name, + min_val=0, + ), + } + + def apply_config(self, config: dict) -> None: + self.clear_future() + self.exchange = config[self.EXCHANGE] or None + self.symbol = config[self.SYMBOL] + self.volatility_threshold_checker.symbol = self.symbol # type: ignore + self.volatility_threshold_checker.period_in_minutes = config[self.PERIOD_IN_MINUTES] + self.volatility_threshold_checker.max_allowed_positive_percentage_change = decimal.Decimal(str( + config[self.MAX_ALLOWED_POSITIVE_PERCENTAGE_CHANGE] + )) + self.volatility_threshold_checker.max_allowed_negative_percentage_change = decimal.Decimal(str( + config[self.MAX_ALLOWED_NEGATIVE_PERCENTAGE_CHANGE] + )) + self.volatility_threshold_checker.validate_config() + + async def register_consumers(self, exchange_id: str) -> list[async_channel.Consumer]: + return [ + await exchanges_channel.get_chan( + channels_name.OctoBotTradingChannelsName.MARK_PRICE_CHANNEL.value, exchange_id + ).new_consumer( + self.mark_price_callback, + priority_level=channel_enums.ChannelConsumerPriorityLevels.HIGH.value, + symbol=self.volatility_threshold_checker.symbol, + ) + ] + + async def mark_price_callback( + self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, mark_price + ): + if self.should_stop: + # do not go any further if the action has been stopped + return + is_threshold_met, reason = self.volatility_threshold_checker.on_new_price(decimal.Decimal(str(mark_price))) + if is_threshold_met: + self.logger.info(f"Volatility threshold met for {exchange}: {reason}") + self.trigger(description=reason) diff --git a/packages/tentacles/BUILD b/packages/tentacles/BUILD new file mode 100644 index 0000000000..25745942e0 --- /dev/null +++ b/packages/tentacles/BUILD @@ -0,0 +1,21 @@ +python_sources(name="octobot_tentacles", sources=["**/*.py", "!**/tests/**/*.py", "!conftest.py"]) + +# Tentacles test utilities (non-test files within tests directories that are imported by other tests) +python_sources( + name="tentacles_test_utils", + sources=[ + "**/tests/**/*.py", + "!**/tests/**/test_*.py", # Exclude actual test files + ], +) + +files( + name="tentacles_metadata", + sources=["**/metadata.json", "**/config/**/*", "metadata.yaml", "octobot_config.json"], +) + +# Static test files (JSON configs, data files, etc.) used by tentacles tests +files( + name="tentacles_test_data", + sources=["**/tests/static/**/*"], +) diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/__init__.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/__init__.py new file mode 100644 index 0000000000..65fcd44350 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/__init__.py @@ -0,0 +1 @@ +from .bot_snapshot_with_history_collector import ExchangeBotSnapshotWithHistoryCollector \ No newline at end of file diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/bot_snapshot_with_history_collector.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/bot_snapshot_with_history_collector.py new file mode 100644 index 0000000000..60508cda56 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/bot_snapshot_with_history_collector.py @@ -0,0 +1,431 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import copy +import os +import json +import time +import shutil +import collections + +import octobot_backtesting.collectors as collector +import octobot_backtesting.importers as importers +import octobot_backtesting.enums as backtesting_enums +import octobot_backtesting.constants as backtesting_constants +import octobot_backtesting.errors as backtesting_errors +import octobot_commons.errors as commons_errors +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_commons.databases as databases +import octobot_backtesting.data as data +import octobot_trading.api as trading_api +import octobot_trading.errors as trading_errors +import tentacles.Backtesting.importers.exchanges.generic_exchange_importer as generic_exchange_importer + + + +class ExchangeBotSnapshotWithHistoryCollector(collector.AbstractExchangeBotSnapshotCollector): + IMPORTER = generic_exchange_importer.GenericExchangeDataImporter + OHLCV = "ohlcv" + KLINE = "kline" + + def __init__(self, config, exchange_name, exchange_type, tentacles_setup_config, symbols, time_frames, + use_all_available_timeframes=False, + data_format=backtesting_enums.DataFormats.REGULAR_COLLECTOR_DATA, + start_timestamp=None, + end_timestamp=None): + super().__init__(config, exchange_name, exchange_type, tentacles_setup_config, symbols, time_frames, + use_all_available_timeframes, data_format=data_format, + start_timestamp=start_timestamp, end_timestamp=end_timestamp) + self.exchange_type = None + self.exchange_manager = None + self.fetch_exchange_manager = None + self.file_name = data.get_backtesting_file_name(self.__class__, + self.get_permanent_file_identifier, + data_format=data_format) + self.is_creating_database = False + self.description = None + self.missing_symbols = [] + self.fetched_data = { + self.OHLCV: {}, + self.KLINE: {}, + } + self.set_file_path() + + def get_permanent_file_identifier(self): + symbols = "-".join(symbol_util.merge_symbol(symbol.symbol_str) for symbol in self.symbols) + time_frames = "-".join(tf.value for tf in self.time_frames) + return f"{self.exchange_name}{backtesting_constants.BACKTESTING_DATA_FILE_SEPARATOR}" \ + f"{symbols}{backtesting_constants.BACKTESTING_DATA_FILE_SEPARATOR}{time_frames}" + + async def initialize(self): + self.create_database() + await self.database.initialize() + await self._check_database_content() + + def set_file_path(self) -> None: + super().set_file_path() + if os.path.isfile(self.file_path): + shutil.copy(self.file_path, self.temp_file_path) + + def finalize_database(self): + if os.path.isfile(self.file_path): + os.remove(self.file_path) + os.rename(self.temp_file_path, self.file_path) + + async def _check_database_content(self): + # load description + try: + self.description = await data.get_database_description(self.database) + found_exchange_name = self.description[backtesting_enums.DataFormatKeys.EXCHANGE.value] + found_symbols = [symbol_util.parse_symbol(symbol) + for symbol in self.description[backtesting_enums.DataFormatKeys.SYMBOLS.value]] + found_time_frames = self.description[backtesting_enums.DataFormatKeys.TIME_FRAMES.value] + if found_exchange_name != self.exchange_name: + raise backtesting_errors.IncompatibleDatafileError(f"Exchange name in database: {found_exchange_name}, " + f"requested exchange: {self.exchange_name}") + if found_symbols != self.symbols: + raise backtesting_errors.IncompatibleDatafileError(f"Pairs in database: {found_symbols}, " + f"requested exchange: {self.symbols}") + if found_time_frames != self.time_frames: + raise backtesting_errors.IncompatibleDatafileError(f"Time frames name in database: {found_time_frames}, " + f"requested exchange: {self.time_frames}") + except commons_errors.DatabaseNotFoundError: + # newly created datafile + self.is_creating_database = True + + async def start(self): + self.should_stop = False + should_stop_database = True + self.current_step_percent = 0 + self.total_steps = len(self.time_frames) * len(self.symbols) + try: + self.exchange_manager = trading_api.get_exchange_manager_from_exchange_id(self.exchange_id) + + # use a secondary exchange manager to fetch candles to fix ccxt pagination issues + # seen on ccxt 4.1.82 + other_config = copy.copy(self.config) + other_config[commons_constants.CONFIG_TIME_FRAME] = [] # any value here to avoid crashing + self.fetch_exchange_manager = await trading_api.create_exchange_builder(other_config, self.exchange_name) \ + .is_simulated() \ + .is_rest_only() \ + .is_exchange_only() \ + .is_future(self.exchange_manager.is_future) \ + .disable_trading_mode() \ + .use_tentacles_setup_config(self.tentacles_setup_config) \ + .build() + + await self.adapt_timestamps() + + # create/update description + if self.is_creating_database: + await self._create_description() + else: + await self._update_description() + + self.in_progress = True + + self.logger.info(f"Start collecting history on {self.exchange_name}") + tasks = [] + for symbol_index, symbol in enumerate(self.symbols): + if symbol in self.missing_symbols: + self.logger.error(f"Skipping {symbol} from backtesting data: " + f"missing price history on {self.exchange_name}") + continue + self.logger.info(f"Collecting history for {symbol}...") + tasks.append(asyncio.create_task(self.get_ticker_history(self.exchange_name, symbol))) + tasks.append(asyncio.create_task(self.get_order_book_history(self.exchange_name, symbol))) + tasks.append(asyncio.create_task(self.get_recent_trades_history(self.exchange_name, symbol))) + + for time_frame_index, time_frame in enumerate(self.time_frames): + tasks.append(asyncio.create_task(self.get_ohlcv_history(self.exchange_name, symbol, time_frame))) + tasks.append(asyncio.create_task(self.get_kline_history(self.exchange_name, symbol, time_frame))) + if symbol_index == time_frame_index == 0: + # let tables get created + await asyncio.gather(*tasks) + tasks = [] + if tasks: + await asyncio.gather(*tasks) + + except Exception as err: + await self.database.stop() + should_stop_database = False + # Do not keep errored data file + if os.path.isfile(self.temp_file_path): + os.remove(self.temp_file_path) + if not self.should_stop: + self.logger.exception(err, True, f"Error when collecting {self.exchange_name} history for " + f"{', '.join([symbol.symbol_str for symbol in self.symbols])}: {err}") + raise backtesting_errors.DataCollectorError(err) from err + finally: + await self.stop(should_stop_database=should_stop_database) + + async def stop(self, should_stop_database=True): + self.should_stop = True + if should_stop_database: + await self.database.stop() + self.finalize_database() + await self.fetch_exchange_manager.stop() + self.exchange_manager = None + self.in_progress = False + self.finished = True + return self.finished + + async def _update_description(self): + updated_values = {} + if self.end_timestamp and int(self.description[backtesting_enums.DataFormatKeys.END_TIMESTAMP.value]) * 1000 < self.end_timestamp: + updated_values["end_timestamp"] = int(self.end_timestamp/1000) + if self.start_timestamp and int(self.description[backtesting_enums.DataFormatKeys.START_TIMESTAMP.value]) * 1000 > self.start_timestamp: + updated_values["start_timestamp"] = int(self.start_timestamp/1000) + if updated_values: + updated_values["timestamp"] = time.time() + await self.database.update(backtesting_enums.DataTables.DESCRIPTION, + updated_value_by_column=updated_values, + version=self.VERSION, + exchange=self.exchange_name, + symbols=json.dumps([symbol.symbol_str for symbol in self.symbols]), + time_frames=json.dumps([tf.value for tf in self.time_frames])) + + async def get_ticker_history(self, exchange, symbol): + pass + + async def get_order_book_history(self, exchange, symbol): + pass + + async def get_recent_trades_history(self, exchange, symbol): + pass + + def get_ohlcv_snapshot(self, symbol, time_frame): + symbol_data = trading_api.get_symbol_data(self.exchange_manager, str(symbol), allow_creation=False) + candles = trading_api.get_symbol_historical_candles(symbol_data, time_frame) + return [ + [ + time_val, + candles[commons_enums.PriceIndexes.IND_PRICE_OPEN.value][index], + candles[commons_enums.PriceIndexes.IND_PRICE_HIGH.value][index], + candles[commons_enums.PriceIndexes.IND_PRICE_LOW.value][index], + candles[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value][index], + candles[commons_enums.PriceIndexes.IND_PRICE_VOL.value][index], + ] + for index, time_val in enumerate(candles[commons_enums.PriceIndexes.IND_PRICE_TIME.value]) + ] + + async def collect_historical_ohlcv(self, exchange, symbol, time_frame, time_frame_sec, + start_time, end_time, progress_multiplier): + last_progress = 0 + symbol_id = str(symbol) + async for candles in trading_api.get_historical_ohlcv( + self.fetch_exchange_manager, symbol_id, time_frame, start_time, end_time + ): + await self.save_ohlcv( + exchange=exchange, + cryptocurrency=self.exchange_manager.exchange.get_pair_cryptocurrency(symbol_id), + symbol=symbol.symbol_str, time_frame=time_frame, candle=candles, + timestamp=[candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + time_frame_sec + for candle in candles], + multiple=True + ) + progress = (candles[-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value] - self.start_timestamp / 1000) / \ + ((self.end_timestamp - self.start_timestamp) / 1000) * 100 + progress_over_all_steps = progress * progress_multiplier / self.total_steps + self.current_step_percent += progress_over_all_steps - last_progress + self.logger.debug(f"progress: {self.current_step_percent}%") + last_progress = progress_over_all_steps + return last_progress + + def find_candle(self, candles, timestamp): + for candle in candles: + if candle[-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value] == timestamp: + return candle[-1], candle[0] + return None, None + + async def update_ohlcv(self, exchange, symbol, time_frame, time_frame_sec, + database_candles, current_bot_candles): + to_add_candles = [] + symbol_id = str(symbol) + for up_to_date_candle in current_bot_candles: + current_candle_time = up_to_date_candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + equivalent_db_candle, candle_timestamp = self.find_candle(database_candles, current_candle_time) + if equivalent_db_candle is None: + to_add_candles.append(up_to_date_candle) + elif equivalent_db_candle != up_to_date_candle: + updated_value_by_column = { + "candle": json.dumps(up_to_date_candle) + } + await self.database.update(backtesting_enums.ExchangeDataTables.OHLCV, + updated_value_by_column=updated_value_by_column, + exchange_name=exchange, + cryptocurrency= + self.exchange_manager.exchange.get_pair_cryptocurrency(symbol_id), + symbol=symbol.symbol_str, + time_frame=time_frame.value, + timestamp=str(candle_timestamp)) + if to_add_candles: + await self.save_ohlcv( + exchange=exchange, + cryptocurrency=self.exchange_manager.exchange.get_pair_cryptocurrency(symbol_id), + symbol=symbol, time_frame=time_frame, candle=to_add_candles, + timestamp=[candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + time_frame_sec + for candle in to_add_candles], + multiple=True + ) + + async def _check_ohlcv_integrity(self, database_candles): + # ensure no timestamp is here twice + all_timestamps = [candle[-1][0] for candle in database_candles] + unique_timestamps = set(all_timestamps) + if len(unique_timestamps) != len(database_candles): + return { + timestamp: counter + for timestamp, counter in collections.Counter(all_timestamps).items() + if counter > 1 + } + return {} + + async def get_ohlcv_history(self, exchange, symbol, time_frame): + try: + last_progress = 0 + time_frame_sec = commons_enums.TimeFramesMinutes[time_frame] * commons_constants.MINUTE_TO_SECONDS + # use current data from current bot + fetch_data_id = self.get_fetch_data_id(symbol, time_frame) + already_fetched_candles_candles = self.fetched_data[self.OHLCV][fetch_data_id] + database_candles = [] + save_all_candles = self.is_creating_database + updated_db = False + if not self.is_creating_database: + database_candles = await self._import_candles_from_datafile(exchange, symbol, time_frame) + counters = await self._check_ohlcv_integrity(database_candles) + if counters: + self.logger.warning(f"Duplicate candles in {exchange} data file for {symbol.symbol_str} " + f"on {time_frame}. Problematic timestamps: {counters}. " + f"Resetting database to ensure data integrity") + + await self.delete_all( + backtesting_enums.ExchangeDataTables.OHLCV, + exchange=exchange, + cryptocurrency=self.exchange_manager.exchange.get_pair_cryptocurrency(str(symbol)), + symbol=symbol.symbol_str, + time_frame=time_frame + ) + updated_db = True + save_all_candles = True + if save_all_candles or not database_candles: + await self.save_ohlcv( + exchange=exchange, + cryptocurrency=self.exchange_manager.exchange.get_pair_cryptocurrency(str(symbol)), + symbol=symbol.symbol_str, time_frame=time_frame, candle=already_fetched_candles_candles, + timestamp=[candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + time_frame_sec + for candle in already_fetched_candles_candles], + multiple=True + ) + database_candles = await self._import_candles_from_datafile(exchange, symbol, time_frame) + updated_db = True + candle_times = [ + candle[-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value] + for candle in database_candles + ] + # +/-1 not to fetch the last candle twice + first_candle_data_time = min(candle_times) * 1000 - 1 + last_candle_data_time = max(candle_times) * 1000 + 1 + fill_before = self.start_timestamp and self.start_timestamp + time_frame_sec * 1000 < first_candle_data_time + fill_after = last_candle_data_time < self.end_timestamp + progress_per_collect = 0.5 if fill_after and fill_before else 1 + # 1. fill in any missing candle before existing candles + if fill_before: + # fetch missing data between required start time and actual start time in data file + last_progress = await self.collect_historical_ohlcv( + exchange, symbol, time_frame, time_frame_sec, self.start_timestamp, first_candle_data_time, + progress_per_collect + ) + if last_progress: + self.current_step_percent += 100 * progress_per_collect / self.total_steps - last_progress + updated_db = True + # 2. fill in any missing candle after existing candles + if fill_after: + # fetch missing data between end time in data file and available data + last_progress = await self.collect_historical_ohlcv( + exchange, symbol, time_frame, time_frame_sec, last_candle_data_time, self.end_timestamp, + progress_per_collect + ) + if last_progress: + self.current_step_percent += 100 * progress_per_collect / self.total_steps - last_progress + updated_db = True + if not (fill_before or fill_after): + # nothing to collect, update progress still + self.current_step_percent += 100 / self.total_steps + if updated_db: + database_candles = await self._import_candles_from_datafile(exchange, symbol, time_frame) + counters = await self._check_ohlcv_integrity(database_candles) + if counters: + self.logger.error(f"Error when checking database integrity of {exchange} " + f"data file for {symbol.symbol_str}. " + f"Delete this data file: {self.file_name} to reset it. " + f"Problematic timestamps: {counters}") + except Exception: + raise + + async def _import_candles_from_datafile(self, exchange, symbol, time_frame): + return importers.import_ohlcvs( + await self.database.select(backtesting_enums.ExchangeDataTables.OHLCV, + size=databases.SQLiteDatabase.DEFAULT_SIZE, + exchange_name=exchange, symbol=symbol.symbol_str, + time_frame=time_frame.value) + ) + + async def get_kline_history(self, exchange, symbol, time_frame): + pass + + async def adapt_timestamps(self): + lowest_timestamps = [] + for symbol in self.symbols: + for tf in self.time_frames: + first_timestamp = await self.get_first_candle_timestamp( + self.start_timestamp, symbol, tf + ) + if first_timestamp is None: + self.missing_symbols.append(symbol) + break + else: + lowest_timestamps.append(first_timestamp) + lowest_timestamp = min(lowest_timestamps) + # lowest_timestamp depends on self.start_timestamp if set. It will not go further + if self.start_timestamp is None or lowest_timestamp < self.start_timestamp: + self.start_timestamp = lowest_timestamp + self.end_timestamp = self.end_timestamp or time.time() * 1000 + if self.start_timestamp > self.end_timestamp: + raise backtesting_errors.DataCollectorError("start_timestamp is higher than end_timestamp") + + def get_fetch_data_id(self, symbol, timeframe): + return f"{symbol}{timeframe.value}" + + async def get_first_candle_timestamp(self, ideal_start_timestamp, symbol, time_frame): + try: + symbol_data = trading_api.get_symbol_data(self.exchange_manager, str(symbol), allow_creation=False) + candles = trading_api.get_symbol_historical_candles(symbol_data, time_frame) + self.fetched_data[self.OHLCV][self.get_fetch_data_id(symbol, time_frame)] = self.get_ohlcv_snapshot( + symbol, time_frame + ) + return candles[commons_enums.PriceIndexes.IND_PRICE_TIME.value][0] * 1000 + except KeyError: + # symbol or timeframe not available in live exchange + fetched_candles = await self.fetch_exchange_manager.exchange.get_symbol_prices( + str(symbol), time_frame, limit=1, since=ideal_start_timestamp + ) + if not fetched_candles: + return None + self.fetched_data[self.OHLCV][self.get_fetch_data_id(symbol, time_frame)] = fetched_candles + return fetched_candles[0][commons_enums.PriceIndexes.IND_PRICE_TIME.value] * 1000 diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/metadata.json b/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/metadata.json new file mode 100644 index 0000000000..685e35d67a --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_bot_snapshot_data_collector/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["ExchangeBotSnapshotCollector"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/__init__.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/__init__.py new file mode 100644 index 0000000000..17024e373a --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/__init__.py @@ -0,0 +1 @@ +from .history_collector import ExchangeHistoryDataCollector \ No newline at end of file diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/history_collector.pxd b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/history_collector.pxd new file mode 100644 index 0000000000..e65fe9f602 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/history_collector.pxd @@ -0,0 +1,22 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.collectors.exchanges.exchange_collector cimport AbstractExchangeHistoryCollector + +cdef class ExchangeHistoryDataCollector(AbstractExchangeHistoryCollector): + cdef public object exchange + cdef public object exchange_manager diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/history_collector.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/history_collector.py new file mode 100644 index 0000000000..c4e54e3aca --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/history_collector.py @@ -0,0 +1,195 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging +import os +import time + +import octobot_backtesting.collectors as collector +import octobot_backtesting.enums as backtesting_enums +import octobot_backtesting.errors as errors +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.time_frame_manager as time_frame_manager +import tentacles.Backtesting.importers.exchanges.generic_exchange_importer as generic_exchange_importer + +try: + import octobot_trading.api as trading_api + import octobot_trading.enums as trading_enums + import octobot_trading.errors as trading_errors +except ImportError: + logging.error("ExchangeHistoryDataCollector requires OctoBot-Trading package installed") + + +class ExchangeHistoryDataCollector(collector.AbstractExchangeHistoryCollector): + IMPORTER = generic_exchange_importer.GenericExchangeDataImporter + + def __init__(self, config, exchange_name, exchange_type, tentacles_setup_config, symbols, time_frames, + use_all_available_timeframes=False, + data_format=backtesting_enums.DataFormats.REGULAR_COLLECTOR_DATA, + start_timestamp=None, + end_timestamp=None): + super().__init__(config, exchange_name, exchange_type, tentacles_setup_config, symbols, time_frames, + use_all_available_timeframes, data_format=data_format, + start_timestamp=start_timestamp, end_timestamp=end_timestamp) + self.exchange = None + self.exchange_manager = None + + async def start(self): + self.should_stop = False + should_stop_database = True + try: + use_future = self.exchange_type == trading_enums.ExchangeTypes.FUTURE + self.exchange_manager = await trading_api.create_exchange_builder(self.config, self.exchange_name) \ + .is_simulated() \ + .is_rest_only() \ + .is_exchange_only() \ + .is_future(use_future) \ + .disable_trading_mode() \ + .use_tentacles_setup_config(self.tentacles_setup_config) \ + .build() + + self.exchange = self.exchange_manager.exchange + self._load_timeframes_if_necessary() + + await self.check_timestamps() + + # create description + await self._create_description() + + self.total_steps = len(self.time_frames) * len(self.symbols) + self.in_progress = True + + self.logger.info(f"Start collecting history on {self.exchange_name}") + for symbol_index, symbol in enumerate(self.symbols): + self.logger.info(f"Collecting history for {symbol}...") + await self.get_ticker_history(self.exchange_name, symbol) + await self.get_order_book_history(self.exchange_name, symbol) + await self.get_recent_trades_history(self.exchange_name, symbol) + + for time_frame_index, time_frame in enumerate(self.time_frames): + self.current_step_index = (symbol_index * len(self.time_frames)) + time_frame_index + 1 + self.logger.info( + f"[{time_frame_index}/{len(self.time_frames)}] Collecting {symbol} history on {time_frame}...") + await self.get_ohlcv_history(self.exchange_name, symbol, time_frame) + await self.get_kline_history(self.exchange_name, symbol, time_frame) + except Exception as err: + await self.database.stop() + should_stop_database = False + # Do not keep errored data file + if os.path.isfile(self.temp_file_path): + os.remove(self.temp_file_path) + if not self.should_stop: + self.logger.exception(err, True, f"Error when collecting {self.exchange_name} history for " + f"{', '.join([str(symbol) for symbol in self.symbols])}: {err}") + raise errors.DataCollectorError(err) + finally: + await self.stop(should_stop_database=should_stop_database) + + def _load_all_available_timeframes(self): + allowed_timeframes = set(tf.value for tf in commons_enums.TimeFrames) + self.time_frames = [commons_enums.TimeFrames(time_frame) + for time_frame in self.exchange_manager.client_time_frames + if time_frame in allowed_timeframes] + + async def stop(self, should_stop_database=True): + self.should_stop = True + if self.exchange_manager is not None: + await self.exchange_manager.stop() + if should_stop_database: + await self.database.stop() + self.finalize_database() + self.exchange_manager = None + self.in_progress = False + self.finished = True + return self.finished + + async def get_ticker_history(self, exchange, symbol): + pass + + async def get_order_book_history(self, exchange, symbol): + pass + + async def get_recent_trades_history(self, exchange, symbol): + pass + + async def get_ohlcv_history(self, exchange, symbol, time_frame): + self.current_step_percent = 0 + # use time_frame_sec to add time to save the candle closing time + time_frame_sec = commons_enums.TimeFramesMinutes[time_frame] * commons_constants.MINUTE_TO_SECONDS + symbol_id = str(symbol) + cryptocurrency = self.exchange_manager.exchange.get_pair_cryptocurrency(symbol_id) + if self.start_timestamp is not None: + start_time = self.start_timestamp + end_time = self.end_timestamp or time.time() * 1000 + first_candle_timestamp = await self.get_first_candle_timestamp( + self.start_timestamp, symbol, time_frame + ) * 1000 + if self.start_timestamp < first_candle_timestamp: + start_time = first_candle_timestamp + async for hist_candles in trading_api.get_historical_ohlcv(self.exchange_manager, symbol_id, time_frame, + start_time, end_time): + if hist_candles: + self.current_step_percent = \ + (hist_candles[-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value] - start_time / 1000) / \ + ((end_time - start_time) / 1000) * 100 + self.logger.info(f"[{self.current_step_percent}%] historical data fetched for {symbol} {time_frame}") + await self.save_ohlcv( + exchange=exchange, + cryptocurrency=cryptocurrency, + symbol=symbol.symbol_str, time_frame=time_frame, candle=hist_candles, + timestamp=[candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + time_frame_sec + for candle in hist_candles], + multiple=True) + else: + try: + candles = await self.exchange.get_symbol_prices(symbol_id, time_frame) + if candles: + await self.save_ohlcv(exchange=exchange, + cryptocurrency=cryptocurrency, + symbol=symbol.symbol_str, time_frame=time_frame, candle=candles, + timestamp=[candle[0] + time_frame_sec for candle in candles], multiple=True) + else: + self.logger.error(f"No candles for {symbol} on {time_frame} ({exchange})") + except trading_errors.FailedRequest as err: + self.logger.exception(err, False) + self.logger.warning(f"Ignored {symbol} {time_frame} candles on {exchange} ({err})") + + async def get_kline_history(self, exchange, symbol, time_frame): + pass + + async def check_timestamps(self): + if self.start_timestamp is not None: + lowest_timestamp = min([ + await self.get_first_candle_timestamp( + self.start_timestamp, symbol, time_frame_manager.find_min_time_frame(self.time_frames) + ) + for symbol in self.symbols + ]) + if lowest_timestamp > self.start_timestamp: + self.start_timestamp = lowest_timestamp + if self.start_timestamp > (self.end_timestamp if self.end_timestamp else (time.time() * 1000)): + raise errors.DataCollectorError("start_timestamp is higher than end_timestamp") + + async def get_first_candle_timestamp(self, ideal_start_timestamp, symbol, time_frame): + try: + return ( + await self.exchange.get_symbol_prices(str(symbol), time_frame, limit=1, since=ideal_start_timestamp) + )[0][commons_enums.PriceIndexes.IND_PRICE_TIME.value] + except (trading_errors.FailedRequest, IndexError) as err: + raise errors.DataCollectorError( + f"Impossible to initialize {self.exchange_name} data collector: {err}. This means that {symbol} " + f"for the {time_frame.value} time frame is not supported in this context on {self.exchange_name}." + ) diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/metadata.json b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/metadata.json new file mode 100644 index 0000000000..42cfa0ac25 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["ExchangeHistoryDataCollector"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/tests/__init__.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/tests/test_history_collector.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/tests/test_history_collector.py new file mode 100644 index 0000000000..bd8c96bcae --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_history_collector/tests/test_history_collector.py @@ -0,0 +1,241 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import os +import contextlib +import json +import asyncio + +import octobot_commons.databases as databases +import octobot_commons.symbols as commons_symbols +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_backtesting.enums as enums +import octobot_backtesting.errors as errors +import octobot_trading.enums as trading_enums +import tests.test_utils.config as test_utils_config +import tentacles.Backtesting.collectors.exchanges as collector_exchanges +import tentacles.Trading.Exchange as tentacles_exchanges + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + +BINANCEUS = "binanceus" +BINANCEUS_MAX_CANDLES_COUNT = 500 + + +@contextlib.asynccontextmanager +async def data_collector(exchange_name, tentacles_setup_config, symbols, time_frames, use_all_available_timeframes, + start_timestamp=None, end_timestamp=None): + collector_instance = collector_exchanges.ExchangeHistoryDataCollector( + {}, exchange_name, trading_enums.ExchangeTypes.SPOT, tentacles_setup_config, + [commons_symbols.parse_symbol(symbol) for symbol in symbols], time_frames, + use_all_available_timeframes=use_all_available_timeframes, + start_timestamp=start_timestamp, + end_timestamp=end_timestamp + ) + try: + await collector_instance.initialize() + yield collector_instance + finally: + if collector_instance.file_path and os.path.isfile(collector_instance.file_path): + os.remove(collector_instance.file_path) + if collector_instance.temp_file_path and os.path.isfile(collector_instance.temp_file_path): + os.remove(collector_instance.temp_file_path) + + +@contextlib.asynccontextmanager +async def collector_database(collector): + database = databases.SQLiteDatabase(collector.file_path) + try: + await database.initialize() + yield database + finally: + await database.stop() + + +async def test_collect_valid_data(): + tentacles_setup_config = test_utils_config.load_test_tentacles_config() + symbols = ["ETH/BTC"] + async with data_collector(BINANCEUS, tentacles_setup_config, symbols, None, True) as collector: + assert collector.time_frames == [] + assert collector.symbols == [commons_symbols.parse_symbol(symbol) for symbol in symbols] + assert collector.exchange_name == BINANCEUS + assert collector.tentacles_setup_config == tentacles_setup_config + await collector.start() + assert collector.time_frames != [] + assert collector.exchange_manager is None + assert isinstance(collector.exchange, tentacles_exchanges.BinanceUS) + assert collector.file_path is not None + assert collector.temp_file_path is not None + assert not os.path.isfile(collector.temp_file_path) + assert os.path.isfile(collector.file_path) + async with collector_database(collector) as database: + ohlcv = await database.select(enums.ExchangeDataTables.OHLCV) + # use > to take into account new possible candles since collect max time is not specified + assert len(ohlcv) > 6000 + h_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, time_frame="1h") + assert len(h_ohlcv) == BINANCEUS_MAX_CANDLES_COUNT + eth_btc_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, symbol="ETH/BTC") + assert len(eth_btc_ohlcv) == len(ohlcv) + + +async def test_collect_invalid_data(): + tentacles_setup_config = test_utils_config.load_test_tentacles_config() + symbols = ["___ETH/BTC"] + async with data_collector(BINANCEUS, tentacles_setup_config, symbols, None, True) as collector: + with pytest.raises(errors.DataCollectorError): + await collector.start() + assert collector.time_frames != [] + assert collector.exchange_manager is None + assert collector.exchange is not None + assert collector.file_path is not None + assert collector.temp_file_path is not None + assert not os.path.isfile(collector.temp_file_path) + + +async def test_collect_valid_date_range(): + tentacles_setup_config = test_utils_config.load_test_tentacles_config() + symbols = ["ETH/BTC"] + start_time = 1569413160000 + end_time = 1569914160000 + # each request fetches 500 candles + candle_fetch_limit = 500 + async with data_collector(BINANCEUS, tentacles_setup_config, symbols, None, True, start_time, + end_time) as collector: + assert collector.start_timestamp is not None + assert collector.end_timestamp is not None + await collector.start() + assert collector.time_frames != [] + assert collector.exchange_manager is None + assert isinstance(collector.exchange, tentacles_exchanges.BinanceUS) + assert collector.file_path is not None + assert collector.temp_file_path is not None + assert os.path.isfile(collector.file_path) + assert not os.path.isfile(collector.temp_file_path) + async with collector_database(collector) as database: + ohlcv = await database.select(enums.ExchangeDataTables.OHLCV) + assert len(ohlcv) == 13943 + parsed_candles = [ + json.loads(candle[-1]) + for candle in ohlcv + ] + for parsed_candle in parsed_candles: + candle_open_time = parsed_candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + assert start_time <= candle_open_time * 1000 <= end_time + for time_frame in commons_enums.TimeFrames: + time_frame_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, time_frame=time_frame.value) + if not time_frame_ohlcv: + continue + all_timestamps = sorted([ + candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + for candle in ( + json.loads(candle[-1]) + for candle in time_frame_ohlcv + ) + ]) + # ensure no duplicate + timestamps = set(all_timestamps) + assert len(timestamps) == len(time_frame_ohlcv) + # ensure no missing + interval = commons_enums.TimeFramesMinutes[time_frame] * commons_constants.MINUTE_TO_SECONDS + current_ts = all_timestamps[0] - interval + for timestamp in all_timestamps: + current_ts += interval + assert timestamp == current_ts + + h_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, + time_frame=commons_enums.TimeFrames.ONE_HOUR.value) + assert len(h_ohlcv) == 139 + eth_btc_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, symbol="ETH/BTC") + assert len(eth_btc_ohlcv) == len(ohlcv) + min_timestamp = (await database.select_min(enums.ExchangeDataTables.OHLCV, ["timestamp"], + time_frame=commons_enums.TimeFrames.ONE_MINUTE.value))[0][ + commons_enums.PriceIndexes.IND_PRICE_TIME.value] * 1000 + assert start_time <= min_timestamp <= start_time + (60 * 1000) + max_timestamp = (await database.select_max(enums.ExchangeDataTables.OHLCV, ["timestamp"]))[0][ + commons_enums.PriceIndexes.IND_PRICE_TIME.value] * 1000 + assert end_time <= max_timestamp <= end_time + (31 * 24 * 60 * 60 * 1000) + + +async def test_collect_invalid_date_range(): + tentacles_setup_config = test_utils_config.load_test_tentacles_config() + symbols = ["ETH/BTC"] + async with data_collector(BINANCEUS, tentacles_setup_config, symbols, None, True, 1609459200, 1577836800) \ + as collector: + assert collector.start_timestamp is not None + assert collector.end_timestamp is not None + with pytest.raises(errors.DataCollectorError): + await collector.start() + assert collector.time_frames != [] + assert collector.exchange_manager is None + assert isinstance(collector.exchange, tentacles_exchanges.BinanceUS) + assert collector.file_path is not None + assert collector.temp_file_path is not None + assert not os.path.isfile(collector.file_path) + assert not os.path.isfile(collector.temp_file_path) + + +async def test_collect_multi_pair(): + tentacles_setup_config = test_utils_config.load_test_tentacles_config() + symbols = ["ETH/BTC", "BTC/USDT", "LTC/BTC"] + async with data_collector(BINANCEUS, tentacles_setup_config, symbols, None, True) as collector: + assert collector.time_frames == [] + assert collector.symbols == [commons_symbols.parse_symbol(symbol) for symbol in symbols] + assert collector.exchange_name == BINANCEUS + assert collector.tentacles_setup_config == tentacles_setup_config + await collector.start() + assert collector.time_frames != [] + assert collector.exchange_manager is None + assert isinstance(collector.exchange, tentacles_exchanges.BinanceUS) + assert collector.file_path is not None + assert collector.temp_file_path is not None + assert not os.path.isfile(collector.temp_file_path) + assert os.path.isfile(collector.file_path) + async with collector_database(collector) as database: + ohlcv = await database.select(enums.ExchangeDataTables.OHLCV) + # use > to take into account new possible candles since collect max time is not specified + assert len(ohlcv) > 19316 + h_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, time_frame="4h") + assert len(h_ohlcv) == len(symbols) * BINANCEUS_MAX_CANDLES_COUNT + symbols_description = json.loads((await database.select(enums.DataTables.DESCRIPTION))[0][4]) + assert all(symbol in symbols_description for symbol in symbols) + eth_btc_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, symbol="ETH/BTC") + assert len(eth_btc_ohlcv) > 6598 + inch_btc_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, symbol="LTC/BTC") + assert len(inch_btc_ohlcv) > 5803 + btc_usdt_ohlcv = await database.select(enums.ExchangeDataTables.OHLCV, symbol="BTC/USDT") + assert len(btc_usdt_ohlcv) > 6598 + + +async def test_stop_collect(): + tentacles_setup_config = test_utils_config.load_test_tentacles_config() + symbols = ["AAVE/USDT"] + async with data_collector(BINANCEUS, tentacles_setup_config, symbols, None, True, 1549065660000, + 1632090006000) as collector: + async def stop_soon(): + await asyncio.sleep(5) + await collector.stop(should_stop_database=False) + + await asyncio.gather(collector.start(), stop_soon()) + assert collector.time_frames != [] + assert collector.symbols == [commons_symbols.parse_symbol(symbol) for symbol in symbols] + assert collector.exchange_name == BINANCEUS + assert collector.tentacles_setup_config == tentacles_setup_config + assert collector.finished + assert collector.exchange_manager is None + assert not os.path.isfile(collector.temp_file_path) + assert not os.path.isfile(collector.file_path) diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/__init__.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/__init__.py new file mode 100644 index 0000000000..d6179cb659 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/__init__.py @@ -0,0 +1 @@ +from .live_collector import ExchangeLiveDataCollector \ No newline at end of file diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/live_collector.pxd b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/live_collector.pxd new file mode 100644 index 0000000000..9df4ffc606 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/live_collector.pxd @@ -0,0 +1,21 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from octobot_backtesting.collectors.exchanges.exchange_collector cimport ExchangeDataCollector + +cdef class ExchangeLiveDataCollector(ExchangeDataCollector): + pass diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/live_collector.py b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/live_collector.py new file mode 100644 index 0000000000..4f40854130 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/live_collector.py @@ -0,0 +1,95 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import logging +import time + +import octobot_backtesting.collectors.exchanges as exchanges +import octobot_commons.channels_name as channels_name +import tentacles.Backtesting.importers.exchanges.generic_exchange_importer as generic_exchange_importer + +try: + import octobot_trading.exchange_channel as exchange_channel + import octobot_trading.api as trading_api +except ImportError: + logging.error("ExchangeLiveDataCollector requires OctoBot-Trading package installed") + + +class ExchangeLiveDataCollector(exchanges.AbstractExchangeLiveCollector): + IMPORTER = generic_exchange_importer.GenericExchangeDataImporter + + async def start(self): + exchange_manager = await trading_api.create_exchange_builder(self.config, self.exchange_name) \ + .is_simulated() \ + .is_rest_only() \ + .is_without_auth() \ + .is_ignoring_config() \ + .disable_trading_mode() \ + .use_tentacles_setup_config(self.tentacles_setup_config) \ + .build() + + self._load_timeframes_if_necessary() + + # create description + await self._create_description() + + exchange_id = exchange_manager.id + await exchange_channel.get_chan(channels_name.OctoBotTradingChannelsName.TICKER_CHANNEL.value, + exchange_id).new_consumer(self.ticker_callback) + await exchange_channel.get_chan(channels_name.OctoBotTradingChannelsName.RECENT_TRADES_CHANNEL.value, + exchange_id).new_consumer(self.recent_trades_callback) + await exchange_channel.get_chan(channels_name.OctoBotTradingChannelsName.ORDER_BOOK_CHANNEL.value, + exchange_id).new_consumer(self.order_book_callback) + await exchange_channel.get_chan(channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value, + exchange_id).new_consumer(self.kline_callback) + await exchange_channel.get_chan(channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, + exchange_id).new_consumer(self.ohlcv_callback) + + await asyncio.gather(*asyncio.all_tasks(asyncio.get_event_loop())) + + async def ticker_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, ticker): + self.logger.info(f"TICKER : CRYPTOCURRENCY = {cryptocurrency} || SYMBOL = {symbol} || TICKER = {ticker}") + await self.save_ticker(timestamp=time.time(), exchange=exchange, + cryptocurrency=cryptocurrency, symbol=symbol, ticker=ticker) + + async def order_book_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, asks, bids): + self.logger.info(f"ORDERBOOK : CRYPTOCURRENCY = {cryptocurrency} || SYMBOL = {symbol} " + f"|| ASKS = {asks} || BIDS = {bids}") + await self.save_order_book(timestamp=time.time(), exchange=exchange, + cryptocurrency=cryptocurrency, symbol=symbol, asks=asks, bids=bids) + + async def recent_trades_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, recent_trades): + self.logger.info(f"RECENT TRADE : CRYPTOCURRENCY = {cryptocurrency} || SYMBOL = {symbol} " + f"|| RECENT TRADE = {recent_trades}") + await self.save_recent_trades(timestamp=time.time(), exchange=exchange, + cryptocurrency=cryptocurrency, symbol=symbol, recent_trades=recent_trades) + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle): + self.logger.info(f"OHLCV : CRYPTOCURRENCY = {cryptocurrency} || SYMBOL = {symbol} " + f"|| TIME FRAME = {time_frame} || CANDLE = {candle}") + await self.save_ohlcv(timestamp=time.time(), exchange=exchange, + cryptocurrency=cryptocurrency, symbol=symbol, time_frame=time_frame, candle=candle) + + async def kline_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, kline): + self.logger.info(f"KLINE : CRYPTOCURRENCY = {cryptocurrency} || SYMBOL = {symbol} " + f"|| TIME FRAME = {time_frame} || KLINE = {kline}") + await self.save_kline(timestamp=time.time(), exchange=exchange, + cryptocurrency=cryptocurrency, symbol=symbol, time_frame=time_frame, kline=kline) diff --git a/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/metadata.json b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/metadata.json new file mode 100644 index 0000000000..684012e199 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/exchanges/exchange_live_collector/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["ExchangeLiveDataCollector"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Backtesting/collectors/social/social_history_collector/__init__.py b/packages/tentacles/Backtesting/collectors/social/social_history_collector/__init__.py new file mode 100644 index 0000000000..ea4edb1663 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/social/social_history_collector/__init__.py @@ -0,0 +1 @@ +from .social_history_collector import SocialHistoryDataCollector diff --git a/packages/tentacles/Backtesting/collectors/social/social_history_collector/metadata.json b/packages/tentacles/Backtesting/collectors/social/social_history_collector/metadata.json new file mode 100644 index 0000000000..6197c58991 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/social/social_history_collector/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SocialHistoryDataCollector"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Backtesting/collectors/social/social_history_collector/social_history_collector.py b/packages/tentacles/Backtesting/collectors/social/social_history_collector/social_history_collector.py new file mode 100644 index 0000000000..a4e3e2c8db --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/social/social_history_collector/social_history_collector.py @@ -0,0 +1,209 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging +import os +import time +import asyncio + +import octobot_backtesting.collectors as collector +import octobot_backtesting.enums as backtesting_enums +import octobot_backtesting.errors as errors +import octobot_commons.constants as commons_constants +import tentacles.Backtesting.importers.social.generic_social_importer as generic_social_importer + +try: + import octobot_services.api as services_api + import octobot_services.errors as services_errors +except ImportError: + logging.error("SocialHistoryDataCollector requires OctoBot-Services package installed") + + +class SocialHistoryDataCollector(collector.AbstractSocialHistoryCollector): + IMPORTER = generic_social_importer.GenericSocialDataImporter + + def __init__(self, config, services, tentacles_setup_config, sources=None, symbols=None, + use_all_available_sources=False, + data_format=backtesting_enums.DataFormats.REGULAR_COLLECTOR_DATA, + start_timestamp=None, + end_timestamp=None): + super().__init__(config, services, tentacles_setup_config=tentacles_setup_config, + sources=sources, symbols=symbols, + use_all_available_sources=use_all_available_sources, + data_format=data_format, + start_timestamp=start_timestamp, end_timestamp=end_timestamp) + self.tentacles_setup_config = tentacles_setup_config + self.feed_instance = None + self.feed_class = None + + async def start(self): + self.should_stop = False + should_stop_database = True + try: + # Resolve feed class by class name from services list + feed_class = self._get_feed_class_by_class_name(self.services) + if feed_class is None: + available = [f.get_name() for f in services_api.get_available_backtestable_feeds()] + raise errors.DataCollectorError( + f"Feed class not found in services list {self.services}. Available feeds: {available}" + ) + self.feed_class = feed_class + self._set_services_from_feed(feed_class) + + # Create feed instance and ensure required services exist for it + main_loop = asyncio.get_running_loop() + bot_id = "social_collector" + feed_factory = services_api.create_service_feed_factory(self.config, main_loop, bot_id) + self.feed_instance = feed_factory.create_service_feed(feed_class) + if feed_class.REQUIRED_SERVICES: + service_instances = [] + for service_class in feed_class.REQUIRED_SERVICES: + svc = await services_api.get_service( + service_class, is_backtesting=True, config=self.config + ) + service_instances.append(svc) + self.feed_instance.services = service_instances + + self._load_sources_if_necessary() + + await self.check_timestamps() + + # create description + await self._create_description() + + self.total_steps = len(self.sources) * (len(self.symbols) if self.symbols else 1) + if self.total_steps == 0: + self.total_steps = 1 + self.in_progress = True + + self.logger.info(f"Start collecting history on {feed_class.get_name()}") + for source_index, source in enumerate(self.sources or [None]): + if self.symbols: + for symbol_index, symbol in enumerate(self.symbols): + self.current_step_index = (source_index * len(self.symbols)) + symbol_index + 1 + self.logger.info( + f"Collecting history for {feed_class.get_name()} source={source} symbol={symbol}..." + ) + await self.get_social_history(feed_class.get_name(), source, symbol) + else: + self.current_step_index = source_index + 1 + self.logger.info(f"Collecting history for {feed_class.get_name()} source={source}...") + await self.get_social_history(feed_class.get_name(), source, None) + except Exception as err: + await self.database.stop() + should_stop_database = False + # Do not keep errored data file + if os.path.isfile(self.temp_file_path): + os.remove(self.temp_file_path) + if not self.should_stop: + self.logger.exception(err, True, f"Error when collecting {self.services} history: {err}") + raise errors.DataCollectorError(err) + finally: + await self.stop(should_stop_database=should_stop_database) + + def _get_feed_class_by_class_name(self, services): + """Find backtestable feed class by class name from services list.""" + if not services: + return None + feeds = services_api.get_available_backtestable_feeds() + service_set = {svc.lower() for svc in services} + for feed_class in feeds: + if feed_class.get_name().lower() in service_set: + return feed_class + return None + + def _load_all_available_sources(self): + # Override this if feed provides available sources + pass + + def _set_services_from_feed(self, feed_class): + services = [] + if feed_class is not None: + services.append(feed_class.get_name()) + for service_class in getattr(feed_class, "REQUIRED_SERVICES", []) or []: + services.append(service_class.__name__) + # Ensure uniqueness while preserving order + seen = set() + self.services = [svc for svc in services if not (svc in seen or seen.add(svc))] + + async def stop(self, should_stop_database=True): + self.should_stop = True + if self.feed_instance is not None: + await self.feed_instance.stop() + if should_stop_database: + await self.database.stop() + self.finalize_database() + self.feed_instance = None + self.in_progress = False + self.finished = True + return self.finished + + async def get_social_history(self, feed_name, source, symbol=None): + self.current_step_percent = 0 + + # Use provided timestamps (required) + start_time = self.start_timestamp + end_time = self.end_timestamp or time.time() * 1000 + + try: + historical_data = self.feed_instance.get_historical_data( + start_time, end_time, symbols=[symbol] if symbol else None, source=source + ) + + all_events = [] + async for batch in historical_data: + if batch: # batch is a list of events + all_events.extend(batch) + # Update progress + if all_events: + last_timestamp = all_events[-1].get('timestamp', time.time() * 1000) + self.current_step_percent = \ + (last_timestamp - start_time) / ((end_time - start_time)) * 100 + self.logger.info( + f"[{self.current_step_percent:.1f}%] historical data fetched for {feed_name} " + f"source={source} symbol={symbol}" + ) + + if all_events: + self.current_step_percent = 100 + self.logger.info( + f"[100%] historical data fetch complete for {feed_name} source={source} symbol={symbol}, saving..." + ) + timestamps = [event.get('timestamp', time.time() * 1000) for event in all_events] + channels = [event.get('channel', source or '') for event in all_events] + symbols_list = [event.get('symbol', symbol or '') for event in all_events] + payloads = [event.get('payload', event) for event in all_events] + + await self.save_event( + timestamp=timestamps, + service_name=feed_name, + channel=channels, + symbol=symbols_list, + payload=payloads, + multiple=True + ) + except NotImplementedError: + self.logger.warning( + f"Feed {feed_name} does not implement get_historical_data. Skipping history collection." + ) + except Exception as err: + self.logger.exception(err, False) + self.logger.warning(f"Ignored {feed_name} history collection ({err})") + + async def check_timestamps(self): + if self.start_timestamp is None: + raise errors.DataCollectorError("start_timestamp is required for social history collection") + if self.start_timestamp > (self.end_timestamp if self.end_timestamp else (time.time() * 1000)): + raise errors.DataCollectorError("start_timestamp is higher than end_timestamp") diff --git a/packages/tentacles/Backtesting/collectors/social/social_live_collector/__init__.py b/packages/tentacles/Backtesting/collectors/social/social_live_collector/__init__.py new file mode 100644 index 0000000000..205fd786f3 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/social/social_live_collector/__init__.py @@ -0,0 +1 @@ +from .social_live_collector import SocialLiveDataCollector diff --git a/packages/tentacles/Backtesting/collectors/social/social_live_collector/metadata.json b/packages/tentacles/Backtesting/collectors/social/social_live_collector/metadata.json new file mode 100644 index 0000000000..a524395874 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/social/social_live_collector/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SocialLiveDataCollector"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Backtesting/collectors/social/social_live_collector/social_live_collector.py b/packages/tentacles/Backtesting/collectors/social/social_live_collector/social_live_collector.py new file mode 100644 index 0000000000..3febb047a7 --- /dev/null +++ b/packages/tentacles/Backtesting/collectors/social/social_live_collector/social_live_collector.py @@ -0,0 +1,164 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import logging +import time + +import octobot_backtesting.collectors as collector +import octobot_backtesting.enums as backtesting_enums +import async_channel.channels as channels + +try: + import octobot_services.api as services_api + import octobot_services.service_feeds as service_feeds + import octobot_services.service_feeds.service_feed_factory as service_feed_factory +except ImportError: + logging.error("SocialLiveDataCollector requires OctoBot-Services package installed") + + +class SocialLiveDataCollector(collector.AbstractSocialLiveCollector): + IMPORTER = None # Live collectors typically don't need importers + + def __init__(self, config, services, tentacles_setup_config, sources=None, symbols=None, + service_feed_class=None, channel_name=None, + data_format=backtesting_enums.DataFormats.REGULAR_COLLECTOR_DATA): + super().__init__(config, services, sources=sources, symbols=symbols, + data_format=data_format) + self.tentacles_setup_config = tentacles_setup_config + self.service_feed_class = service_feed_class + self.channel_name = channel_name + self.bot_id = "live_collector" # Default bot_id for live collector + self.consumers = [] + self.feed_class = None + + async def start(self): + await self.initialize() + self._load_sources_if_necessary() + + # create description + await self._create_description() + + # Find service feed channels to consume from + feed_channels = await self._get_service_feed_channels() + + if not feed_channels: + self.logger.warning( + f"No service feed channels found for {self.services}. " + f"Make sure service feeds are running." + ) + return + + # Subscribe to all found channels + for channel_name, channel in feed_channels.items(): + self.logger.info(f"Subscribing to service feed channel: {channel_name}") + consumer = await channel.new_consumer(self._service_feed_callback) + self.consumers.append(consumer) + + self.logger.info(f"Started collecting live data from {self.services}") + # Keep running until stopped + await asyncio.gather(*asyncio.all_tasks(asyncio.get_event_loop())) + + async def _get_service_feed_channels(self): + """Get service feed channels associated with the social service""" + feed_channels = {} + + # If channel_name is provided, use it directly + if self.channel_name: + try: + channel = channels.get_chan(self.channel_name) + feed_channels[self.channel_name] = channel + return feed_channels + except KeyError: + self.logger.warning(f"Channel {self.channel_name} not found") + + # If service_feed_class is provided, use it + if self.service_feed_class: + try: + service_feed = services_api.get_service_feed(self.service_feed_class, self.bot_id) + if service_feed and service_feed.FEED_CHANNEL: + channel_name = service_feed.FEED_CHANNEL.get_name() + channel = channels.get_chan(channel_name) + feed_channels[channel_name] = channel + return feed_channels + except (RuntimeError, KeyError) as err: + self.logger.warning(f"Could not get service feed {self.service_feed_class}: {err}") + + # Try to find service feeds associated with the service class names + available_feeds = service_feed_factory.ServiceFeedFactory.get_available_service_feeds(in_backtesting=False) + service_set = {svc.lower() for svc in self.services or []} + for feed_class in available_feeds: + if feed_class.get_name().lower() in service_set: + try: + service_feed = services_api.get_service_feed(feed_class, self.bot_id) + if service_feed and service_feed.FEED_CHANNEL: + channel_name = service_feed.FEED_CHANNEL.get_name() + channel = channels.get_chan(channel_name) + feed_channels[channel_name] = channel + self.logger.info(f"Found matching service feed: {feed_class.get_name()}") + except (RuntimeError, KeyError): + continue + + return feed_channels + + async def _service_feed_callback(self, data): + """Callback for service feed channel messages""" + try: + # Extract data from the channel message + # Service feed channels send: {"data": actual_data} + event_data = data.get("data", data) + + # Extract metadata + service_name = (self.services[0] if self.services else "") + channel = data.get("channel", "") + symbol = event_data.get("symbol") if isinstance(event_data, dict) else None + timestamp = event_data.get("timestamp", time.time() * 1000) if isinstance(event_data, dict) else time.time() * 1000 + + # Prepare payload + if isinstance(event_data, dict): + payload = event_data + else: + payload = {"data": event_data} + + self.logger.info( + f"LIVE EVENT : SERVICE = {service_name} || CHANNEL = {channel} || " + f"SYMBOL = {symbol} || TIMESTAMP = {timestamp}" + ) + + await self.save_event( + timestamp=timestamp, + service_name=service_name, + channel=channel, + symbol=symbol, + payload=payload + ) + except Exception as err: + self.logger.exception(err, False, f"Error processing service feed event: {err}") + + async def stop(self, should_stop_database=True): + self.should_stop = True + + # Stop all consumers + for consumer in self.consumers: + await consumer.stop() + self.consumers = [] + + if should_stop_database: + await self.database.stop() + self.finalize_database() + + self.in_progress = False + self.finished = True + return self.finished diff --git a/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/__init__.py b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/__init__.py new file mode 100644 index 0000000000..69fef68891 --- /dev/null +++ b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/__init__.py @@ -0,0 +1 @@ +from .legacy_converter import LegacyDataConverter \ No newline at end of file diff --git a/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/legacy_converter.pxd b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/legacy_converter.pxd new file mode 100644 index 0000000000..52b8d5da86 --- /dev/null +++ b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/legacy_converter.pxd @@ -0,0 +1,30 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_backtesting.converters.data_converter cimport DataConverter +from octobot_backtesting.data.database cimport DataBase + +cdef class LegacyDataConverter(DataConverter): + cdef str exchange_name + cdef str symbol + cdef str time_data + cdef list time_frames + cdef dict file_content + cdef DataBase database + + cdef list _get_formatted_candles(self, object time_frame) + cdef dict _read_data_file(self) + cdef dict _read_data_file(self) diff --git a/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/legacy_converter.py b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/legacy_converter.py new file mode 100644 index 0000000000..716f8bec7d --- /dev/null +++ b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/legacy_converter.py @@ -0,0 +1,158 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import gzip +import json +import enum +import os.path as path +import datetime + +import octobot_backtesting.collectors.exchanges as exchanges +import octobot_backtesting.constants as backtesting_constants +import octobot_backtesting.converters as converters +import octobot_backtesting.data as backtesting_data +import octobot_backtesting.enums as backtesting_enums +import octobot_commons.databases as databases +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.symbols.symbol_util as symbol_util + + +class LegacyDataConverter(converters.DataConverter): + """ + LegacyDataConverter can be used to convert OctoBot v0.3 data files into v0.4 data files. + """ + DATA_FILE_EXT = ".data" + VERSION = "1.0" + DATA_FILE_TIME_DATE_FORMAT = '%Y%m%d%H%M%S' + + class PriceIndexes(enum.Enum): + IND_PRICE_TIME = 0 + IND_PRICE_OPEN = 1 + IND_PRICE_HIGH = 2 + IND_PRICE_LOW = 3 + IND_PRICE_CLOSE = 4 + IND_PRICE_VOL = 5 + + def __init__(self, backtesting_file_to_convert): + super().__init__(backtesting_file_to_convert) + self.exchange_name = "" + self.symbol = "" + self.time_data = "" + self.time_frames = [] + self.file_content = {} + self.database = None + self.converted_file = backtesting_data.get_backtesting_file_name(exchanges.AbstractExchangeHistoryCollector) + + async def can_convert(self, ) -> bool: + self.exchange_name, self.symbol, self.time_data = LegacyDataConverter._interpret_file_name(self.file_to_convert) + if None in (self.exchange_name, self.symbol, self.time_data): + return False + self.file_content = self._read_data_file() + if not self.file_content: + return False + for time_frame, candles_data in self.file_content.items(): + try: + # check time frame validity + time_frame = commons_enums.TimeFrames(time_frame) + # check candle data validity + if isinstance(candles_data, list) and len(candles_data) == 6: + # check candle data non-emptiness + if all(data for data in candles_data): + self.time_frames.append(time_frame) + except ValueError: + pass + return bool(self.time_frames) + + async def convert(self) -> bool: + try: + self.database = databases.SQLiteDatabase( + path.join(backtesting_constants.BACKTESTING_FILE_PATH, self.converted_file)) + await self.database.initialize() + await self._create_description() + for time_frame in self.time_frames: + await self._convert_ohlcv(time_frame) + return True + except Exception as e: + self.logger.exception(e, True, f"Error while converting data file: {e}") + return False + finally: + if self.database is not None: + await self.database.stop() + + async def _create_description(self): + time_object = datetime.datetime.strptime(self.time_data, self.DATA_FILE_TIME_DATE_FORMAT) + await self.database.insert(backtesting_enums.DataTables.DESCRIPTION, + timestamp=datetime.datetime.timestamp(time_object), + version=self.VERSION, + exchange=self.exchange_name, + symbols=json.dumps([self.symbol]), + time_frames=json.dumps([tf.value for tf in self.time_frames])) + + async def _convert_ohlcv(self, time_frame): + # use time_frame_sec to add time to save the candle closing time + time_frame_sec = commons_enums.TimeFramesMinutes[time_frame] * commons_constants.MINUTE_TO_SECONDS + candles = self._get_formatted_candles(time_frame) + await self.database.insert_all(backtesting_enums.ExchangeDataTables.OHLCV, + timestamp=[candle[0] + time_frame_sec for candle in candles], + exchange_name=self.exchange_name, symbol=self.symbol, + time_frame=time_frame.value, candle=[json.dumps(c) for c in candles]) + + def _get_formatted_candles(self, time_frame): + data = self.file_content[time_frame.value] + candles = [] + for i in range(len(data[LegacyDataConverter.PriceIndexes.IND_PRICE_TIME.value])): + candles.insert(i, [None] * len(LegacyDataConverter.PriceIndexes)) + candles[i][LegacyDataConverter.PriceIndexes.IND_PRICE_CLOSE.value] = \ + data[LegacyDataConverter.PriceIndexes.IND_PRICE_CLOSE.value][i] + candles[i][LegacyDataConverter.PriceIndexes.IND_PRICE_OPEN.value] = \ + data[LegacyDataConverter.PriceIndexes.IND_PRICE_OPEN.value][i] + candles[i][LegacyDataConverter.PriceIndexes.IND_PRICE_HIGH.value] = \ + data[LegacyDataConverter.PriceIndexes.IND_PRICE_HIGH.value][i] + candles[i][LegacyDataConverter.PriceIndexes.IND_PRICE_LOW.value] = \ + data[LegacyDataConverter.PriceIndexes.IND_PRICE_LOW.value][i] + candles[i][LegacyDataConverter.PriceIndexes.IND_PRICE_TIME.value] = \ + data[LegacyDataConverter.PriceIndexes.IND_PRICE_TIME.value][i] + candles[i][LegacyDataConverter.PriceIndexes.IND_PRICE_VOL.value] = \ + data[LegacyDataConverter.PriceIndexes.IND_PRICE_VOL.value][i] + return candles + + def _read_data_file(self): + try: + # try zipfile + with gzip.open(self.file_to_convert, 'r') as file_to_parse: + file_content = json.loads(file_to_parse.read()) + except OSError: + # try without unzip + with open(self.file_to_convert) as file_to_parse: + file_content = json.loads(file_to_parse.read()) + except Exception: + return {} + return file_content + + @staticmethod + def _interpret_file_name(file_name): + data = path.basename(file_name).split("_") + try: + exchange_name = data[0] + symbol = symbol_util.merge_currencies(data[1], data[2]) + file_ext = LegacyDataConverter.DATA_FILE_EXT + timestamp = data[3] + data[4].replace(file_ext, "") + except KeyError: + exchange_name = None + symbol = None + timestamp = None + + return exchange_name, symbol, timestamp diff --git a/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/metadata.json b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/metadata.json new file mode 100644 index 0000000000..330532d97c --- /dev/null +++ b/packages/tentacles/Backtesting/converters/exchanges/legacy_data_converter/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["LegacyDataConverter"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/__init__.py b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/__init__.py new file mode 100644 index 0000000000..1d63a46c6c --- /dev/null +++ b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/__init__.py @@ -0,0 +1 @@ +from .generic_exchange_importer import GenericExchangeDataImporter \ No newline at end of file diff --git a/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/generic_exchange_importer.pxd b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/generic_exchange_importer.pxd new file mode 100644 index 0000000000..f1a6b621a8 --- /dev/null +++ b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/generic_exchange_importer.pxd @@ -0,0 +1,20 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_backtesting.importers.exchanges.exchange_importer cimport ExchangeDataImporter + +cdef class GenericExchangeDataImporter(ExchangeDataImporter): + pass \ No newline at end of file diff --git a/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/generic_exchange_importer.py b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/generic_exchange_importer.py new file mode 100644 index 0000000000..87a695dae1 --- /dev/null +++ b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/generic_exchange_importer.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.importers as importers + + +class GenericExchangeDataImporter(importers.ExchangeDataImporter): + pass diff --git a/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/metadata.json b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/metadata.json new file mode 100644 index 0000000000..b4d1b72504 --- /dev/null +++ b/packages/tentacles/Backtesting/importers/exchanges/generic_exchange_importer/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["GenericExchangeDataImporter"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Backtesting/importers/social/generic_social_importer/__init__.py b/packages/tentacles/Backtesting/importers/social/generic_social_importer/__init__.py new file mode 100644 index 0000000000..a517152e31 --- /dev/null +++ b/packages/tentacles/Backtesting/importers/social/generic_social_importer/__init__.py @@ -0,0 +1 @@ +from .generic_social_importer import GenericSocialDataImporter diff --git a/packages/tentacles/Backtesting/importers/social/generic_social_importer/generic_social_importer.py b/packages/tentacles/Backtesting/importers/social/generic_social_importer/generic_social_importer.py new file mode 100644 index 0000000000..0dcc304038 --- /dev/null +++ b/packages/tentacles/Backtesting/importers/social/generic_social_importer/generic_social_importer.py @@ -0,0 +1,20 @@ +# Drakkar-Software OctoBot-Backtesting +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.importers as importers + + +class GenericSocialDataImporter(importers.SocialDataImporter): + pass diff --git a/packages/tentacles/Backtesting/importers/social/generic_social_importer/metadata.json b/packages/tentacles/Backtesting/importers/social/generic_social_importer/metadata.json new file mode 100644 index 0000000000..2c5ec842de --- /dev/null +++ b/packages/tentacles/Backtesting/importers/social/generic_social_importer/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["GenericSocialDataImporter"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/__init__.py b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/__init__.py new file mode 100644 index 0000000000..85885abd6f --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/__init__.py @@ -0,0 +1,16 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from .dsl_realtime_evaluator import DSLRealtimeEvaluator diff --git a/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/config/DSLRealtimeEvaluator.json b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/config/DSLRealtimeEvaluator.json new file mode 100644 index 0000000000..600a734e44 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/config/DSLRealtimeEvaluator.json @@ -0,0 +1,5 @@ +{ + "trigger_channel": "ohlcv", + "dsl_script": "", + "time_frame": "1m" +} diff --git a/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/dsl_realtime_evaluator.py b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/dsl_realtime_evaluator.py new file mode 100644 index 0000000000..2a70e574e1 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/dsl_realtime_evaluator.py @@ -0,0 +1,282 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import typing + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.errors as commons_errors +import octobot_commons.channels_name as channels_name +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.util as evaluators_util +import octobot_trading.exchange_channel as exchange_channels +import octobot_trading.api as trading_api + +import tentacles.Meta.DSL_operators as dsl_operators + + +TRIGGER_CHANNEL_OHLCV = "ohlcv" +TRIGGER_CHANNEL_KLINE = "kline" +TRIGGER_CHANNEL_TICKER = "ticker" +TRIGGER_CHANNEL_ALL_TICKERS = "all_tickers" + +ALL_TICKERS_DEFAULT_REFRESH_TIME = 64 +ALL_TICKERS_REFRESH_TIME_KEY = "all_tickers_refresh_time" + +TRIGGER_CHANNEL_TO_EXCHANGE_CHANNEL = { + TRIGGER_CHANNEL_OHLCV: channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, + TRIGGER_CHANNEL_KLINE: channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value, + TRIGGER_CHANNEL_TICKER: channels_name.OctoBotTradingChannelsName.TICKER_CHANNEL.value, +} + + +class DSLRealtimeEvaluator(evaluators.RealTimeEvaluator): + TRIGGER_CHANNEL_KEY = "trigger_channel" + DSL_SCRIPT_KEY = "dsl_script" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.trigger_channel: str = TRIGGER_CHANNEL_OHLCV + self.dsl_script: str = "" + self.interpreter: typing.Optional[dsl_interpreter.Interpreter] = None + self.exchange_manager = None + self.triggered_symbol: str = "" + self.all_tickers_refresh_time: float = ALL_TICKERS_DEFAULT_REFRESH_TIME + self._all_tickers_task: typing.Optional[asyncio.Task] = None + self._current_tickers: dict[str, dict] = {} + + def init_user_inputs(self, inputs: dict) -> None: + self.time_frame = self.time_frame or self.UI.user_input( + commons_constants.CONFIG_TIME_FRAME, + commons_enums.UserInputTypes.OPTIONS, + commons_enums.TimeFrames.ONE_MINUTE.value, + inputs, + options=[tf.value for tf in commons_enums.TimeFrames], + title="Time frame: The time frame to observe (used for OHLCV and Kline channels).", + ) + self.trigger_channel = self.UI.user_input( + self.TRIGGER_CHANNEL_KEY, + commons_enums.UserInputTypes.OPTIONS, + TRIGGER_CHANNEL_OHLCV, + inputs, + options=[ + TRIGGER_CHANNEL_OHLCV, TRIGGER_CHANNEL_KLINE, + TRIGGER_CHANNEL_TICKER, TRIGGER_CHANNEL_ALL_TICKERS, + ], + title="Trigger channel: The data channel that triggers DSL evaluation. " + "'ohlcv' fires on candle close, 'kline' fires on every price tick, " + "'ticker' fires on ticker updates (~14-64s), " + "'all_tickers' periodically fetches ALL exchange tickers and evaluates each symbol.", + ) + self.all_tickers_refresh_time = float(self.UI.user_input( + ALL_TICKERS_REFRESH_TIME_KEY, + commons_enums.UserInputTypes.INT, + ALL_TICKERS_DEFAULT_REFRESH_TIME, + inputs, + title="All tickers refresh time (seconds): How often to fetch all tickers " + "(only used when trigger_channel is 'all_tickers').", + )) + self.dsl_script = str(self.UI.user_input( + self.DSL_SCRIPT_KEY, + commons_enums.UserInputTypes.TEXT, + "", + inputs, + other_schema_values={"minLength": 0}, + title="DSL condition: The DSL expression to evaluate. " + "The script result is used as eval_note when truthy, stays pending otherwise. " + "Available operators: close(), market_expiry(), now_ms(), triggered_symbol(), etc.", + )) + + async def start(self, bot_id: str) -> bool: + if trading_api is None or exchange_channels is None: + self.logger.error("Can't connect to trading channels: octobot_trading is not installed") + return False + exchange_id = trading_api.get_exchange_id_from_matrix_id( + self.exchange_name, self.matrix_id + ) + self.exchange_manager = trading_api.get_exchange_manager_from_exchange_id( + exchange_id + ) + self._create_interpreter() + self._prepare_dsl_script() + if self.trigger_channel == TRIGGER_CHANNEL_ALL_TICKERS: + self._all_tickers_task = asyncio.create_task( + self._all_tickers_update_loop() + ) + self.logger.info( + f"Started all_tickers update loop " + f"(refresh every {self.all_tickers_refresh_time}s)" + ) + return True + channel_name = TRIGGER_CHANNEL_TO_EXCHANGE_CHANNEL.get(self.trigger_channel) + if channel_name is None: + self.logger.error(f"Unknown trigger channel: {self.trigger_channel}") + return False + if self.trigger_channel == TRIGGER_CHANNEL_TICKER: + await exchange_channels.get_chan( + channel_name, exchange_id + ).new_consumer( + callback=self.ticker_callback, + symbol=self.symbol, + priority_level=self.priority_level, + ) + elif self.trigger_channel == TRIGGER_CHANNEL_KLINE: + await exchange_channels.get_chan( + channel_name, exchange_id + ).new_consumer( + callback=self.kline_callback, + symbol=self.symbol, + time_frame=self.available_time_frame, + priority_level=self.priority_level, + ) + elif self.trigger_channel == TRIGGER_CHANNEL_OHLCV: + await exchange_channels.get_chan( + channel_name, exchange_id + ).new_consumer( + callback=self.ohlcv_callback, + symbol=self.symbol, + time_frame=self.available_time_frame, + priority_level=self.priority_level, + ) + return True + + async def _all_tickers_update_loop(self): + while True: + try: + tickers = await self.exchange_manager.exchange.get_all_currencies_price_ticker() + if tickers: + self._current_tickers.update(tickers) + self.logger.debug( + f"Fetched {len(tickers)} tickers, evaluating DSL for each" + ) + for symbol in tickers: + await self._evaluate("", symbol, eval_time=0) + else: + self.logger.warning("No tickers returned from exchange") + except asyncio.CancelledError: + self.logger.debug("All tickers update loop cancelled") + return + except Exception as err: + self.logger.exception( + err, True, + f"Error fetching all tickers: {err}", + ) + await asyncio.sleep(self.all_tickers_refresh_time) + + async def ohlcv_callback( + self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, + ): + await self._evaluate( + cryptocurrency, symbol, + evaluators_util.get_eval_time(full_candle=candle, time_frame=time_frame), + ) + + async def kline_callback( + self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, kline, + ): + await self._evaluate( + cryptocurrency, symbol, + evaluators_util.get_eval_time(kline=kline), + ) + + async def ticker_callback( + self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, ticker, + ): + await self._evaluate(cryptocurrency, symbol, eval_time=0) + + async def _evaluate( + self, cryptocurrency: str, symbol: str, eval_time: int, + ): + if self.interpreter is None: + self.logger.warning("DSL interpreter not initialized, skipping evaluation") + return + self.triggered_symbol = symbol + try: + result = await self.interpreter.compute_expression() + if result is None or result is False: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + return + self.eval_note = result + except commons_errors.DSLInterpreterError as err: + self.logger.debug( + f"DSL evaluation skipped for {symbol}: {err}" + ) + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + return + except Exception as err: + self.logger.exception( + err, True, + f"Unexpected DSL evaluation error for {symbol}: {err}", + ) + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + return + await self.evaluation_completed( + cryptocurrency, symbol, self.available_time_frame, + eval_time=eval_time, + ) + + async def stop(self) -> None: + await super().stop() + if self._all_tickers_task is not None and not self._all_tickers_task.done(): + self._all_tickers_task.cancel() + try: + await self._all_tickers_task + except asyncio.CancelledError: + pass + self._all_tickers_task = None + + def _create_interpreter(self): + operators = ( + dsl_interpreter.get_all_operators() + + dsl_operators.create_ohlcv_operators(self.exchange_manager, None, None) + + dsl_operators.create_symbol_operators(self) + + dsl_operators.create_ticker_operators(self._current_tickers) + ) + self.interpreter = dsl_interpreter.Interpreter(operators) + + def _prepare_dsl_script(self): + if not self.dsl_script: + self.logger.warning("No DSL script configured") + return + try: + self.interpreter.prepare(self.dsl_script) + self.logger.info(f"DSL script successfully loaded: '{self.dsl_script}'") + except commons_errors.DSLInterpreterError as err: + self.logger.exception( + err, True, + f"Error when parsing DSL script '{self.dsl_script}': {err}", + ) + except Exception as err: + self.logger.exception( + err, True, + f"Unexpected error when parsing DSL script '{self.dsl_script}': {err}", + ) + + def set_default_config(self): + super().set_default_config() + self.specific_config[commons_constants.CONFIG_TIME_FRAME] = "1m" + + @classmethod + def get_is_symbol_wildcard(cls) -> bool: + return True + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + return True diff --git a/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/metadata.json b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/metadata.json new file mode 100644 index 0000000000..7e82e93850 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DSLRealtimeEvaluator"], + "tentacles-requirements": [] +} diff --git a/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/resources/DSLRealtimeEvaluator.md b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/resources/DSLRealtimeEvaluator.md new file mode 100644 index 0000000000..79d5d081b0 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/resources/DSLRealtimeEvaluator.md @@ -0,0 +1 @@ +DSLRealtimeEvaluator is a generic real-time evaluator that evaluates a DSL condition expression on a configurable trigger channel (OHLCV, Kline, or Ticker). When the condition is truthy, it emits a positive evaluation (+1); when falsy, it emits a negative evaluation (-1). diff --git a/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/RealTime/dsl_realtime_evaluator/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/__init__.py b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/__init__.py new file mode 100644 index 0000000000..fcc7792b11 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/__init__.py @@ -0,0 +1 @@ +from .instant_fluctuations import InstantFluctuationsEvaluator, InstantMAEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/config/InstantFluctuationsEvaluator.json b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/config/InstantFluctuationsEvaluator.json new file mode 100644 index 0000000000..7957a440a9 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/config/InstantFluctuationsEvaluator.json @@ -0,0 +1,5 @@ +{ + "price_difference_threshold_percent": 1, + "volume_difference_threshold_percent": 400, + "time_frame": "1m" +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/config/InstantMAEvaluator.json b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/config/InstantMAEvaluator.json new file mode 100644 index 0000000000..d5b11140a4 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/config/InstantMAEvaluator.json @@ -0,0 +1,5 @@ +{ + "period": 6, + "time_frame": "1m", + "threshold": 0.5 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/instant_fluctuations.py b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/instant_fluctuations.py new file mode 100644 index 0000000000..0ef29dd137 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/instant_fluctuations.py @@ -0,0 +1,295 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import math +import tulipy +import numpy as np + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.channels_name as channels_name +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.util as evaluators_util + + +class InstantFluctuationsEvaluator(evaluators.RealTimeEvaluator): + """ + Idea: moves are lasting approx 12min + Check the last 12 candles and compute mean closing prices as + well as mean volume with a gradually narrower interval to + compute the strength or weakness of the move + """ + + PRICE_THRESHOLD_KEY = "price_difference_threshold_percent" + VOLUME_THRESHOLD_KEY = "volume_difference_threshold_percent" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.something_is_happening = False + self.last_notification_eval = 0 + + self.average_prices = {} + self.last_price = 0 + + # Volume + self.average_volumes = {} + self.last_volume = 0 + + # Constants + self.time_frame = None + self.VOLUME_HAPPENING_THRESHOLD = None + self.PRICE_HAPPENING_THRESHOLD = None + self.MIN_TRIGGERING_DELTA = 0.15 + self.candle_segments = [10, 8, 6, 5, 4, 3, 2, 1] + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + self.time_frame = self.time_frame or \ + self.UI.user_input(commons_constants.CONFIG_TIME_FRAME, commons_enums.UserInputTypes.OPTIONS, + commons_enums.TimeFrames.ONE_MINUTE.value, + inputs, options=[tf.value for tf in commons_enums.TimeFrames], + title="Time frame: The time frame to observe in order to spot changes.") + self.VOLUME_HAPPENING_THRESHOLD = 1 + self.UI.user_input( + self.VOLUME_THRESHOLD_KEY, commons_enums.UserInputTypes.FLOAT, 400, inputs, min_val=0, + title="Volume threshold: volume difference in percent from which to trigger a notification." + ) / 100 + self.PRICE_HAPPENING_THRESHOLD = self.UI.user_input( + self.PRICE_THRESHOLD_KEY, commons_enums.UserInputTypes.FLOAT, 1, inputs, min_val=0, + title="Price threshold: price difference in percent from which to trigger a notification." + ) / 100 + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle): + volume_data = self.get_symbol_candles(exchange, exchange_id, symbol, time_frame). \ + get_symbol_volume_candles(self.candle_segments[0]) + close_data = self.get_symbol_candles(exchange, exchange_id, symbol, time_frame). \ + get_symbol_close_candles(self.candle_segments[0]) + for segment in self.candle_segments: + volume_data = [d for d in volume_data[-segment:] if d is not None] + price_data = [d for d in close_data[-segment:] if d is not None] + self.average_volumes[segment] = np.mean(volume_data) + self.average_prices[segment] = np.mean(price_data) + + try: + self.last_volume = volume_data[-1] + self.last_price = close_data[-1] + await self._trigger_evaluation(cryptocurrency, symbol, + evaluators_util.get_eval_time(full_candle=candle, time_frame=time_frame)) + except IndexError: + # candles data history is probably not yet available + self.logger.debug(f"Impossible to evaluate, no historical data for {symbol} on {time_frame}") + + async def kline_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, kline): + self.last_volume = kline[commons_enums.PriceIndexes.IND_PRICE_VOL.value] + self.last_price = kline[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value] + await self._trigger_evaluation(cryptocurrency, symbol, evaluators_util.get_eval_time(kline=kline)) + + async def _trigger_evaluation(self, cryptocurrency, symbol, time): + self.evaluate_volume_fluctuations() + if self.something_is_happening and self.eval_note != commons_constants.START_PENDING_EVAL_NOTE: + if abs(self.last_notification_eval - self.eval_note) >= self.MIN_TRIGGERING_DELTA: + self.last_notification_eval = self.eval_note + await self.evaluation_completed(cryptocurrency, symbol, self.available_time_frame, + eval_time=time) + self.something_is_happening = False + else: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + + def evaluate_volume_fluctuations(self): + volume_trigger = 0 + price_trigger = 0 + + for segment in self.candle_segments: + if segment in self.average_volumes and segment in self.average_prices: + # check volume fluctuation + if self.last_volume > self.VOLUME_HAPPENING_THRESHOLD * self.average_volumes[segment]: + volume_trigger += 1 + self.something_is_happening = True + + # check price fluctuation + segment_average_price = self.average_prices[segment] + if self.last_price > (1 + self.PRICE_HAPPENING_THRESHOLD) * segment_average_price: + price_trigger += 1 + self.something_is_happening = True + + elif self.last_price < (1 - self.PRICE_HAPPENING_THRESHOLD) * segment_average_price: + price_trigger -= 1 + self.something_is_happening = True + + if self.candle_segments: + average_volume_trigger = min(1, volume_trigger / len(self.candle_segments) + 0.2) + average_price_trigger = price_trigger / len(self.candle_segments) + + if average_price_trigger < 0: + # math.cos(1-x) between 0 and 1 starts around 0.5 and smoothly goes up to 1 + self.eval_note = -1 * math.cos(1 - (-1 * average_price_trigger * average_volume_trigger)) + elif average_price_trigger > 0: + self.eval_note = math.cos(1 - average_price_trigger * average_volume_trigger) + else: + # no price info => high volume but no price move, can't say anything + self.something_is_happening = False + else: + self.something_is_happening = False + + async def start(self, bot_id: str) -> bool: + """ + Subscribe to Kline and OHLCV notification + :return: bool + """ + try: + import octobot_trading.exchange_channel as exchange_channels + import octobot_trading.api as trading_api + exchange_id = trading_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + await exchange_channels.get_chan(channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, + exchange_id).new_consumer( + callback=self.ohlcv_callback, symbol=self.symbol, + time_frame=self.available_time_frame, priority_level=self.priority_level) + await exchange_channels.get_chan(channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value, + exchange_id).new_consumer( + callback=self.kline_callback, symbol=self.symbol, + time_frame=self.available_time_frame, priority_level=self.priority_level) + return True + except ImportError: + self.logger.error("Can't connect to trading channels") + return False + + def set_default_config(self): + super().set_default_config() + self.specific_config[commons_constants.CONFIG_TIME_FRAME] = "1m" + + @classmethod + def get_is_symbol_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not symbol dependant else False + """ + return False + + +class InstantMAEvaluator(evaluators.RealTimeEvaluator): + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.last_candle_data = {} + self.last_moving_average_values = {} + self.period = 6 + self.time_frame = None + self.price_threshold = 0.05 + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + self.time_frame = self.time_frame or \ + self.UI.user_input(commons_constants.CONFIG_TIME_FRAME, commons_enums.UserInputTypes.OPTIONS, + commons_enums.TimeFrames.ONE_MINUTE.value, + inputs, options=[tf.value for tf in commons_enums.TimeFrames], + title="Time frame: The time frame to observe in order to spot changes.") + self.period = self.UI.user_input("period", commons_enums.UserInputTypes.INT, 6, inputs, + min_val=1, title="Period: the EMA period length to use.") + self.price_threshold = self.UI.user_input( + "threshold", commons_enums.UserInputTypes.FLOAT, self.price_threshold * 100, inputs, min_val=0, + title="Price threshold: price difference in percent from the current moving average value starting " + "from which to trigger an evaluation." + ) / 100 + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle): + self.eval_note = 0 + new_data = self.get_symbol_candles(exchange, exchange_id, symbol, time_frame). \ + get_symbol_close_candles(20) + should_eval = symbol not in self.last_candle_data or \ + not self._compare_data(new_data, self.last_candle_data[symbol]) + self.last_candle_data[symbol] = new_data + if should_eval: + if len(self.last_candle_data[symbol]) > self.period: + self.last_moving_average_values[symbol] = tulipy.sma(self.last_candle_data[symbol], + self.period) + await self._evaluate_current_price(self.last_candle_data[symbol][-1], cryptocurrency, symbol, + evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + async def kline_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, kline): + if symbol in self.last_moving_average_values and len(self.last_moving_average_values[symbol]) > 0: + self.eval_note = 0 + last_price = kline[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value] + if last_price != self.last_candle_data[symbol][-1]: + await self._evaluate_current_price(last_price, cryptocurrency, symbol, + evaluators_util.get_eval_time(kline=kline)) + + async def _evaluate_current_price(self, last_price, cryptocurrency, symbol, time): + last_ma_value = self.last_moving_average_values[symbol][-1] + if last_ma_value == 0: + self.eval_note = 0 + else: + lower_threshold = last_ma_value * (1 - self.price_threshold) + upper_threshold = last_ma_value * (1 + self.price_threshold) + if lower_threshold < last_price < upper_threshold: + self.eval_note = 0 + else: + current_ratio = last_price / last_ma_value + if current_ratio > 1: + # last_price > last_ma_value => sell ? => eval_note > 0 + if current_ratio >= 2: + self.eval_note = 1 + else: + self.eval_note = current_ratio - 1 + elif current_ratio < 1: + # last_price < last_ma_value => buy ? => eval_note < 0 + self.eval_note = -1 * (1 - current_ratio) + else: + self.eval_note = 0 + + await self.evaluation_completed(cryptocurrency, symbol, self.available_time_frame, + eval_time=time) + + async def start(self, bot_id: str) -> bool: + """ + Subscribe to Kline and OHLCV notification + :return: bool + """ + try: + import octobot_trading.exchange_channel as exchange_channels + import octobot_trading.api as trading_api + exchange_id = trading_api.get_exchange_id_from_matrix_id(self.exchange_name, self.matrix_id) + await exchange_channels.get_chan(channels_name.OctoBotTradingChannelsName.OHLCV_CHANNEL.value, + exchange_id).new_consumer( + callback=self.ohlcv_callback, time_frame=self.available_time_frame, priority_level=self.priority_level) + await exchange_channels.get_chan(channels_name.OctoBotTradingChannelsName.KLINE_CHANNEL.value, + exchange_id).new_consumer( + callback=self.kline_callback, time_frame=self.available_time_frame, priority_level=self.priority_level) + return True + except ImportError: + self.logger.error("Can't connect to trading channels") + return False + + def set_default_config(self): + super().set_default_config() + self.specific_config[commons_constants.CONFIG_TIME_FRAME] = "1m" + + @staticmethod + def _compare_data(new_data, old_data): + try: + if new_data[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value][-1] != \ + old_data[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value][-1]: + return False + return True + except Exception: + return False diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/metadata.json b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/metadata.json new file mode 100644 index 0000000000..0fa8b84697 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["InstantFluctuationsEvaluator", "InstantMAEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/resources/InstantFluctuationsEvaluator.md b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/resources/InstantFluctuationsEvaluator.md new file mode 100644 index 0000000000..ec57f08c52 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/resources/InstantFluctuationsEvaluator.md @@ -0,0 +1,3 @@ +Triggers when a superior to 1% change of price or a superior to x4 change of volume from recent average happens. + +The price distance from recent average is defining the strength the evaluation. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/resources/InstantMAEvaluator.md b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/resources/InstantMAEvaluator.md new file mode 100644 index 0000000000..5e2ad8e656 --- /dev/null +++ b/packages/tentacles/Evaluator/RealTime/instant_fluctuations_evaluator/resources/InstantMAEvaluator.md @@ -0,0 +1,7 @@ +Uses a [moving average](https://www.investopedia.com/terms/m/movingaverage.asp) +computed on close prices to set its evaluation. + +Will trigger an evaluation when the current close price is beyond the given price threshold applied on +the latest moving average value. + +Triggers on each new candle and price change. diff --git a/packages/tentacles/Evaluator/Social/forum_evaluator/__init__.py b/packages/tentacles/Evaluator/Social/forum_evaluator/__init__.py new file mode 100644 index 0000000000..f22788c802 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/forum_evaluator/__init__.py @@ -0,0 +1 @@ +from .forum import RedditForumEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/forum_evaluator/config/RedditForumEvaluator.json b/packages/tentacles/Evaluator/Social/forum_evaluator/config/RedditForumEvaluator.json new file mode 100644 index 0000000000..7b72e38802 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/forum_evaluator/config/RedditForumEvaluator.json @@ -0,0 +1,68 @@ +{ + "crypto-currencies": [ + { + "crypto-currency": "Bitcoin", + "subreddits": [ + "Bitcoin" + ] + }, + { + "crypto-currency": "Ethereum", + "subreddits": [ + "ethereum" + ] + }, + { + "crypto-currency": "NEO", + "subreddits": [ + "NEO" + ] + }, + { + "crypto-currency": "ICON", + "subreddits": [ + "icon" + ] + }, + { + "crypto-currency": "NANO", + "subreddits": [ + "nanocurrency" + ] + }, + { + "crypto-currency": "VeChain", + "subreddits": [ + "Vechain" + ] + }, + { + "crypto-currency": "VeChain Thor", + "subreddits": [ + "Vechain" + ] + }, + { + "crypto-currency": "Substratum", + "subreddits": [ + "SubstratumNetwork" + ] + }, + { + "crypto-currency": "Ethos", + "subreddits": [ + "ethos_io" + ] + }, + { + "crypto-currency": "Ontology", + "subreddits": [ + "OntologyNetwork" + ] + }, + { + "crypto-currency": "Binance Coin", + "subreddits": [] + } + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/forum_evaluator/forum.py b/packages/tentacles/Evaluator/Social/forum_evaluator/forum.py new file mode 100644 index 0000000000..3bbf4f5b5a --- /dev/null +++ b/packages/tentacles/Evaluator/Social/forum_evaluator/forum.py @@ -0,0 +1,134 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.tentacles_management as tentacles_management +import octobot_evaluators.evaluators as evaluators +import octobot_services.constants as services_constants +import tentacles.Services.Services_feeds as Services_feeds +import tentacles.Evaluator.Util as EvaluatorUtil + +CONFIG_REDDIT = "reddit" +CONFIG_REDDIT_SUBREDDITS = "subreddits" +CONFIG_REDDIT_ENTRY = "entry" +CONFIG_REDDIT_ENTRY_WEIGHT = "entry_weight" + + +# RedditForumEvaluator is used to get an overall state of a market, it will not trigger a trade +# (notify its evaluators) but is used to measure hype and trend of a market. +class RedditForumEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.RedditServiceFeed if hasattr(Services_feeds, 'RedditServiceFeed') else None + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.overall_state_analyser = EvaluatorUtil.OverallStateAnalyser() + self.count = 0 + self.sentiment_analyser = None + self.is_self_refreshing = True + self.subreddits_by_cryptocurrency = {} + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + cryptocurrencies = [] + config_cryptocurrencies = self.UI.user_input( + commons_constants.CONFIG_CRYPTO_CURRENCIES, commons_enums.UserInputTypes.OBJECT_ARRAY, + cryptocurrencies, inputs, other_schema_values={"minItems": 1, "uniqueItems": True}, + item_title="Crypto currency", + title="Crypto currencies to watch." + ) + # init one user input to generate user input schema and default values + cryptocurrencies.append(self._init_cryptocurrencies(inputs, "Bitcoin", ["Bitcoin"])) + # remove other symbols data to avoid unnecessary entries + self.subreddits_by_cryptocurrency = self._get_config_elements(config_cryptocurrencies, CONFIG_REDDIT_SUBREDDITS) + self.feed_config[services_constants.CONFIG_REDDIT_SUBREDDITS] = self.subreddits_by_cryptocurrency + + def _init_cryptocurrencies(self, inputs, cryptocurrency, subreddits): + return { + commons_constants.CONFIG_CRYPTO_CURRENCY: + self.UI.user_input(commons_constants.CONFIG_CRYPTO_CURRENCY, commons_enums.UserInputTypes.TEXT, + cryptocurrency, inputs, other_schema_values={"minLength": 2}, + parent_input_name=commons_constants.CONFIG_CRYPTO_CURRENCIES, array_indexes=[0], + title="Crypto currency name"), + CONFIG_REDDIT_SUBREDDITS: + self.UI.user_input(CONFIG_REDDIT_SUBREDDITS, commons_enums.UserInputTypes.STRING_ARRAY, + subreddits, inputs, other_schema_values={"uniqueItems": True}, + parent_input_name=commons_constants.CONFIG_CRYPTO_CURRENCIES, array_indexes=[0], + item_title="Subreddit name", + title="Subreddits to watch") + } + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return False + + def _print_entry(self, entry_text, entry_note, count=""): + self.logger.debug(f"New reddit entry ! : {entry_note} | {count} : {self.cryptocurrency_name} : " + f"Link : {entry_text}") + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + self.count += 1 + entry_note = self._get_sentiment(data[CONFIG_REDDIT_ENTRY]) + if entry_note != commons_constants.START_PENDING_EVAL_NOTE: + self.overall_state_analyser.add_evaluation(entry_note, data[CONFIG_REDDIT_ENTRY_WEIGHT], False) + if data[CONFIG_REDDIT_ENTRY_WEIGHT] > 3: + link = f"https://www.reddit.com{data[CONFIG_REDDIT_ENTRY].permalink}" + self._print_entry(link, entry_note, str(self.count)) + self.eval_note = self.overall_state_analyser.get_overall_state_after_refresh() + await self.evaluation_completed(self.cryptocurrency, eval_time=self.get_current_exchange_time()) + + def _get_sentiment(self, entry): + # analysis entry text and gives overall sentiment + reddit_entry_min_length = 50 + # ignore usless (very short) entries + if entry.selftext and len(entry.selftext) >= reddit_entry_min_length: + return -1 * self.sentiment_analyser.analyse(entry.selftext) + return commons_constants.START_PENDING_EVAL_NOTE + + def _is_interested_by_this_notification(self, notification_description): + # true if the given subreddit is in this cryptocurrency's subreddits configuration + try: + for subreddit in self.subreddits_by_cryptocurrency[self.cryptocurrency_name]: + if subreddit.lower() == notification_description: + return True + except KeyError: + pass + return False + + def _get_config_elements(self, config_cryptocurrencies, key): + if config_cryptocurrencies: + return { + cc[commons_constants.CONFIG_CRYPTO_CURRENCY]: cc[key] + for cc in config_cryptocurrencies + if cc[commons_constants.CONFIG_CRYPTO_CURRENCY] == self.cryptocurrency_name + } + return {} + + async def prepare(self): + self.sentiment_analyser = tentacles_management.get_single_deepest_child_class(EvaluatorUtil.TextAnalysis)() diff --git a/packages/tentacles/Evaluator/Social/forum_evaluator/metadata.json b/packages/tentacles/Evaluator/Social/forum_evaluator/metadata.json new file mode 100644 index 0000000000..f8e7c1d4d6 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/forum_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["RedditForumEvaluator"], + "tentacles-requirements": ["overall_state_analysis", "text_analysis", "reddit_service_feed"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/forum_evaluator/resources/RedditForumEvaluator.md b/packages/tentacles/Evaluator/Social/forum_evaluator/resources/RedditForumEvaluator.md new file mode 100644 index 0000000000..46be018c18 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/forum_evaluator/resources/RedditForumEvaluator.md @@ -0,0 +1,4 @@ +First initialises using the recent history of the subreddits in RedditForumEvaluator.json then +watches for new posts to update its evaluation. + +Never triggers strategies re-evaluations, acts as a background evaluator diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/__init__.py b/packages/tentacles/Evaluator/Social/news_evaluator/__init__.py new file mode 100644 index 0000000000..b3eabe5b0c --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/__init__.py @@ -0,0 +1 @@ +from .news import TwitterNewsEvaluator, CryptoNewsEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/config/CryptoNewsEvaluator.json b/packages/tentacles/Evaluator/Social/news_evaluator/config/CryptoNewsEvaluator.json new file mode 100644 index 0000000000..db3a4f277d --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/config/CryptoNewsEvaluator.json @@ -0,0 +1,3 @@ +{ + "language": "en" +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/config/TwitterNewsEvaluator.json b/packages/tentacles/Evaluator/Social/news_evaluator/config/TwitterNewsEvaluator.json new file mode 100644 index 0000000000..5879c2a3ff --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/config/TwitterNewsEvaluator.json @@ -0,0 +1,90 @@ +{ + "crypto-currencies": [ + { + "crypto-currency": "Bitcoin", + "accounts": [ + "BTCFoundation" + ], + "hashtags": [] + }, + { + "crypto-currency": "Ethereum", + "accounts": [ + "ethereum", + "VitalikButerin" + ], + "hashtags": [] + }, + { + "crypto-currency": "Neo", + "accounts": [ + "NEO_Blockchain", + "NEOnewstoday", + "NEO_council", + "neotogas", + "NEO_DevCon", + "neonexchange", + "dahongfei" + ], + "hashtags": [] + }, + { + "crypto-currency": "ICON", + "accounts": [ + "helloiconworld" + ], + "hashtags": [] + }, + { + "crypto-currency": "NANO", + "accounts": [ + "nanocurrency" + ], + "hashtags": [] + }, + { + "crypto-currency": "VeChain", + "accounts": [ + "sunshinelu24", + "VechainThorCom", + "Vechain1" + ], + "hashtags": [] + }, + { + "crypto-currency": "VeChain Thor", + "accounts": [ + "sunshinelu24", + "VechainThorCom", + "Vechain1" + ], + "hashtags": [] + }, + { + "crypto-currency": "Substratum", + "accounts": [ + "SubstratumNet" + ], + "hashtags": [] + }, + { + "crypto-currency": "Ethos", + "accounts": [ + "Ethos_io" + ], + "hashtags": [] + }, + { + "crypto-currency": "Ontology", + "accounts": [ + "OntologyNetwork" + ], + "hashtags": [] + }, + { + "crypto-currency": "Binance Coin", + "accounts": [], + "hashtags": [] + } + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/metadata.json b/packages/tentacles/Evaluator/Social/news_evaluator/metadata.json new file mode 100644 index 0000000000..5bc986899c --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["TwitterNewsEvaluator", "CryptoNewsEvaluator"], + "tentacles-requirements": ["text_analysis", "twitter_service_feed", "coindesk_service_feed"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/news.py b/packages/tentacles/Evaluator/Social/news_evaluator/news.py new file mode 100644 index 0000000000..5f137a8144 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/news.py @@ -0,0 +1,245 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.tentacles_management as tentacles_management +import octobot_services.constants as services_constants +import octobot_evaluators.evaluators as evaluators +import tentacles.Services.Services_feeds as Services_feeds +import tentacles.Evaluator.Util as EvaluatorUtil + + +# disable inheritance to disable tentacle visibility. Disabled as starting from feb 9 2023, API is now paid only +# class TwitterNewsEvaluator(evaluators.SocialEvaluator): +class TwitterNewsEvaluator: + SERVICE_FEED_CLASS = Services_feeds.TwitterServiceFeed if hasattr(Services_feeds, 'TwitterServiceFeed') else None + + # max time to live for a pulse is 10min + _EVAL_MAX_TIME_TO_LIVE = 10 * commons_constants.MINUTE_TO_SECONDS + # absolute value above which a notification is triggered + _EVAL_NOTIFICATION_THRESHOLD = 0.6 + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.count = 0 + self.sentiment_analyser = None + self.is_self_refreshing = True + self.accounts_by_cryptocurrency = {} + self.hashtags_by_cryptocurrency = {} + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + cryptocurrencies = [] + config_cryptocurrencies = self.UI.user_input( + commons_constants.CONFIG_CRYPTO_CURRENCIES, commons_enums.UserInputTypes.OBJECT_ARRAY, + cryptocurrencies, inputs, other_schema_values={"minItems": 1, "uniqueItems": True}, + item_title="Crypto currency", + title="Crypto currencies to watch." + ) + # init one user input to generate user input schema and default values + cryptocurrencies.append(self._init_cryptocurrencies(inputs, "Bitcoin", ["BTCFoundation"], [])) + # remove other symbols data to avoid unnecessary entries + self.accounts_by_cryptocurrency = self._get_config_elements(config_cryptocurrencies, + services_constants.CONFIG_TWITTERS_ACCOUNTS) + self.hashtags_by_cryptocurrency = self._get_config_elements(config_cryptocurrencies, + services_constants.CONFIG_TWITTERS_HASHTAGS) + self.feed_config[services_constants.CONFIG_TWITTERS_ACCOUNTS] = self.accounts_by_cryptocurrency + self.feed_config[services_constants.CONFIG_TWITTERS_HASHTAGS] = self.hashtags_by_cryptocurrency + + def _init_cryptocurrencies(self, inputs, cryptocurrency, accounts, hashtags): + return { + commons_constants.CONFIG_CRYPTO_CURRENCY: + self.UI.user_input(commons_constants.CONFIG_CRYPTO_CURRENCY, commons_enums.UserInputTypes.TEXT, + cryptocurrency, inputs, other_schema_values={"minLength": 2}, + parent_input_name=commons_constants.CONFIG_CRYPTO_CURRENCIES, array_indexes=[0], + title="Crypto currency name"), + services_constants.CONFIG_TWITTERS_ACCOUNTS: + self.UI.user_input(services_constants.CONFIG_TWITTERS_ACCOUNTS, commons_enums.UserInputTypes.STRING_ARRAY, + accounts, inputs, other_schema_values={"uniqueItems": True}, + parent_input_name=commons_constants.CONFIG_CRYPTO_CURRENCIES, array_indexes=[0], + item_title="Twitter account name", + title="Twitter accounts to watch"), + services_constants.CONFIG_TWITTERS_HASHTAGS: + self.UI.user_input(services_constants.CONFIG_TWITTERS_HASHTAGS, commons_enums.UserInputTypes.STRING_ARRAY, + hashtags, inputs, other_schema_values={"uniqueItems": True}, + parent_input_name=commons_constants.CONFIG_CRYPTO_CURRENCIES, array_indexes=[0], + item_title="Hashtag", + title="Twitter hashtags to watch (without the # character), " + "warning: might trigger evaluator for irrelevant tweets.") + } + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return False + + def _print_tweet(self, tweet_text, tweet_url, note, count=""): + self.logger.debug(f"Current note : {note} | {count} : {self.cryptocurrency_name} : Link: {tweet_url} Text : " + f"{tweet_text.encode('utf-8', 'ignore')}") + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.CONFIG_TWEET_DESCRIPTION]): + self.count += 1 + note = self._get_tweet_sentiment(data[services_constants.CONFIG_TWEET], + data[services_constants.CONFIG_TWEET_DESCRIPTION]) + tweet_url = f"https://twitter.com/ProducToken/status/{data['tweet']['id']}" + if note != commons_constants.START_PENDING_EVAL_NOTE: + self._print_tweet(data[services_constants.CONFIG_TWEET_DESCRIPTION], tweet_url, note, str(self.count)) + await self._check_eval_note(note) + + # only set eval note when something is happening + async def _check_eval_note(self, note): + if note != commons_constants.START_PENDING_EVAL_NOTE: + if abs(note) > self._EVAL_NOTIFICATION_THRESHOLD: + self.eval_note = note + self.save_evaluation_expiration_time(self._compute_notification_time_to_live(self.eval_note)) + await self.evaluation_completed(self.cryptocurrency, eval_time=self.get_current_exchange_time()) + + @staticmethod + def _compute_notification_time_to_live(evaluation): + return TwitterNewsEvaluator._EVAL_MAX_TIME_TO_LIVE * abs(evaluation) + + def _get_tweet_sentiment(self, tweet, tweet_text, is_a_quote=False): + try: + if is_a_quote: + return -1 * self.sentiment_analyser.analyse(tweet_text) + else: + padding_name = "########" + author_screen_name = tweet['user']['screen_name'] if "screen_name" in tweet['user'] \ + else padding_name + author_name = tweet['user']['name'] if "name" in tweet['user'] else padding_name + if author_screen_name in self.accounts_by_cryptocurrency[self.cryptocurrency_name] \ + or author_name in self.accounts_by_cryptocurrency[self.cryptocurrency_name]: + return -1 * self.sentiment_analyser.analyse(tweet_text) + except KeyError: + pass + + # ignore # for the moment (too much of bullshit) + return commons_constants.START_PENDING_EVAL_NOTE + + def _is_interested_by_this_notification(self, notification_description): + # true if in twitter accounts + try: + for account in self.accounts_by_cryptocurrency[self.cryptocurrency_name]: + if account.lower() in notification_description: + return True + except KeyError: + return False + # false if it's a RT of an unfollowed account + if notification_description.startswith("rt"): + return False + + # true if contains symbol + if self.cryptocurrency_name.lower() in notification_description: + return True + + # true if in hashtags + if self.hashtags_by_cryptocurrency: + for hashtags in self.hashtags_by_cryptocurrency[self.cryptocurrency_name]: + if hashtags.lower() in notification_description: + return True + return False + + def _get_config_elements(self, config_cryptocurrencies, key): + if config_cryptocurrencies: + return { + cc[commons_constants.CONFIG_CRYPTO_CURRENCY]: cc[key] + for cc in config_cryptocurrencies + if cc[commons_constants.CONFIG_CRYPTO_CURRENCY] == self.cryptocurrency_name + } + return {} + + async def prepare(self): + self.sentiment_analyser = tentacles_management.get_single_deepest_child_class(EvaluatorUtil.TextAnalysis)() + + +NEWS_CONFIG_LANGUAGE = "language" + +# Should use any feed available to fetch crypto news (coindesk, etc.) +class CryptoNewsEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.CoindeskServiceFeed + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.stats_analyser = None + self.language = None + + def init_user_inputs(self, inputs: dict) -> None: + self.language = self.UI.user_input(NEWS_CONFIG_LANGUAGE, + commons_enums.UserInputTypes.TEXT, + self.language, inputs, + title="Language to use to fetch crypto news.", + options=["en", "fr"]) + self.feed_config = { + services_constants.CONFIG_COINDESK_TOPICS: [services_constants.COINDESK_TOPIC_NEWS], + services_constants.CONFIG_COINDESK_LANGUAGE: self.language + } + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return True + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return True + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + latest_news = self.get_data_cache(self.get_current_exchange_time(), key=services_constants.COINDESK_TOPIC_NEWS) + if latest_news is not None and len(latest_news) > 0: + sentiment_sum = 0 + news_count = 0 + news_titles = [] + for news in latest_news: + sentiment = news.sentiment + sentiment_sum += 0 if sentiment is None else -1 if sentiment == "NEGATIVE" else 1 if sentiment == "POSITIVE" else 0 + news_count += 1 + news_titles.append(news.title) + + if news_count > 0: + self.eval_note = sentiment_sum / news_count + await self.evaluation_completed( + cryptocurrency=None, + eval_time=self.get_current_exchange_time(), + eval_note_description=f"Overall news sentiment: {'POSITIVE' if self.eval_note > 0 else 'NEGATIVE' if self.eval_note < 0 else 'NEUTRAL'}. News titles: " + "; ".join(news_titles) + ) + else: + self.debug(f"No news found") + + def _is_interested_by_this_notification(self, notification_description): + return notification_description == services_constants.COINDESK_TOPIC_NEWS + + async def prepare(self): + self.sentiment_analyser = tentacles_management.get_single_deepest_child_class(EvaluatorUtil.TextAnalysis)() diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/resources/CryptoNewsEvaluator.md b/packages/tentacles/Evaluator/Social/news_evaluator/resources/CryptoNewsEvaluator.md new file mode 100644 index 0000000000..4473083bfe --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/resources/CryptoNewsEvaluator.md @@ -0,0 +1,7 @@ +Analyzes overall crypto market sentiment through cryptocurrency news articles. + +This evaluator interprets aggregated news signals (e.g., article headlines, content, +and sentiment classifications) to produce a normalized score +indicating bullish or bearish market sentiment based on recent news coverage. + +Data source: ([CoinDesk Data API](https://developers.coindesk.com/documentation/data-api/news)) diff --git a/packages/tentacles/Evaluator/Social/news_evaluator/resources/TwitterNewsEvaluator.md b/packages/tentacles/Evaluator/Social/news_evaluator/resources/TwitterNewsEvaluator.md new file mode 100644 index 0000000000..19a4419ae1 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/news_evaluator/resources/TwitterNewsEvaluator.md @@ -0,0 +1,4 @@ +Triggers when a new tweet appears from a Twitter account in TwitterNewsEvaluator.json. + +If the evaluation of any given tweet is significant enough, triggers strategies re-evaluation. Otherwise +acts as a background evaluator. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/__init__.py b/packages/tentacles/Evaluator/Social/sentiment_evaluator/__init__.py new file mode 100644 index 0000000000..2bb7b4cd2e --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/__init__.py @@ -0,0 +1 @@ +from .sentiment import FearAndGreedIndexEvaluator, SocialScoreEvaluator diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/config/FearAndGreedIndexEvaluator.json b/packages/tentacles/Evaluator/Social/sentiment_evaluator/config/FearAndGreedIndexEvaluator.json new file mode 100644 index 0000000000..dcb8b21213 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/config/FearAndGreedIndexEvaluator.json @@ -0,0 +1,3 @@ +{ + "trend_averages" : [40, 30, 20, 15, 10] +} diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/config/SocialScoreEvaluator.json b/packages/tentacles/Evaluator/Social/sentiment_evaluator/config/SocialScoreEvaluator.json new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/config/SocialScoreEvaluator.json @@ -0,0 +1,2 @@ +{ +} diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/metadata.json b/packages/tentacles/Evaluator/Social/sentiment_evaluator/metadata.json new file mode 100644 index 0000000000..bf8c5a8314 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["FearAndGreedIndexEvaluator", "SocialScoreEvaluator"], + "tentacles-requirements": ["alternative_me_service_feed", "lunarcrush_service_feed"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/resources/FearAndGreedIndexEvaluator.md b/packages/tentacles/Evaluator/Social/sentiment_evaluator/resources/FearAndGreedIndexEvaluator.md new file mode 100644 index 0000000000..e15f7a8035 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/resources/FearAndGreedIndexEvaluator.md @@ -0,0 +1,7 @@ +Analyzes overall crypto market sentiment through a Fear & Greed Index. + +This evaluator interprets aggregated market signals (e.g., volatility, volume/momentum, +social media sentiment, dominance, and trends) to produce a normalized score +indicating prevailing fear or greed. + +Data source: ([alternative.me](https://alternative.me/crypto/fear-and-greed-index/)) \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/resources/SocialScoreEvaluator.md b/packages/tentacles/Evaluator/Social/sentiment_evaluator/resources/SocialScoreEvaluator.md new file mode 100644 index 0000000000..fa45b13a8e --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/resources/SocialScoreEvaluator.md @@ -0,0 +1,7 @@ +Analyzes cryptocurrency-specific social sentiment through LunarCrush social metrics. + +This evaluator interprets aggregated social signals (e.g., social volume, social engagement, +social dominance, and community sentiment) to produce a normalized score +indicating bullish or bearish social sentiment for a specific cryptocurrency. + +Data source: ([LunarCrush](https://lunarcrush.com/faq/what-metrics-are-available-on-lunarcrush)) diff --git a/packages/tentacles/Evaluator/Social/sentiment_evaluator/sentiment.py b/packages/tentacles/Evaluator/Social/sentiment_evaluator/sentiment.py new file mode 100644 index 0000000000..9be1787536 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/sentiment_evaluator/sentiment.py @@ -0,0 +1,113 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as commons_enums +import octobot_commons.tentacles_management as tentacles_management +import octobot_evaluators.evaluators as evaluators +import octobot_services.constants as services_constants +import tentacles.Evaluator.Util as EvaluatorUtil +import tentacles.Services.Services_feeds as Services_feeds + +CONFIG_TREND_AVERAGES = "trend_averages" + +class FearAndGreedIndexEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.AlternativeMeServiceFeed + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.stats_analyser = None + self.history_data = None + self.feed_config = { + services_constants.CONFIG_ALTERNATIVE_ME_TOPICS: [services_constants.ALTERNATIVE_ME_TOPIC_FEAR_AND_GREED] + } + self.trend_averages = [40, 30, 20, 15, 10] + + def init_user_inputs(self, inputs: dict) -> None: + self.trend_averages = self.UI.user_input(CONFIG_TREND_AVERAGES, + commons_enums.UserInputTypes.OBJECT_ARRAY, + self.trend_averages, inputs, + title="Averages to use to compute the trend evaluation.") + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return True + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return True + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + fear_and_greed_history = self.get_data_cache(self.get_current_exchange_time(), key=services_constants.ALTERNATIVE_ME_TOPIC_FEAR_AND_GREED) + if fear_and_greed_history is not None and len(fear_and_greed_history) > 0: + fear_and_greed_history_values = [item.value for item in fear_and_greed_history] + self.eval_note = self.stats_analyser.get_trend(fear_and_greed_history_values, self.trend_averages) + await self.evaluation_completed(cryptocurrency=None, + eval_time=self.get_current_exchange_time(), + eval_note_description="Latest values: " + ", ".join([str(v) for v in fear_and_greed_history_values[-5:]])) + + def _is_interested_by_this_notification(self, notification_description): + return notification_description == services_constants.ALTERNATIVE_ME_TOPIC_FEAR_AND_GREED + + async def prepare(self): + self.stats_analyser = tentacles_management.get_single_deepest_child_class(EvaluatorUtil.TrendAnalysis)() + +class SocialScoreEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.LunarCrushServiceFeed + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.stats_analyser = None + + def init_user_inputs(self, inputs: dict) -> None: + self.feed_config = { + services_constants.CONFIG_LUNARCRUSH_COINS: [self.cryptocurrency] + } + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return False + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + coin, _ = data[services_constants.FEED_METADATA].split(";") + coin_data = self.get_data_cache(self.get_current_exchange_time(), key=f"{coin};{services_constants.LUNARCRUSH_COIN_METRICS}") + if coin_data is not None and len(coin_data) > 0: + self.eval_note = coin_data[-1].sentiment + await self.evaluation_completed(cryptocurrency=self.cryptocurrency, eval_time=self.get_current_exchange_time()) + + def _is_interested_by_this_notification(self, notification_description): + try: + coin, topic = notification_description.split(";") + return coin == self.cryptocurrency and topic == services_constants.LUNARCRUSH_COIN_METRICS + except KeyError: + pass + return False diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/__init__.py b/packages/tentacles/Evaluator/Social/signal_evaluator/__init__.py new file mode 100644 index 0000000000..cff7c851e6 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/__init__.py @@ -0,0 +1 @@ +from .signal import TelegramSignalEvaluator, TelegramChannelSignalEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/config/TelegramChannelSignalEvaluator.json b/packages/tentacles/Evaluator/Social/signal_evaluator/config/TelegramChannelSignalEvaluator.json new file mode 100644 index 0000000000..5dbd10bd2b --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/config/TelegramChannelSignalEvaluator.json @@ -0,0 +1,12 @@ +{ + "telegram-channels": [ + { + "channel_name": "Test-Channel", + "signal_pair": "Pair: (.*)$", + "signal_pattern": { + "MARKET_BUY": "Side: (BUY)$", + "MARKET_SELL": "Side: (SELL)$" + } + } + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/config/TelegramSignalEvaluator.json b/packages/tentacles/Evaluator/Social/signal_evaluator/config/TelegramSignalEvaluator.json new file mode 100644 index 0000000000..a8f4d74345 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/config/TelegramSignalEvaluator.json @@ -0,0 +1,5 @@ +{ + "telegram-channels": [ + "test_telegram_signal_strat" + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/metadata.json b/packages/tentacles/Evaluator/Social/signal_evaluator/metadata.json new file mode 100644 index 0000000000..2c9aca20b6 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["TelegramSignalEvaluator", "TelegramChannelSignalEvaluator"], + "tentacles-requirements": ["telegram_service_feed"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md b/packages/tentacles/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md new file mode 100644 index 0000000000..9deab3dc59 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md @@ -0,0 +1,7 @@ +Evaluator that catch Telegram channel signals. + +Triggers on a Telegram signal from any channel your personal account joined. + +Signal parsing is configurable according to the name of the channel. + +See [OctoBot docs about Telegram API service](https://www.octobot.cloud/en/guides/octobot-interfaces/telegram/telegram-api?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=telegramChannelSignalEvaluator) for more information. diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md b/packages/tentacles/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md new file mode 100644 index 0000000000..a1f9b3b498 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md @@ -0,0 +1,14 @@ +Very simple evaluator designed to be an example for an evaluator using Telegram signals. + +Triggers on a Telegram signal from any group or channel listed in this evaluator configuration in which +your Telegram bot is invited. + +Signal format for this implementation is: **SYMBOL[evaluation]**. Example: **BTC/USDT[-0.45]**. + +SYMBOL has to be in current watched symbols (in configuration) and evaluation must be between -1 and 1. + +Remember that OctoBot can only see messages from a +chat/group where its Telegram bot (in OctoBot configuration) has been invited. Keep also in mind that you +need to disable the privacy mode of your Telegram bot to allow it to see group messages. + +See [OctoBot docs about Telegram interface](https://www.octobot.cloud/en/guides/octobot-interfaces/telegram?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=telegramSignalEvaluator) for more information. diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/signal.py b/packages/tentacles/Evaluator/Social/signal_evaluator/signal.py new file mode 100644 index 0000000000..f870917145 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/signal.py @@ -0,0 +1,207 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import re + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_services.constants as services_constants +import octobot_evaluators.evaluators as evaluators +import tentacles.Services.Services_feeds as Services_feeds + + +class TelegramSignalEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.TelegramServiceFeed if hasattr(Services_feeds, 'TelegramServiceFeed') else None + + def init_user_inputs(self, inputs: dict) -> None: + channels_config = self.UI.user_input(services_constants.CONFIG_TELEGRAM_CHANNEL, + commons_enums.UserInputTypes.STRING_ARRAY, + [], inputs, item_title="Channel name", + title="Name of the watched channels") + self.feed_config[services_constants.CONFIG_TELEGRAM_CHANNEL] = channels_config + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.CONFIG_GROUP_MESSAGE_DESCRIPTION]): + await self.analyse_notification(data) + await self.evaluation_completed(self.cryptocurrency, self.symbol, + eval_time=self.get_current_exchange_time()) + else: + self.logger.debug(f"Ignored telegram feed: \"{self.symbol.lower()}\" pattern not found in " + f"\"{data[services_constants.CONFIG_GROUP_MESSAGE_DESCRIPTION].lower()}\"") + + # return true if the given notification is relevant for this client + def _is_interested_by_this_notification(self, notification_description): + if self.symbol: + return self.symbol.lower() in notification_description.lower() + else: + return True + + async def analyse_notification(self, notification): + notification_test = notification[services_constants.CONFIG_GROUP_MESSAGE_DESCRIPTION] + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + start_eval_chars = "[" + end_eval_chars = "]" + if start_eval_chars in notification_test and end_eval_chars in notification_test: + try: + split_test = notification_test.split(start_eval_chars) + notification_eval = split_test[1].split(end_eval_chars)[0] + potential_note = float(notification_eval) + if -1 <= potential_note <= 1: + self.eval_note = potential_note + else: + self.logger.error(f"Impossible to use notification evaluation: {notification_eval}: " + f"evaluation should be between -1 and 1.") + except Exception as e: + self.logger.error(f"Impossible to parse notification {notification_test}: {e}. Please refer to this " + f"evaluator documentation to check the notification pattern.") + else: + self.logger.error(f"Impossible to parse notification {notification_test}. Please refer to this evaluator " + f"documentation to check the notification pattern.") + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return False + + @classmethod + def get_is_symbol_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not symbol dependant else False + """ + return False + + def _get_tentacle_registration_topic(self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames): + currencies = [self.cryptocurrency] + symbols = [self.symbol] + to_handle_time_frames = [self.time_frame] + if self.get_is_cryptocurrencies_wildcard(): + currencies = all_symbols_by_crypto_currencies.keys() + if self.get_is_symbol_wildcard(): + symbols = [] + for currency_symbols in all_symbols_by_crypto_currencies.values(): + symbols += currency_symbols + # by default no time frame registration for social evaluators + return currencies, symbols, to_handle_time_frames + + +class TelegramChannelSignalEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.TelegramApiServiceFeed if hasattr(Services_feeds, 'TelegramApiServiceFeed') else None + + SIGNAL_PATTERN_KEY = "signal_pattern" + SIGNAL_PATTERN_MARKET_BUY_KEY = "MARKET_BUY" + SIGNAL_PATTERN_MARKET_SELL_KEY = "MARKET_SELL" + SIGNAL_PAIR_KEY = "signal_pair" + SIGNAL_CHANNEL_NAME_KEY = "channel_name" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.channels_config_by_channel_name = {} + + def init_user_inputs(self, inputs: dict) -> None: + channels = [] + config_channels = self.UI.user_input(services_constants.CONFIG_TELEGRAM_CHANNEL, + commons_enums.UserInputTypes.OBJECT_ARRAY, + channels, inputs, item_title="Channel", + other_schema_values={"minItems": 1, "uniqueItems": True}, + title="Channels to watch") + channels.append(self._init_channel_config(inputs, "Test-Channel", "Pair: (.*)$", + "Side: (BUY)$", "Side: (SELL)$")) + self.channels_config_by_channel_name = { + channel[self.SIGNAL_CHANNEL_NAME_KEY]: channel + for channel in config_channels + } + self.feed_config[services_constants.CONFIG_TELEGRAM_CHANNEL] = list(self.channels_config_by_channel_name) + + def _init_channel_config(self, inputs, channel_name, signal_pair, buy_regex, sell_regex): + return { + self.SIGNAL_CHANNEL_NAME_KEY: self.UI.user_input( + self.SIGNAL_CHANNEL_NAME_KEY, commons_enums.UserInputTypes.TEXT, + channel_name, inputs, + parent_input_name=services_constants.CONFIG_TELEGRAM_CHANNEL, + array_indexes=[0], + title="Channel name"), + self.SIGNAL_PAIR_KEY: self.UI.user_input( + self.SIGNAL_PAIR_KEY, commons_enums.UserInputTypes.TEXT, + signal_pair, inputs, + parent_input_name=services_constants.CONFIG_TELEGRAM_CHANNEL, + array_indexes=[0], + title="Trading pair regex, ex: Pair: (.*)$"), + self.SIGNAL_PATTERN_KEY: self.UI.user_input( + self.SIGNAL_PATTERN_KEY, commons_enums.UserInputTypes.OBJECT, + self._init_pattern_config(inputs, buy_regex, sell_regex), inputs, + parent_input_name=services_constants.CONFIG_TELEGRAM_CHANNEL, + array_indexes=[0], + title="Signal patterns"), + } + + def _init_pattern_config(self, inputs, buy_regex, sell_regex): + return { + self.SIGNAL_PATTERN_MARKET_BUY_KEY: self.UI.user_input( + self.SIGNAL_PATTERN_MARKET_BUY_KEY, commons_enums.UserInputTypes.TEXT, + buy_regex, inputs, parent_input_name=self.SIGNAL_PATTERN_KEY, + array_indexes=[0], + title="Market buy signal regex, ex: Side: (BUY)$"), + self.SIGNAL_PATTERN_MARKET_SELL_KEY: self.UI.user_input( + self.SIGNAL_PATTERN_MARKET_SELL_KEY, + commons_enums.UserInputTypes.TEXT, + sell_regex, inputs, + parent_input_name=self.SIGNAL_PATTERN_KEY, + array_indexes=[0], + title="Market sell signal regex, ex: Side: (SELL)$"), + } + + async def _feed_callback(self, data): + if not data: + return + is_from_channel = data.get(services_constants.CONFIG_IS_CHANNEL_MESSAGE, False) + if is_from_channel: + sender = data.get(services_constants.CONFIG_MESSAGE_SENDER, "") + if sender in self.channels_config_by_channel_name: + try: + message = data.get(services_constants.CONFIG_MESSAGE_CONTENT, "") + channel_data = self.channels_config_by_channel_name[sender] + is_buy_market_signal = self._get_signal_message( + channel_data[self.SIGNAL_PATTERN_KEY][self.SIGNAL_PATTERN_MARKET_BUY_KEY], message) + is_sell_market_signal = self._get_signal_message( + channel_data[self.SIGNAL_PATTERN_KEY][self.SIGNAL_PATTERN_MARKET_SELL_KEY], message) + pair = self._get_signal_message(channel_data[self.SIGNAL_PAIR_KEY], message) + if (is_buy_market_signal or is_sell_market_signal) and pair is not None: + self.eval_note = -1 if is_buy_market_signal else 1 + await self.evaluation_completed(symbol=pair.strip(), eval_time=self.get_current_exchange_time()) + else: + self.logger.warning(f"Unable to parse message from {sender} : {message}") + except KeyError: + self.logger.warning(f"Unable to parse message from {sender}") + else: + self.logger.debug(f"Ignored message : from an unsupported channel ({sender})") + else: + self.logger.debug("Ignored message : not a channel message") + + def _get_signal_message(self, expected_pattern, message): + try: + match = re.search(expected_pattern, message) + return match.group(1) + except AttributeError: + self.logger.debug(f"Ignored message : not matching channel pattern ({message})") + return None diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/Social/signal_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/Social/signal_evaluator/tests/test_telegram_channel_signal_evaluator.py b/packages/tentacles/Evaluator/Social/signal_evaluator/tests/test_telegram_channel_signal_evaluator.py new file mode 100644 index 0000000000..9543131958 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/signal_evaluator/tests/test_telegram_channel_signal_evaluator.py @@ -0,0 +1,186 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import octobot_commons.constants as commons_constants +import octobot_commons.logging as logging +import octobot_services.constants as services_constants +import tentacles.Evaluator.Social as Social +import tests.test_utils.config as test_utils_config + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def _trigger_callback_with_data_and_assert_note(evaluator: Social.TelegramChannelSignalEvaluator, + data=None, + note=commons_constants.START_PENDING_EVAL_NOTE): + await evaluator._feed_callback(data) + assert evaluator.eval_note == note + evaluator.eval_note = commons_constants.START_PENDING_EVAL_NOTE + + +def _create_evaluator_with_supported_channel_signals(): + evaluator = Social.TelegramChannelSignalEvaluator(test_utils_config.load_test_tentacles_config()) + evaluator.logger = logging.get_logger(evaluator.get_name()) + evaluator.specific_config = { + "telegram-channels": [ + { + "channel_name": "TEST-CHAN-1", + "signal_pattern": { + "MARKET_BUY": "Side: (BUY)", + "MARKET_SELL": "Side: (SELL)" + }, + "signal_pair": "Pair: (.*)" + }, + { + "channel_name": "TEST-CHAN-2", + "signal_pattern": { + "MARKET_BUY": ".* : (-1)$", + "MARKET_SELL": ".* : (1)$" + }, + "signal_pair": "(.*):" + } + ] + } + evaluator.init_user_inputs({}) + evaluator.eval_note = commons_constants.START_PENDING_EVAL_NOTE + return evaluator + + +async def test_without_data(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator) + + +async def test_with_empty_data(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={}) + + +async def test_incorrect_signal_without_sender_without_channel_message(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: False, + services_constants.CONFIG_MESSAGE_SENDER: "", + services_constants.CONFIG_MESSAGE_CONTENT: "", + }) + + +async def test_incorrect_signal_without_sender_with_channel_message(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "", + services_constants.CONFIG_MESSAGE_CONTENT: "", + }) + + +async def test_incorrect_signal_chan1_without_content(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-1", + services_constants.CONFIG_MESSAGE_CONTENT: "", + }) + + +async def test_incorrect_signal_chan1_without_coin(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-1", + services_constants.CONFIG_MESSAGE_CONTENT: """ + Order Id: 1631033831358699 + Pair: + Side: + Price: 12.909 + """, + }) + + +async def test_incorrect_signal_chan1_without_separator(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-1", + services_constants.CONFIG_MESSAGE_CONTENT: """ + Order Id: 1631033831358699 + Pair QTUMUSDT + Side: BUY + Price: 12.909 + """, + }) + + +async def test_correct_signal_chan1_with_not_channel_message(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: False, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-1", + services_constants.CONFIG_MESSAGE_CONTENT: """ + Order Id: 1631033831358699 + Pair: QTUMUSDT + Side: BUY + Price: 12.909 + """, + }) + + +async def test_correct_signal_chan1_with_chan2(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-2", + services_constants.CONFIG_MESSAGE_CONTENT: """ + Order Id: 1631033831358699 + Pair: QTUMUSDT + Side: BUY + Price: 12.909 + """, + }) + + +async def test_correct_signal_chan1(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-1", + services_constants.CONFIG_MESSAGE_CONTENT: """ + Order Id: 1631033831358699 + Pair: QTUMUSDT + Side: BUY + Price: 12.909 + """, + }, note=-1) + + +async def test_correct_signal_chan2_but_with_chan1(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-1", + services_constants.CONFIG_MESSAGE_CONTENT: "BTC/USDT : 1", + }) + + +async def test_correct_signal_chan2(): + evaluator = _create_evaluator_with_supported_channel_signals() + await _trigger_callback_with_data_and_assert_note(evaluator, data={ + services_constants.CONFIG_IS_CHANNEL_MESSAGE: True, + services_constants.CONFIG_MESSAGE_SENDER: "TEST-CHAN-2", + services_constants.CONFIG_MESSAGE_CONTENT: "BTC/USDT : -1", + }, note=-1) diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/__init__.py b/packages/tentacles/Evaluator/Social/trends_evaluator/__init__.py new file mode 100644 index 0000000000..f721b7f44a --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/__init__.py @@ -0,0 +1 @@ +from .trends import GoogleTrendsEvaluator, MarketCapEvaluator, CryptoMarketCapEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/config/CryptoMarketCapEvaluator.json b/packages/tentacles/Evaluator/Social/trends_evaluator/config/CryptoMarketCapEvaluator.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/config/CryptoMarketCapEvaluator.json @@ -0,0 +1 @@ +{} diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/config/GoogleTrendsEvaluator.json b/packages/tentacles/Evaluator/Social/trends_evaluator/config/GoogleTrendsEvaluator.json new file mode 100644 index 0000000000..12aae59e4f --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/config/GoogleTrendsEvaluator.json @@ -0,0 +1,4 @@ +{ + "refresh_rate_seconds" : 86400, + "relevant_history_months" : 3 +} diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/config/MarketCapEvaluator.json b/packages/tentacles/Evaluator/Social/trends_evaluator/config/MarketCapEvaluator.json new file mode 100644 index 0000000000..582e79d700 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/config/MarketCapEvaluator.json @@ -0,0 +1,4 @@ +{ + "trend_averages" : [40, 30, 20, 15, 10] + } + \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/metadata.json b/packages/tentacles/Evaluator/Social/trends_evaluator/metadata.json new file mode 100644 index 0000000000..552512841b --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["GoogleTrendsEvaluator", "MarketCapEvaluator", "CryptoMarketCapEvaluator"], + "tentacles-requirements": ["statistics_analysis", "google_service_feed", "coindesk_service_feed", "coingecko_service_feed"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/resources/CryptoMarketCapEvaluator.md b/packages/tentacles/Evaluator/Social/trends_evaluator/resources/CryptoMarketCapEvaluator.md new file mode 100644 index 0000000000..74a52f80c1 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/resources/CryptoMarketCapEvaluator.md @@ -0,0 +1,11 @@ +Analyzes cryptocurrency market trends through CoinGecko market capitalization data for individual coins. + +This evaluator interprets market cap signals from the top 100 cryptocurrencies (ordered by market cap) to produce a normalized score indicating bullish or bearish trends based on: +- Coin position/rank in the top 100 (higher rank = more established) +- Market cap change percentage over 24 hours +- Price change percentage over 24 hours +- Trading volume relative to other top coins + +The evaluator generates eval notes by combining these factors with position-based weighting, where higher-ranked coins (e.g., rank 1-10) receive higher confidence multipliers than lower-ranked coins. + +Data source: ([CoinGecko API](https://www.coingecko.com/en/api)) diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/resources/GoogleTrendsEvaluator.md b/packages/tentacles/Evaluator/Social/trends_evaluator/resources/GoogleTrendsEvaluator.md new file mode 100644 index 0000000000..a830a3b290 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/resources/GoogleTrendsEvaluator.md @@ -0,0 +1,5 @@ +Analyses the popularity of the given currencies using their names. + +Data are provided by [Google's trends service](https://trends.google.com/trends/?geo=US). + +Due to Google trends poor refresh rate, this evaluation should be considered for large time frames only. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/resources/MarketCapEvaluator.md b/packages/tentacles/Evaluator/Social/trends_evaluator/resources/MarketCapEvaluator.md new file mode 100644 index 0000000000..1bcadca7e3 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/resources/MarketCapEvaluator.md @@ -0,0 +1,7 @@ +Analyzes overall crypto market trends through total market capitalization data. + +This evaluator interprets aggregated market cap signals (e.g., historical market cap values, +trend changes, and long-term averages) to produce a normalized score +indicating bullish or bearish market trends based on capitalization movements. + +Data source: ([CoinDesk Data API](https://developers.coindesk.com/documentation/data-api/overview_v1_latest_marketcap_all_tick)) diff --git a/packages/tentacles/Evaluator/Social/trends_evaluator/trends.py b/packages/tentacles/Evaluator/Social/trends_evaluator/trends.py new file mode 100644 index 0000000000..869dd67f19 --- /dev/null +++ b/packages/tentacles/Evaluator/Social/trends_evaluator/trends.py @@ -0,0 +1,208 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import numpy + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.tentacles_management as tentacles_management +import octobot_evaluators.evaluators as evaluators +import octobot_services.constants as services_constants +import tentacles.Evaluator.Util as EvaluatorUtil +import tentacles.Services.Services_feeds as Services_feeds + +class GoogleTrendsEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.GoogleServiceFeed if hasattr(Services_feeds, 'GoogleServiceFeed') else None + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.stats_analyser = None + self.refresh_rate_seconds = 86400 + self.relevant_history_months = 3 + + def init_user_inputs(self, inputs: dict) -> None: + self.refresh_rate_seconds = self.refresh_rate_seconds or \ + self.UI.user_input(commons_constants.CONFIG_REFRESH_RATE, + commons_enums.UserInputTypes.INT, + self.refresh_rate_seconds, inputs, min_val=1, + title="Seconds between each re-evaluation (do not set too low because google has a low " + "monthly rate limit).") + self.relevant_history_months = self.UI.user_input(services_constants.CONFIG_TREND_HISTORY_TIME, + commons_enums.UserInputTypes.INT, + self.relevant_history_months, inputs, min_val=3, max_val=3, + title="Number of months to look into to compute the trend " + "evaluation (for now works only with 3).") + self.feed_config[services_constants.CONFIG_TREND_TOPICS] = self._build_trend_topics() + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return False + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + trend = numpy.array([d["data"] for d in data[services_constants.CONFIG_TREND]]) + # compute bollinger bands + self.eval_note = self.stats_analyser.analyse_recent_trend_changes(trend, numpy.sqrt) + await self.evaluation_completed(self.cryptocurrency, eval_time=self.get_current_exchange_time()) + + def _is_interested_by_this_notification(self, notification_description): + return self.cryptocurrency_name in notification_description + + def _build_trend_topics(self): + trend_time_frame = f"today {self.relevant_history_months}-m" + return [ + Services_feeds.TrendTopic(self.refresh_rate_seconds, + [self.cryptocurrency_name], + time_frame=trend_time_frame) + ] + + async def prepare(self): + self.stats_analyser = tentacles_management.get_single_deepest_child_class(EvaluatorUtil.StatisticAnalysis)() + +CONFIG_TREND_AVERAGES = "trend_averages" + +class MarketCapEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.CoindeskServiceFeed + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.stats_analyser = None + self.feed_config = { + services_constants.CONFIG_COINDESK_TOPICS: [services_constants.COINDESK_TOPIC_MARKETCAP] + } + self.trend_averages = [40, 30, 20, 15, 10] + + def init_user_inputs(self, inputs: dict) -> None: + self.trend_averages = self.UI.user_input(CONFIG_TREND_AVERAGES, + commons_enums.UserInputTypes.OBJECT_ARRAY, + self.trend_averages, inputs, + title="Averages to use to compute the trend evaluation.") + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return True + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return True + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + marketcap_data = self.get_data_cache(self.get_current_exchange_time(), key=services_constants.COINDESK_TOPIC_MARKETCAP) + if marketcap_data is not None and len(marketcap_data) > 0: + marketcap_history = [item.close for item in marketcap_data] + self.eval_note = self.stats_analyser.get_trend(marketcap_history, self.trend_averages) + await self.evaluation_completed(cryptocurrency=None, + eval_time=self.get_current_exchange_time(), + eval_note_description="Latest market cap values: " + ", ".join([str(v) for v in marketcap_history[-5:]])) + + def _is_interested_by_this_notification(self, notification_description): + return notification_description == services_constants.COINDESK_TOPIC_MARKETCAP + + async def prepare(self): + self.stats_analyser = tentacles_management.get_single_deepest_child_class(EvaluatorUtil.TrendAnalysis)() + + +class CryptoMarketCapEvaluator(evaluators.SocialEvaluator): + SERVICE_FEED_CLASS = Services_feeds.CoingeckoServiceFeed + + def __init__(self, tentacles_setup_config): + evaluators.SocialEvaluator.__init__(self, tentacles_setup_config) + self.feed_config = { + services_constants.CONFIG_COINGECKO_TOPICS: [services_constants.COINGECKO_TOPIC_MARKETS] + } + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + @classmethod + def get_is_cryptocurrency_name_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency name dependant else False + """ + return False + + async def _feed_callback(self, data): + if self._is_interested_by_this_notification(data[services_constants.FEED_METADATA]): + markets_data = self.get_data_cache(self.get_current_exchange_time(), key=services_constants.COINGECKO_TOPIC_MARKETS) + if markets_data is not None and len(markets_data) > 0: + # Find the coin in the markets data by symbol (case-insensitive) + coin_data = None + for market_coin in markets_data: + if market_coin.symbol and market_coin.symbol.upper() == self.cryptocurrency.upper(): + coin_data = market_coin + break + + if coin_data is None: + # Coin not found in top 100, return neutral eval note + self.eval_note = 0.0 + await self.evaluation_completed( + cryptocurrency=self.cryptocurrency, + eval_time=self.get_current_exchange_time(), + eval_note_description=f"{self.cryptocurrency} not found in top 100 market cap coins" + ) + return + + # Calculate base signal from market_cap_change_percentage_24h + market_cap_change = coin_data.market_cap_change_percentage_24h or 0.0 + market_cap_signal = max(-1.0, min(1.0, market_cap_change / 50.0)) + + # Calculate price signal + price_change = coin_data.price_change_percentage_24h or 0.0 + price_signal = max(-1.0, min(1.0, price_change / 20.0)) + + # Calculate position weight (higher rank = more established = higher confidence) + rank = coin_data.market_cap_rank or 100 + position_weight = max(0.7, 1.0 - (rank - 1) / 100.0) + + # Calculate volume factor (normalize by max volume in top 100) + max_volume = max((coin.total_volume or 0.0) for coin in markets_data) + volume_factor = 1.0 + if max_volume > 0: + volume_factor = min(1.0, (coin_data.total_volume or 0.0) / max_volume) + + # Combine all factors + eval_note = (market_cap_signal * 0.6 + price_signal * 0.3 + volume_factor * 0.1) * position_weight + eval_note = max(-1.0, min(1.0, eval_note)) + + self.eval_note = eval_note + await self.evaluation_completed( + cryptocurrency=self.cryptocurrency, + eval_time=self.get_current_exchange_time(), + eval_note_description=f"Rank: {rank}, Market Cap Change: {market_cap_change:.2f}%, Price Change: {price_change:.2f}%, Position Weight: {position_weight:.2f}" + ) + + def _is_interested_by_this_notification(self, notification_description): + return notification_description == services_constants.COINGECKO_TOPIC_MARKETS diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/__init__.py b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/__init__.py new file mode 100644 index 0000000000..f90d2ea3aa --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/__init__.py @@ -0,0 +1,5 @@ +from .ai_strategies import ( + BaseLLMAIStrategyEvaluator, + CryptoLLMAIStrategyEvaluator, + GlobalLLMAIStrategyEvaluator +) diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/ai_strategies.py b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/ai_strategies.py new file mode 100644 index 0000000000..c217731010 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/ai_strategies.py @@ -0,0 +1,539 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.constants as common_constants +import octobot_commons.enums as commons_enums +import octobot_commons.evaluators_util as evaluators_util +import octobot_evaluators.api as evaluators_api +import octobot_evaluators.constants as evaluators_constants +import octobot_evaluators.matrix as matrix +import octobot_evaluators.enums as evaluators_enums +import octobot_evaluators.evaluators as evaluators +import octobot_services.api.services as services_api +import tentacles.Services.Services_bases + +from tentacles.Agent.teams.simple_ai_evaluator_agents_team import SimpleAIEvaluatorAgentsTeam, DeepAgentEvaluatorTeam + + +class BaseLLMAIStrategyEvaluator(evaluators.StrategyEvaluator): + """ + Base class for LLM-powered AI Strategy Evaluators. + Contains shared configuration and agent execution logic. + """ + + PROMPT_KEY = "prompt" + MODEL_KEY = "model" + MAX_TOKENS_KEY = "max_tokens" + TEMPERATURE_KEY = "temperature" + OUTPUT_FORMAT_KEY = "output_format" + EVALUATOR_TYPES_KEY = "evaluator_types" + USE_DEEP_AGENT_KEY = "use_deep_agent" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.model = None + self.prompt = None + self.max_tokens = None + self.temperature = None + self.output_format = "with_confidence" + self.evaluator_types = [ + evaluators_enums.EvaluatorMatrixTypes.TA.value, + evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value, + ] + + def init_user_inputs(self, inputs: dict) -> None: + super().init_user_inputs(inputs) + default_config = self.get_default_config() + self.prompt = self.UI.user_input( + self.PROMPT_KEY, + commons_enums.UserInputTypes.TEXT, + default_config[self.PROMPT_KEY], + inputs, + title="Custom prompt for LLM analysis. Leave empty to use default.", + ) + self.model = self.UI.user_input( + self.MODEL_KEY, + commons_enums.UserInputTypes.TEXT, + default_config[self.MODEL_KEY], + inputs, + title="LLM model to use for analysis.", + ) + self.max_tokens = self.UI.user_input( + self.MAX_TOKENS_KEY, + commons_enums.UserInputTypes.INT, + default_config[self.MAX_TOKENS_KEY], + inputs, + min_val=100, + max_val=10000, + title="Maximum tokens for LLM response.", + ) + self.temperature = self.UI.user_input( + self.TEMPERATURE_KEY, + commons_enums.UserInputTypes.FLOAT, + default_config[self.TEMPERATURE_KEY], + inputs, + min_val=0.0, + max_val=1.0, + title="Temperature for LLM randomness (0.0 = deterministic, 1.0 = very random).", + ) + self.use_deep_agent = self.UI.user_input( + self.USE_DEEP_AGENT_KEY, + commons_enums.UserInputTypes.BOOLEAN, + default_config.get(self.USE_DEEP_AGENT_KEY, False), + inputs, + title="Use Deep Agent implementation (requires deepagents package). Default: use traditional AI agent.", + ) + self.evaluator_types = self.UI.user_input( + self.EVALUATOR_TYPES_KEY, + commons_enums.UserInputTypes.MULTIPLE_OPTIONS, + default_config[self.EVALUATOR_TYPES_KEY], + inputs, + options=[ + evaluators_enums.EvaluatorMatrixTypes.TA.value, + evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value, + ], + title="Evaluator types to include in analysis.", + ) + self.output_format = self.UI.user_input( + self.OUTPUT_FORMAT_KEY, + commons_enums.UserInputTypes.OPTIONS, + default_config[self.OUTPUT_FORMAT_KEY], + inputs, + options=["standard", "with_confidence"], + title="Output format: standard (eval_note and description) or with_confidence (includes confidence level).", + ) + + @classmethod + def get_default_config(cls, time_frames: typing.Optional[list[str]] = None) -> dict: + return { + cls.PROMPT_KEY: "", + cls.MODEL_KEY: None, + cls.MAX_TOKENS_KEY: None, + cls.TEMPERATURE_KEY: None, + cls.EVALUATOR_TYPES_KEY: [ + evaluators_enums.EvaluatorMatrixTypes.TA.value, + evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value, + ], + cls.OUTPUT_FORMAT_KEY: "standard", + } + + def get_full_cycle_evaluator_types(self) -> tuple: + # returns a tuple as it is faster to create than a list + return tuple(self.evaluator_types) + + async def _get_ai_service(self): + ai_service = await services_api.get_ai_service( + is_backtesting=self._is_in_backtesting() + ) + if not ai_service: + self.logger.error("AIService not available, cannot perform LLM analysis") + return None + return ai_service + + async def _run_agents_analysis( + self, + aggregated_data: dict, + missing_data_types: list, + ai_service, + ) -> tuple[float | str, str]: + """ + Run strategy agents on aggregated data using the SimpleAIEvaluatorAgentsTeam. + + Returns: + Tuple of (eval_note, eval_note_description). + """ + # Determine which agents to include based on available data + include_ta = evaluators_enums.EvaluatorMatrixTypes.TA.value in aggregated_data + include_sentiment = evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value in aggregated_data + include_realtime = evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value in aggregated_data + + if not any([include_ta, include_sentiment, include_realtime]): + self.logger.error("No valid data available for any agent") + return common_constants.START_PENDING_EVAL_NOTE, "Error: No valid data available" + + # Create and run the team based on use_deep_agent config + if self.use_deep_agent: + # Deep agents require langchain service explicitly + team_class = DeepAgentEvaluatorTeam + # Get langchain service for deep agents + from tentacles.Services.Services_bases.langchain_service.langchain import LangChainService + langchain_service = await services_api.get_service( + LangChainService, + is_backtesting=self._is_in_backtesting() + ) + team = team_class( + ai_service=langchain_service or ai_service, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + include_ta=include_ta, + include_sentiment=include_sentiment, + include_realtime=include_realtime, + ) + else: + team_class = SimpleAIEvaluatorAgentsTeam + team = team_class( + ai_service=ai_service, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + include_ta=include_ta, + include_sentiment=include_sentiment, + include_realtime=include_realtime, + ) + + try: + eval_note, eval_note_description = await team.run_with_data( + aggregated_data=aggregated_data, + missing_data_types=missing_data_types, + ) + + return eval_note, eval_note_description + + except Exception as e: + self.logger.exception(f"SimpleAIEvaluatorAgentsTeam failed: {e}") + return common_constants.START_PENDING_EVAL_NOTE, f"Error: Agent team failed: {str(e)}" + + +class CryptoLLMAIStrategyEvaluator(BaseLLMAIStrategyEvaluator): + """ + LLM AI Strategy Evaluator for cryptocurrency-specific evaluations. + Evaluates individual cryptocurrencies (symbol=wildcard, cryptocurrency is specific). + Matrix evaluations are published with cryptocurrency set (not None). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._pending_evaluations = {} # {cryptocurrency: {(symbol, timeframe): data}} + self._expected_symbols_timeframes = {} # {cryptocurrency: set of (symbol, timeframe)} + + async def matrix_callback( + self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame, + **kwargs, + ): + if evaluator_type not in self.evaluator_types: + return + + # Skip global evaluations (cryptocurrency=None) - those are for GlobalLLMAIStrategyEvaluator + if cryptocurrency is None: + return + + # Initialize pending evaluations for this cryptocurrency if needed + if cryptocurrency not in self._pending_evaluations: + self._pending_evaluations[cryptocurrency] = {} + self._expected_symbols_timeframes[cryptocurrency] = set() + + # Track this symbol/timeframe combination + eval_key = (symbol, time_frame) + self._expected_symbols_timeframes[cryptocurrency].add(eval_key) + + # Check if we have sufficient data for this symbol/timeframe + available_eval_types = [] + for eval_type in self.evaluator_types: + if self._are_every_evaluation_valid_and_up_to_date( + matrix_id, + evaluator_name, + eval_type, + exchange_name, + cryptocurrency, + symbol, + time_frame, + ): + available_eval_types.append(eval_type) + + # Require at least one evaluator type to have data + if not available_eval_types: + return + + # Store this evaluation data + self._pending_evaluations[cryptocurrency][eval_key] = { + 'matrix_id': matrix_id, + 'exchange_name': exchange_name, + 'symbol': symbol, + 'time_frame': time_frame, + 'available_eval_types': available_eval_types, + } + + # Check if we have data for all expected symbols/timeframes + # Evaluate when we have at least one complete set + if len(self._pending_evaluations[cryptocurrency]) < 1: + return + + # Trigger evaluation + await self._evaluate_for_cryptocurrency( + matrix_id=matrix_id, + exchange_name=exchange_name, + cryptocurrency=cryptocurrency, + ) + + async def _evaluate_for_cryptocurrency( + self, + matrix_id: str, + exchange_name: str, + cryptocurrency: str, + ): + # Aggregate data by evaluator type across all symbols and timeframes + aggregated_data = {} + missing_data_types = [] + all_eval_types = set() + + # Collect all available eval types from all pending evaluations + for eval_data in self._pending_evaluations[cryptocurrency].values(): + all_eval_types.update(eval_data['available_eval_types']) + + for eval_type in self.evaluator_types: + if eval_type in all_eval_types: + all_evaluations = {} + + # Collect evaluations from all symbols and timeframes + for eval_key, eval_data in self._pending_evaluations[cryptocurrency].items(): + if eval_type not in eval_data['available_eval_types']: + continue + + symbol_tf = eval_key[0] # symbol + time_frame_tf = eval_key[1] # timeframe + matrix_id_tf = eval_data['matrix_id'] + exchange_name_tf = eval_data['exchange_name'] + + if eval_type == evaluators_enums.EvaluatorMatrixTypes.TA.value: + # TA evaluators need time_frame parameter + evaluations = matrix.get_evaluations_by_evaluator( + matrix_id_tf, + exchange_name_tf, + eval_type, + cryptocurrency, + symbol_tf, + time_frame_tf, + ) + elif eval_type == evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value: + # Social evaluators - get those for the same cryptocurrency and symbol + evaluations = matrix.get_evaluations_by_evaluator( + matrix_id_tf, exchange_name_tf, eval_type, cryptocurrency, symbol_tf + ) + # Also get social evaluators by cryptocurrency only + evaluations.update( + matrix.get_evaluations_by_evaluator( + matrix_id_tf, exchange_name_tf, eval_type, cryptocurrency + ) + ) + elif eval_type == evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value: + # Real-time evaluators need time_frame parameter + evaluations = matrix.get_evaluations_by_evaluator( + matrix_id_tf, + exchange_name_tf, + eval_type, + cryptocurrency, + symbol_tf, + time_frame_tf, + ) + else: + # Fallback for any other evaluator types + evaluations = matrix.get_evaluations_by_evaluator( + matrix_id_tf, + exchange_name_tf, + eval_type, + cryptocurrency, + symbol_tf, + time_frame_tf, + ) + + all_evaluations.update(evaluations) + + valid_evaluations = [] + for ev in all_evaluations.values(): + eval_note = evaluators_api.get_value(ev) + eval_note_type = evaluators_api.get_type(ev) + if evaluators_util.check_valid_eval_note( + eval_note, + eval_note_type, + evaluators_constants.EVALUATOR_EVAL_DEFAULT_TYPE, + ): + valid_evaluations.append( + { + "eval_note": eval_note, + "eval_note_description": evaluators_api.get_description(ev) or "", + } + ) + if valid_evaluations: + aggregated_data[eval_type] = valid_evaluations + else: + missing_data_types.append(eval_type) + else: + missing_data_types.append(eval_type) + + if not aggregated_data: + return + + ai_service = await self._get_ai_service() + if not ai_service: + self.eval_note = 0 + final_eval_note_description = "Error: AIService not available" + await self.evaluation_completed( + cryptocurrency=cryptocurrency, + symbol=None, + time_frame=None, + eval_note=self.eval_note, + eval_note_description=final_eval_note_description, + eval_time=0, + notify=True, + origin_consumer=self.consumer_instance, + ) + return + + self.eval_note, final_eval_note_description = await self._run_agents_analysis( + aggregated_data, missing_data_types, ai_service + ) + + # Publish evaluation on cryptocurrency level (no symbol, no timeframe) + await self.evaluation_completed( + cryptocurrency=cryptocurrency, + symbol=None, + time_frame=None, + eval_note=self.eval_note, + eval_note_description=final_eval_note_description, + eval_time=0, + notify=True, + origin_consumer=self.consumer_instance, + ) + + # Clean up pending evaluations for this cryptocurrency + if cryptocurrency in self._pending_evaluations: + del self._pending_evaluations[cryptocurrency] + if cryptocurrency in self._expected_symbols_timeframes: + del self._expected_symbols_timeframes[cryptocurrency] + + +class GlobalLLMAIStrategyEvaluator(BaseLLMAIStrategyEvaluator): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._is_evaluating = False + + + @classmethod + def get_is_cryptocurrencies_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not cryptocurrency dependant else False + """ + return False + + async def matrix_callback( + self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame, + **kwargs, + ): + if evaluator_type not in self.evaluator_types: + return + + if cryptocurrency is None or (self.eval_note == common_constants.START_PENDING_EVAL_NOTE and not self._is_evaluating): + self._is_evaluating = True + # Only evaluate if it's a global evaluation or if we haven't evaluated yet + await self._evaluate_global( + matrix_id=matrix_id, + exchange_name=exchange_name, + ) + self._is_evaluating = False + + async def _evaluate_global( + self, + matrix_id: str, + exchange_name: str, + ): + """ + Perform global market evaluation across all cryptocurrencies. + """ + aggregated_data = {} + missing_data_types = [] + + for eval_type in self.evaluator_types: + # Fetch global evaluators (no cryptocurrency, no symbol, no timeframe) + evaluations = matrix.get_evaluations_by_evaluator( + matrix_id, exchange_name, eval_type + ) + + valid_evaluations = [ + { + "eval_note": ev.node_value, + "eval_note_description": ev.node_description or "", + } + for ev in evaluations.values() + if evaluators_util.check_valid_eval_note(ev.node_value) + ] + if valid_evaluations: + aggregated_data[eval_type] = valid_evaluations + else: + missing_data_types.append(eval_type) + + if not aggregated_data: + return + + ai_service = await self._get_ai_service() + if not ai_service: + self.eval_note = 0 + final_eval_note_description = "Error: LLMService not available" + self._has_evaluated = True + await self.evaluation_completed( + cryptocurrency=None, + symbol=None, + time_frame=None, + eval_note=self.eval_note, + eval_note_description=final_eval_note_description, + eval_time=0, + notify=True, + origin_consumer=self.consumer_instance, + ) + return + + self.eval_note, final_eval_note_description = await self._run_agents_analysis( + aggregated_data, missing_data_types, ai_service + ) + + # Publish evaluation at global level + await self.evaluation_completed( + cryptocurrency=None, + symbol=None, + time_frame=None, + eval_note=self.eval_note, + eval_note_description=final_eval_note_description, + eval_time=0, + notify=True, + origin_consumer=self.consumer_instance, + ) diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/config/CryptoLLMAIStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/config/CryptoLLMAIStrategyEvaluator.json new file mode 100644 index 0000000000..686b718e89 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/config/CryptoLLMAIStrategyEvaluator.json @@ -0,0 +1,19 @@ +{ + "default_config": [ + "MACDMomentumEvaluator", + "RSIMomentumEvaluator", + "EMADivergenceTrendEvaluator", + "DoubleMovingAverageTrendEvaluator", + "BBMomentumEvaluator", + "ADXMomentumEvaluator", + "SocialScoreEvaluator" + ], + "required_evaluators": ["*"], + "required_time_frames": [ + "1h", + "4h", + "1d" + ], + "required_candles_count": 1000, + "use_deep_agent": false +} diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/config/GlobalLLMAIStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/config/GlobalLLMAIStrategyEvaluator.json new file mode 100644 index 0000000000..d64faf970f --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/config/GlobalLLMAIStrategyEvaluator.json @@ -0,0 +1,15 @@ +{ + "default_config": [ + "FearAndGreedIndexEvaluator", + "MarketCapEvaluator", + "CryptoNewsEvaluator" + ], + "required_evaluators": [ + "FearAndGreedIndexEvaluator", + "MarketCapEvaluator", + "CryptoNewsEvaluator" + ], + "required_time_frames": ["1d"], + "use_deep_agent": false +} + diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/metadata.json b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/metadata.json new file mode 100644 index 0000000000..f90b39bd60 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/metadata.json @@ -0,0 +1,12 @@ +{ + "version": "1.3.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [ + "CryptoLLMAIStrategyEvaluator", + "GlobalLLMAIStrategyEvaluator" + ], + "tentacles-requirements": [ + "gpt_service", + "simple_ai_evaluator_agents_team" + ] +} diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/resources/LLMAIStrategyEvaluator.md b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/resources/LLMAIStrategyEvaluator.md new file mode 100644 index 0000000000..a0ff3c6dbc --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/resources/LLMAIStrategyEvaluator.md @@ -0,0 +1,106 @@ +# LLMAIStrategyEvaluator + +The LLMAIStrategyEvaluator is an advanced strategy evaluator that leverages Large Language Models (LLMs) to analyze and synthesize signals from Technical Analysis (TA), Social sentiment, and Real-Time evaluators. It provides intelligent trading recommendations by combining multiple evaluator inputs with AI-driven reasoning through parallel sub-agent processing. + +## How it works + +1. **Signal Aggregation**: Collects evaluation notes and descriptions from configured TA, Social, and Real-Time evaluators +2. **Parallel Sub-Agent Analysis**: Uses specialized StrategyAgents to analyze each evaluator type independently +3. **AI Synthesis**: Leverages Large Language Model reasoning in each sub-agent for specialized analysis +4. **Summarization**: Combines all sub-agent results through a SummarizationAgent for final evaluation +5. **Output Generation**: Produces eval_note (-1 to 1) and descriptive reasoning + +## File Structure + +The LLMAIStrategyEvaluator is organized in a modular architecture: + +``` +ai_strategies_evaluator/ +├── ai_strategies.py # Main evaluator implementation +├── agents/ # Agent-based architecture +│ ├── __init__.py # Agent module exports +│ ├── base_llm_agent.py # Abstract base agent class +│ ├── summarization_agent.py # Final result synthesis +│ ├── technical_analysis_agent.py # TA signal analysis +│ ├── sentiment_analysis_agent.py # Social sentiment analysis +│ └── real_time_analysis_agent.py # Real-time market analysis +│ └── factory.py # Agent creation factory +├── config/ # Configuration files +│ └── LLMAIStrategyEvaluator.json # Evaluator configuration +├── resources/ # Documentation and metadata +│ ├── LLMAIStrategyEvaluator.md # This documentation +│ └── metadata.json # Tentacle metadata +├── tests/ # Test suite +│ └── test_llm_ai_strategy_evaluator.py # Unit tests +└── __init__.py # Package initialization +``` + +### User Inputs +- **Prompt**: Custom prompt for LLM analysis (leave empty to use default specialized prompts per evaluator type) +- **Model**: GPT model selection (uses GPTService defaults if not specified) +- **Max Tokens**: Maximum response length (uses GPTService defaults if not specified) +- **Temperature**: Randomness in LLM responses (uses GPTService defaults if not specified) +- **Evaluator Types**: Select TA, Social, Real-Time evaluators to include (all enabled by default) +- **Output Format**: Choose "standard" or "with_confidence" (includes average confidence level) + +### Default Behavior +- Evaluates on 1-hour, 4-hour, and 1-day timeframes +- Uses GPTService default model and parameters +- Includes TA, Social, and Real-Time evaluators by default +- Provides specialized analysis for each evaluator type +- Uses parallel processing for improved performance + +### Specialized Analysis Types + +#### Technical Analysis Agent +Focuses exclusively on technical indicators and price patterns: +- Analyzes RSI, MACD, moving averages, Bollinger Bands, ADX, etc. +- Assesses trend direction and indicator convergence +- Provides confidence based on signal strength and agreement + +#### Social Sentiment Agent +Focuses exclusively on social and sentiment signals: +- Analyzes social media, news, community discussions +- Assesses overall market mood and sentiment +- Provides confidence based on signal consistency and volume + +#### Real-Time Agent +Focuses on live market movements and instant fluctuations: +- Analyzes order book data and real-time price movements +- Assesses current buying/selling pressure +- Provides confidence based on signal volatility and recency + +## Requirements +- GPTService must be configured and activated +- At least one TA, Social, or Real-Time evaluator should be active for meaningful analysis +- Works in both live and backtesting modes + +## Use Cases +- Advanced signal synthesis from multiple evaluator types +- Parallel AI-powered market analysis for improved performance +- Specialized analysis combining technical, social, and real-time signals +- Automated trading decisions with multi-faceted AI reasoning +- Backtesting complex multi-signal strategies + +## Architecture Benefits + +### Parallel Processing +- Each evaluator type is analyzed by a dedicated agent running in parallel +- Improved performance and reduced latency compared to sequential processing +- Better resource utilization of LLM API calls + +### Specialized Analysis +- Each sub-agent focuses on its domain expertise +- More accurate analysis through domain-specific prompts and reasoning +- Consistent evaluation methodology across different signal types + +### Intelligent Summarization +- Final evaluation considers all sub-agent results +- Weights signals based on confidence and consistency +- Provides comprehensive reasoning across all analysis domains + +## Warning +- LLM responses may vary due to temperature settings +- Requires OpenAI API access through GPTService +- Parallel processing increases API usage and costs +- Performance depends on quality of input evaluator signals \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/ai_strategies_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/__init__.py b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/__init__.py new file mode 100644 index 0000000000..53250a37d0 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/__init__.py @@ -0,0 +1 @@ +from .blank_strategy import BlankStrategyEvaluator diff --git a/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/blank_strategy.py b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/blank_strategy.py new file mode 100644 index 0000000000..8f20692829 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/blank_strategy.py @@ -0,0 +1,52 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.enums as enums + + +class BlankStrategyEvaluator(evaluators.StrategyEvaluator): + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + super().init_user_inputs(inputs) + self.UI.user_input(common_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT, common_enums.UserInputTypes.INT, + 200, inputs, min_val=1, + title="Initialization candles count: the number of historical candles to fetch from " + "exchanges when OctoBot is starting.") + + def get_full_cycle_evaluator_types(self) -> tuple: + # returns a tuple as it is faster to create than a list + return enums.EvaluatorMatrixTypes.TA.value, enums.EvaluatorMatrixTypes.SCRIPTED.value + + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + self.eval_note = eval_note + await self.strategy_completed(cryptocurrency, symbol, time_frame=time_frame) diff --git a/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/config/BlankStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/config/BlankStrategyEvaluator.json new file mode 100644 index 0000000000..a5aafb3885 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/config/BlankStrategyEvaluator.json @@ -0,0 +1,6 @@ +{ + "required_time_frames" : ["1h"], + "required_evaluators" : ["*"], + "required_candles_count" : 200, + "default_config" : ["ScriptedEvaluator"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/metadata.json b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/metadata.json new file mode 100644 index 0000000000..98a8e36d4f --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["BlankStrategyEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/resources/BlankStrategyEvaluator.md b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/resources/BlankStrategyEvaluator.md new file mode 100644 index 0000000000..21232afabe --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/blank_strategy_evaluator/resources/BlankStrategyEvaluator.md @@ -0,0 +1 @@ +BlankStrategyEvaluator is forwarding evaluator values to the trading mode. diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/__init__.py b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/__init__.py new file mode 100644 index 0000000000..e234ad0b2e --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/__init__.py @@ -0,0 +1 @@ +from .dip_analyser_strategy import DipAnalyserStrategyEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/config/DipAnalyserStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/config/DipAnalyserStrategyEvaluator.json new file mode 100644 index 0000000000..3332a0f8eb --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/config/DipAnalyserStrategyEvaluator.json @@ -0,0 +1,14 @@ +{ + "default_config": [ + "KlingerOscillatorReversalConfirmationMomentumEvaluator", + "RSIWeightMomentumEvaluator" + ], + "required_evaluators": [ + "InstantFluctuationsEvaluator", + "KlingerOscillatorReversalConfirmationMomentumEvaluator", + "RSIWeightMomentumEvaluator" + ], + "required_time_frames": [ + "4h" + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/dip_analyser_strategy.py b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/dip_analyser_strategy.py new file mode 100644 index 0000000000..146169bd84 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/dip_analyser_strategy.py @@ -0,0 +1,99 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_evaluators.api.matrix as evaluators_api +import octobot_evaluators.evaluators.channel as evaluator_channel +import octobot_evaluators.constants as evaluator_constants +import octobot_evaluators.matrix as matrix +import octobot_evaluators.enums as evaluators_enums +import octobot_evaluators.evaluators as evaluators +import octobot_tentacles_manager.api as tentacles_manager_api +import octobot_trading.api as trading_api +import tentacles.Evaluator.TA as TA + + +class DipAnalyserStrategyEvaluator(evaluators.StrategyEvaluator): + REVERSAL_CONFIRMATION_CLASS_NAME = TA.KlingerOscillatorReversalConfirmationMomentumEvaluator.get_name() + REVERSAL_WEIGHT_CLASS_NAME = TA.RSIWeightMomentumEvaluator.get_name() + + @staticmethod + def get_eval_type(): + return typing.Dict[str, int] + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.evaluation_time_frame = None + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + self.evaluation_time_frame = self.evaluation_time_frame or commons_enums.TimeFrames( + self.UI.user_input( + evaluator_constants.STRATEGIES_REQUIRED_TIME_FRAME, + commons_enums.UserInputTypes.MULTIPLE_OPTIONS, + [commons_enums.TimeFrames.ONE_HOUR.value], + inputs, options=[tf.value for tf in commons_enums.TimeFrames], + title="Analysed time frame: only the first one will be considered for DipAnalyserStrategyEvaluator." + )[0] + ).value + + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + if evaluator_type == evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value: + # trigger re-evaluation + exchange_id = trading_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + await evaluator_channel.trigger_technical_evaluators_re_evaluation_with_updated_data(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + exchange_id, + self.strategy_time_frames) + # do not continue this evaluation + return + elif evaluator_type == evaluators_enums.EvaluatorMatrixTypes.TA.value: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + TA_evaluations = matrix.get_evaluations_by_evaluator(matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.TA.value, + cryptocurrency, + symbol, + self.evaluation_time_frame, + allowed_values=[ + commons_constants.START_PENDING_EVAL_NOTE]) + + try: + if evaluators_api.get_value(TA_evaluations[self.REVERSAL_CONFIRMATION_CLASS_NAME]): + self.eval_note = evaluators_api.get_value(TA_evaluations[self.REVERSAL_WEIGHT_CLASS_NAME]) + await self.strategy_completed(cryptocurrency, symbol) + except KeyError as e: + self.logger.error(f"Missing required evaluator: {e}") diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/metadata.json b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/metadata.json new file mode 100644 index 0000000000..5676bb93db --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DipAnalyserStrategyEvaluator"], + "tentacles-requirements": ["momentum_evaluator.py"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/resources/DipAnalyserStrategyEvaluator.md b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/resources/DipAnalyserStrategyEvaluator.md new file mode 100644 index 0000000000..65c2b122ef --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/resources/DipAnalyserStrategyEvaluator.md @@ -0,0 +1,17 @@ +DipAnalyserStrategyEvaluator is a strategy analysing market dips using [RSI](https://www.investopedia.com/terms/r/rsi.asp) +averages. According to the level of the RSI, a buy signal can be generated. This signal has a weight that corresponds to +a higher or lower intensity of the RSI evaluation. + +This strategy also uses the [Klinger oscillator](https://www.investopedia.com/terms/k/klingeroscillator.asp) to identify +reversals and create buy signals. + +A buy signal is generated when the RSI component is signaling an opportunity and the Klinger part is confirming +a reversal situation. + +This strategy is updated at the end of each candle on the watched time frame. + +It is also possible to make it trigger +automatically using a real-time evaluator. Using a real time evaluator that signals sudden market changes like the +InstantFluctuationsEvaluator will make DipAnalyserStrategyEvaluator also wake up on such events. + +DipAnalyserStrategyEvaluator focuses on one time frame only and works best on larger time frames such as 4h and more. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/tests/test_dip_analyser_strategy_evaluator.py b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/tests/test_dip_analyser_strategy_evaluator.py new file mode 100644 index 0000000000..ad97a67639 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/dip_analyser_strategy_evaluator/tests/test_dip_analyser_strategy_evaluator.py @@ -0,0 +1,106 @@ +# Drakkar-Software OctoBot +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import decimal + +import tests.functional_tests.strategy_evaluators_tests.abstract_strategy_test as abstract_strategy_test +import tentacles.Evaluator.Strategies as Strategies +import tentacles.Trading.Mode as Mode + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def strategy_tester(): + strategy_tester_instance = DipAnalyserStrategiesEvaluatorTest() + strategy_tester_instance.initialize(Strategies.DipAnalyserStrategyEvaluator, Mode.DipAnalyserTradingMode) + return strategy_tester_instance + + +class DipAnalyserStrategiesEvaluatorTest(abstract_strategy_test.AbstractStrategyTest): + """ + About using this test framework: + To be called by pytest, tests have to be called manually since the cythonized version of AbstractStrategyTest + creates an __init__() which prevents the default pytest tests collect process + """ + + # Careful with results here, unlike other strategy tests, this one uses only the 4h timeframe, therefore results + # are not comparable with regular 1h timeframes strategy tests + + # Cannot use bittrex data since they are not providing 4h timeframe data + + # test_full_mixed_strategies_evaluator.py with only 4h timeframe results are provided for comparison: + # format: results: (bot profitability, market average profitability) + + async def test_default_run(self): + # market: -49.25407390406244 + await self.run_test_default_run(decimal.Decimal(str(-24.612))) + + async def test_slow_downtrend(self): + # market: -49.25407390406244 + # market: -47.50593824228029 + await self.run_test_slow_downtrend(decimal.Decimal(str(-24.612)), decimal.Decimal(str(-33.601)), None, None, skip_extended=True) + + async def test_sharp_downtrend(self): + # market: -34.67997135795625 + await self.run_test_sharp_downtrend(decimal.Decimal(str(-21.634)), None, skip_extended=True) + + async def test_flat_markets(self): + # market: -38.07647740440325 + # market: -53.87077652637819 + await self.run_test_flat_markets(decimal.Decimal(str(-20.577)), decimal.Decimal(str(-32.756)), None, None, skip_extended=True) + + async def test_slow_uptrend(self): + # market: 11.32644122514472 + # market: -36.64596273291926 + await self.run_test_slow_uptrend(decimal.Decimal(str(11.326)), decimal.Decimal(str(-14.248))) + + async def test_sharp_uptrend(self): + # market: -17.047906776003458 + # market: -18.25837965302341 + await self.run_test_sharp_uptrend(decimal.Decimal(str(3.607)), decimal.Decimal(str(10.956))) + + async def test_up_then_down(self): + await self.run_test_up_then_down(None, skip_extended=True) + + +async def test_default_run(strategy_tester): + await strategy_tester.test_default_run() + + +async def test_slow_downtrend(strategy_tester): + await strategy_tester.test_slow_downtrend() + + +async def test_sharp_downtrend(strategy_tester): + await strategy_tester.test_sharp_downtrend() + + +async def test_flat_markets(strategy_tester): + await strategy_tester.test_flat_markets() + + +async def test_slow_uptrend(strategy_tester): + await strategy_tester.test_slow_uptrend() + + +async def test_sharp_uptrend(strategy_tester): + await strategy_tester.test_sharp_uptrend() + + +async def test_up_then_down(strategy_tester): + await strategy_tester.test_up_then_down() diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/__init__.py b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/__init__.py new file mode 100644 index 0000000000..15884a6d72 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/__init__.py @@ -0,0 +1 @@ +from .mixed_strategies import SimpleStrategyEvaluator, TechnicalAnalysisStrategyEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/config/SimpleStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/config/SimpleStrategyEvaluator.json new file mode 100644 index 0000000000..51701e131a --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/config/SimpleStrategyEvaluator.json @@ -0,0 +1,20 @@ +{ + "default_config": [ + "DoubleMovingAverageTrendEvaluator", + "RSIMomentumEvaluator" + ], + "required_evaluators": [ + "*" + ], + "required_time_frames": [ + "1h", + "4h", + "1d" + ], + "required_candles_count": 1000, + "social_evaluators_notification_timeout": 3600, + "re_evaluate_TA_when_social_or_realtime_notification": true, + "background_social_evaluators": [ + "RedditForumEvaluator" + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/config/TechnicalAnalysisStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/config/TechnicalAnalysisStrategyEvaluator.json new file mode 100644 index 0000000000..99a92af65c --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/config/TechnicalAnalysisStrategyEvaluator.json @@ -0,0 +1,38 @@ +{ + "compatible_evaluator_types": [ + "TA", + "REAL_TIME" + ], + "default_config": [ + "DoubleMovingAverageTrendEvaluator", + "RSIMomentumEvaluator" + ], + "required_evaluators": [ + "*" + ], + "required_time_frames": [ + "30m", "1h", "2h", "4h", "1d" + ], + "time_frames_to_weight": [ + { + "time_frame": "30m", + "weight": 30 + }, + { + "time_frame": "1h", + "weight": 50 + }, + { + "time_frame": "2h", + "weight": 50 + }, + { + "time_frame": "4h", + "weight": 50 + }, + { + "time_frame": "1d", + "weight": 30 + } + ] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/metadata.json b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/metadata.json new file mode 100644 index 0000000000..23c62b829c --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["SimpleStrategyEvaluator", "TechnicalAnalysisStrategyEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/mixed_strategies.py b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/mixed_strategies.py new file mode 100644 index 0000000000..f2503e6c65 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/mixed_strategies.py @@ -0,0 +1,370 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.evaluators_util as evaluators_util +import octobot_commons.time_frame_manager as time_frame_manager +import octobot_evaluators.api as evaluators_api +import octobot_evaluators.evaluators.channel as evaluators_channel +import octobot_evaluators.matrix as matrix +import octobot_evaluators.enums as evaluators_enums +import octobot_evaluators.constants as evaluators_constants +import octobot_evaluators.errors as errors +import octobot_evaluators.evaluators as evaluators +import octobot_tentacles_manager.api.configurator as tentacles_manager_api +import octobot_tentacles_manager.configuration as tm_configuration +import octobot_trading.api as trading_api + + +class SimpleStrategyEvaluator(evaluators.StrategyEvaluator): + SOCIAL_EVALUATORS_NOTIFICATION_TIMEOUT_KEY = "social_evaluators_notification_timeout" + RE_EVAL_TA_ON_RT_OR_SOCIAL = "re_evaluate_TA_when_social_or_realtime_notification" + BACKGROUND_SOCIAL_EVALUATORS = "background_social_evaluators" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.re_evaluation_triggering_eval_types = [evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value] + self.social_evaluators_default_timeout = None + self.re_evaluate_TA_when_social_or_realtime_notification = True + self.background_social_evaluators = [] + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + super().init_user_inputs(inputs) + default_config = self.get_default_config() + self.UI.user_input(commons_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT, commons_enums.UserInputTypes.INT, + default_config[commons_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT], + inputs, min_val=1, + title="Initialization candles count: the number of historical candles to fetch from " + "exchanges when OctoBot is starting.") + self.social_evaluators_default_timeout = \ + self.UI.user_input(self.SOCIAL_EVALUATORS_NOTIFICATION_TIMEOUT_KEY, commons_enums.UserInputTypes.INT, + default_config[self.SOCIAL_EVALUATORS_NOTIFICATION_TIMEOUT_KEY], + inputs, min_val=0, + title="Number of seconds to consider a social evaluation valid from the moment it " + "appears on OctoBot. Example: a tweet evaluation.") + self.re_evaluate_TA_when_social_or_realtime_notification = \ + self.UI.user_input(self.RE_EVAL_TA_ON_RT_OR_SOCIAL, commons_enums.UserInputTypes.BOOLEAN, + default_config[self.RE_EVAL_TA_ON_RT_OR_SOCIAL], inputs, + title="Recompute technical evaluators on real-time evaluator signal: " + "When activated, technical evaluators will be asked to recompute their evaluation " + "based on the current in-construction candle " + "for each new evaluation appearing on social or " + "real-time evaluators. After such an event, this strategy will finalize its " + "evaluation only once this updated technical analyses will be completed. " + "If deactivated, social and real-time evaluations will be taken into account " + "alongside technical analysis results of the last closed candle.") + self.background_social_evaluators = \ + self.UI.user_input(self.BACKGROUND_SOCIAL_EVALUATORS, commons_enums.UserInputTypes.MULTIPLE_OPTIONS, + default_config[self.BACKGROUND_SOCIAL_EVALUATORS], + inputs, other_schema_values={"minItems": 0, "uniqueItems": True}, + options=["RedditForumEvaluator", "TwitterNewsEvaluator", + "TelegramSignalEvaluator", "GoogleTrendsEvaluator", + "FearAndGreedIndexEvaluator", "SocialScoreEvaluator", + "CryptoNewsEvaluator", "MarketCapEvaluator"], + title="Social evaluator to consider as background evaluators: they won't trigger technical " + "evaluators re-evaluation when updated. Avoiding unnecessary updates increases " + "performances.") + + @classmethod + def get_default_config(cls, time_frames: typing.Optional[list[str]] = None) -> dict: + return { + evaluators_constants.STRATEGIES_REQUIRED_TIME_FRAME: ( + time_frames or [commons_enums.TimeFrames.ONE_HOUR.value] + ), + commons_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT: 500, + cls.SOCIAL_EVALUATORS_NOTIFICATION_TIMEOUT_KEY: 1 * commons_constants.HOURS_TO_SECONDS, + cls.RE_EVAL_TA_ON_RT_OR_SOCIAL: True, + cls.BACKGROUND_SOCIAL_EVALUATORS: [], + } + + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + if symbol is None and cryptocurrency is not None and evaluator_type == evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value: + # social evaluators can be cryptocurrency related but not symbol related, wakeup every symbol + for available_symbol in matrix.get_available_symbols(matrix_id, exchange_name, cryptocurrency): + await self._trigger_evaluation(matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + exchange_name, + cryptocurrency, + available_symbol) + return + else: + await self._trigger_evaluation(matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + exchange_name, + cryptocurrency, + symbol) + + async def _trigger_evaluation(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + exchange_name, + cryptocurrency, + symbol): + # ensure only start evaluations when technical evaluators have been initialized + try: + TA_by_timeframe = { + available_time_frame: matrix.get_evaluations_by_evaluator( + matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.TA.value, + cryptocurrency, + symbol, + available_time_frame.value, + allow_missing=False, + allowed_values=[commons_constants.START_PENDING_EVAL_NOTE]) + for available_time_frame in self.strategy_time_frames + } + # social evaluators by symbol + social_evaluations_by_evaluator = matrix.get_evaluations_by_evaluator(matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, + cryptocurrency, + symbol) + # social evaluators by crypto currency + social_evaluations_by_evaluator.update(matrix.get_evaluations_by_evaluator(matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.SOCIAL.value, + cryptocurrency)) + available_rt_time_frames = self.get_available_time_frames(matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value, + cryptocurrency, + symbol) + RT_evaluations_by_time_frame = { + available_time_frame: matrix.get_evaluations_by_evaluator( + matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value, + cryptocurrency, + symbol, + available_time_frame) + for available_time_frame in available_rt_time_frames + } + if self.re_evaluate_TA_when_social_or_realtime_notification \ + and any(value for value in TA_by_timeframe.values()) \ + and evaluator_type != evaluators_enums.EvaluatorMatrixTypes.TA.value \ + and evaluator_type in self.re_evaluation_triggering_eval_types \ + and evaluator_name not in self.background_social_evaluators: + if evaluators_util.check_valid_eval_note(eval_note, eval_type=eval_note_type, + expected_eval_type=evaluators_constants.EVALUATOR_EVAL_DEFAULT_TYPE): + # trigger re-evaluation + exchange_id = trading_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + await evaluators_channel.trigger_technical_evaluators_re_evaluation_with_updated_data(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + exchange_id, + self.strategy_time_frames) + # do not continue this evaluation + return + counter = 0 + total_evaluation = 0 + + for eval_by_rt in RT_evaluations_by_time_frame.values(): + for evaluation in eval_by_rt.values(): + eval_value = evaluators_api.get_value(evaluation) + if evaluators_util.check_valid_eval_note(eval_value, eval_type=evaluators_api.get_type(evaluation), + expected_eval_type=evaluators_constants.EVALUATOR_EVAL_DEFAULT_TYPE): + total_evaluation += eval_value + counter += 1 + + for eval_by_ta in TA_by_timeframe.values(): + for evaluation in eval_by_ta.values(): + eval_value = evaluators_api.get_value(evaluation) + if evaluators_util.check_valid_eval_note(eval_value, eval_type=evaluators_api.get_type(evaluation), + expected_eval_type=evaluators_constants.EVALUATOR_EVAL_DEFAULT_TYPE): + total_evaluation += eval_value + counter += 1 + + if social_evaluations_by_evaluator: + exchange_manager = trading_api.get_exchange_manager_from_exchange_name_and_id( + exchange_name, + trading_api.get_exchange_id_from_matrix_id(exchange_name, self.matrix_id) + ) + current_time = trading_api.get_exchange_current_time(exchange_manager) + for evaluation in social_evaluations_by_evaluator.values(): + eval_value = evaluators_api.get_value(evaluation) + if evaluators_util.check_valid_eval_note(eval_value, eval_type=evaluators_api.get_type(evaluation), + expected_eval_type=evaluators_constants.EVALUATOR_EVAL_DEFAULT_TYPE, + eval_time=evaluators_api.get_time(evaluation), + expiry_delay=self.social_evaluators_default_timeout, + current_time=current_time): + total_evaluation += eval_value + counter += 1 + + if counter > 0: + self.eval_note = total_evaluation / counter + await self.strategy_completed(cryptocurrency, symbol) + + except errors.UnsetTentacleEvaluation as e: + if evaluator_type == evaluators_enums.EvaluatorMatrixTypes.TA.value: + self.logger.error(f"Missing technical evaluator data for ({e})") + # otherwise it's a social or real-time evaluator, it will shortly be taken into account by TA update cycle + except Exception as e: + self.logger.exception(e, True, f"Error when computing strategy evaluation: {e}") + + +class TechnicalAnalysisStrategyEvaluator(evaluators.StrategyEvaluator): + TIME_FRAMES_TO_WEIGHT = "time_frames_to_weight" + TIME_FRAME = "time_frame" + WEIGHT = "weight" + DEFAULT_WEIGHT = 50 + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.allowed_evaluator_types = [evaluators_enums.EvaluatorMatrixTypes.TA.value, + evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value] + config = tentacles_manager_api.get_tentacle_config(self.tentacles_setup_config, self.__class__) + if config: + self.weight_by_time_frames = TechnicalAnalysisStrategyEvaluator._get_weight_by_time_frames( + config[TechnicalAnalysisStrategyEvaluator.TIME_FRAMES_TO_WEIGHT] + ) + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + super().init_user_inputs(inputs) + time_frames_and_weight = [] + config_time_frames_and_weight = self.UI.user_input( + self.TIME_FRAMES_TO_WEIGHT, commons_enums.UserInputTypes.OBJECT_ARRAY, + time_frames_and_weight, inputs, other_schema_values={"minItems": 1, "uniqueItems": True}, + item_title="Time frame", + title="Analysed time frames and their associated weight." + ) + # init one user input to generate user input schema and default values + time_frames_and_weight.append(self._init_tf_and_weight(inputs, commons_enums.TimeFrames.THIRTY_MINUTES, 30)) + self.weight_by_time_frames = TechnicalAnalysisStrategyEvaluator._get_weight_by_time_frames( + config_time_frames_and_weight + ) + + def _init_tf_and_weight(self, inputs, timeframe, weight): + return { + self.TIME_FRAME: self.UI.user_input(self.TIME_FRAME, commons_enums.UserInputTypes.OPTIONS, + timeframe.value, inputs, + options=[tf.value for tf in commons_enums.TimeFrames], + parent_input_name=self.TIME_FRAMES_TO_WEIGHT, + array_indexes=[0], + title="Time frame"), + self.WEIGHT: self.UI.user_input(self.WEIGHT, commons_enums.UserInputTypes.FLOAT, + weight, inputs, min_val=0, max_val=100, + parent_input_name=self.TIME_FRAMES_TO_WEIGHT, + array_indexes=[0], + title="Weight of this time frame. This is a multiplier: 0 means this time " + "frame is ignored, 100 means it's 100 times more impactful than another " + "time frame with a weight of 1."), + } + + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + if evaluator_type not in self.allowed_evaluator_types: + # only wake up on relevant callbacks + return + + try: + TA_by_timeframe = { + available_time_frame: matrix.get_evaluations_by_evaluator( + matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.TA.value, + cryptocurrency, + symbol, + available_time_frame.value, + allow_missing=False, + allowed_values=[commons_constants.START_PENDING_EVAL_NOTE]) + for available_time_frame in self.strategy_time_frames + } + + if evaluator_type == evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value: + # trigger re-evaluation + exchange_id = trading_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + await evaluators_channel.trigger_technical_evaluators_re_evaluation_with_updated_data(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + exchange_id, + self.strategy_time_frames) + # do not continue this evaluation + return + + total_evaluation = 0 + total_weights = 0 + + for time_frame, eval_by_ta in TA_by_timeframe.items(): + for evaluation in eval_by_ta.values(): + eval_value = evaluators_api.get_value(evaluation) + if evaluators_util.check_valid_eval_note(eval_value, eval_type=evaluators_api.get_type(evaluation), + expected_eval_type=evaluators_constants.EVALUATOR_EVAL_DEFAULT_TYPE): + weight = self.weight_by_time_frames.get(time_frame.value, self.DEFAULT_WEIGHT) + total_evaluation += eval_value * weight + total_weights += weight + + if total_weights > 0: + self.eval_note = total_evaluation / total_weights + await self.strategy_completed(cryptocurrency, symbol) + + except errors.UnsetTentacleEvaluation as e: + self.logger.error(f"Missing technical evaluator data for ({e})") + + @staticmethod + def _get_weight_by_time_frames(tf_to_weight): + return { + tf_and_weight[TechnicalAnalysisStrategyEvaluator.TIME_FRAME]: + tf_and_weight[TechnicalAnalysisStrategyEvaluator.WEIGHT] + for tf_and_weight in tf_to_weight + } diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/resources/SimpleStrategyEvaluator.md b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/resources/SimpleStrategyEvaluator.md new file mode 100644 index 0000000000..ee80c9e188 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/resources/SimpleStrategyEvaluator.md @@ -0,0 +1,9 @@ +SimpleStrategyEvaluator is the most flexible strategy. Meant to be customized, it is using +every activated technical, social and real time evaluator, and averages the evaluation value of +each to compute its final evaluation. + +This strategy can be used to make trading signals using as many evaluators as required. + +Used time frames are 1h, 4h and 1d by default. + +Warning: this strategy only considers evaluators with evaluations values between -1 and 1. diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/resources/TechnicalAnalysisStrategyEvaluator.md b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/resources/TechnicalAnalysisStrategyEvaluator.md new file mode 100644 index 0000000000..5e7667f9cb --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/resources/TechnicalAnalysisStrategyEvaluator.md @@ -0,0 +1,16 @@ +TechnicalAnalysisStrategyEvaluator a flexible technical analysis strategy. Meant to be customized, it is using +every activated technical evaluator and averages the evaluation value of each to compute its final evaluation. + +This strategy makes it possible to assign a weight to any time frame in order to make the related technical evaluations +more or less impactful for the final strategy evaluation. If not specified for a time frame, default weight is 50. + +This strategy can be used to create custom trading signals using as many technical +evaluators as desired. + +TechnicalAnalysisStrategyEvaluator can also use real time evaluators to trigger an instant re-evaluation of its technical +evaluators and react quickly. The evaluation value of these real time evaluators will not be considered in the final strategy +evaluation as they are only meant to trigger an emergency re-evaluation. + +Used time frames are 30m, 1h, 2h, 4h and 1d by default. + +Warning: this strategy only considers evaluators with evaluations values between -1 and 1. diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/test_simple_strategy_evaluator.py b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/test_simple_strategy_evaluator.py new file mode 100644 index 0000000000..b15c3c217f --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/test_simple_strategy_evaluator.py @@ -0,0 +1,106 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import pytest + +import tests.functional_tests.strategy_evaluators_tests.abstract_strategy_test as abstract_strategy_test +import tentacles.Evaluator.Strategies as Strategies +import tentacles.Trading.Mode as Mode + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def strategy_tester(): + strategy_tester_instance = SimpleStrategyEvaluatorTest() + strategy_tester_instance.initialize(Strategies.SimpleStrategyEvaluator, Mode.DailyTradingMode) + return strategy_tester_instance + + +class SimpleStrategyEvaluatorTest(abstract_strategy_test.AbstractStrategyTest): + """ + About using this test framework: + To be called by pytest, tests have to be called manually since the cythonized version of AbstractStrategyTest + creates an __init__() which prevents the default pytest tests collect process + """ + + async def test_default_run(self): + # market: -13.599062133645944 + await self.run_test_default_run(decimal.Decimal(str(-1.090))) + + async def test_slow_downtrend(self): + # market: -13.599062133645944 + # market: -44.248234106962656 + # market: -34.87003936300901 + # market: -45.18518518518518 + await self.run_test_slow_downtrend(decimal.Decimal(str(-1.090)), decimal.Decimal(str(-36.523)), + decimal.Decimal(str(-27.337)), decimal.Decimal(str(-31.155))) + + async def test_sharp_downtrend(self): + # market: -30.271723049610415 + # market: -32.091097308488614 + await self.run_test_sharp_downtrend(decimal.Decimal(str(-24.356)), decimal.Decimal(str(-32.781))) + + async def test_flat_markets(self): + # market: 5.052093571849795 + # market: 3.4840425531915002 + # market: -12.732688011913623 + # market: -34.64150943396227 + await self.run_test_flat_markets(decimal.Decimal(str(0.027)), decimal.Decimal(str(11.215)), + decimal.Decimal(str(-13.888)), decimal.Decimal(str(-4.472))) + + async def test_slow_uptrend(self): + # market: 32.524679029957184 + # market: 6.25 + await self.run_test_slow_uptrend(decimal.Decimal(str(15.031)), decimal.Decimal(str(0.831))) + + async def test_sharp_uptrend(self): + # market: 24.56254050550875 + # market: 8.665472458575891 + await self.run_test_sharp_uptrend(decimal.Decimal(str(14.212)), decimal.Decimal(str(13.007))) + + async def test_up_then_down(self): + # market: 1.1543668450702853 + await self.run_test_up_then_down(decimal.Decimal(str(2.674))) + + +async def test_default_run(strategy_tester): + await strategy_tester.test_default_run() + + +async def test_slow_downtrend(strategy_tester): + await strategy_tester.test_slow_downtrend() + + +async def test_sharp_downtrend(strategy_tester): + await strategy_tester.test_sharp_downtrend() + + +async def test_flat_markets(strategy_tester): + await strategy_tester.test_flat_markets() + + +async def test_slow_uptrend(strategy_tester): + await strategy_tester.test_slow_uptrend() + + +async def test_sharp_uptrend(strategy_tester): + await strategy_tester.test_sharp_uptrend() + + +async def test_up_then_down(strategy_tester): + await strategy_tester.test_up_then_down() diff --git a/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/test_technical_analysis_strategy_evaluator.py b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/test_technical_analysis_strategy_evaluator.py new file mode 100644 index 0000000000..e45d99466a --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/mixed_strategies_evaluator/tests/test_technical_analysis_strategy_evaluator.py @@ -0,0 +1,106 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import pytest + +import tests.functional_tests.strategy_evaluators_tests.abstract_strategy_test as abstract_strategy_test +import tentacles.Evaluator.Strategies as Strategies +import tentacles.Trading.Mode as Mode + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def strategy_tester(): + strategy_tester_instance = TechnicalAnalysisStrategyEvaluatorTest() + strategy_tester_instance.initialize(Strategies.TechnicalAnalysisStrategyEvaluator, Mode.DailyTradingMode) + return strategy_tester_instance + + +class TechnicalAnalysisStrategyEvaluatorTest(abstract_strategy_test.AbstractStrategyTest): + """ + About using this test framework: + To be called by pytest, tests have to be called manually since the cythonized version of AbstractStrategyTest + creates an __init__() which prevents the default pytest tests collect process + """ + + async def test_default_run(self): + # market: -12.052505966587105 + await self.run_test_default_run(decimal.Decimal(str(-8.699))) + + async def test_slow_downtrend(self): + # market: -12.052505966587105 + # market: -15.195702225633141 + # market: -29.12366137549725 + # market: -32.110091743119256 + await self.run_test_slow_downtrend(decimal.Decimal(str(-8.699)), decimal.Decimal(str(-9.671)), + decimal.Decimal(str(-16.968)), decimal.Decimal(str(-7.236))) + + async def test_sharp_downtrend(self): + # market: -26.07183938094741 + # market: -32.1654501216545 + await self.run_test_sharp_downtrend(decimal.Decimal(str(-19.903)), decimal.Decimal(str(-23.076))) + + async def test_flat_markets(self): + # market: -10.560669456066947 + # market: -3.401191658391241 + # market: -5.7854560064282765 + # market: -8.067940552016978 + await self.run_test_flat_markets(decimal.Decimal(str(0.289)), decimal.Decimal(str(1.813)), + decimal.Decimal(str(-4.596)), decimal.Decimal(str(3.884))) + + async def test_slow_uptrend(self): + # market: 17.203948364436457 + # market: 16.19613670133728 + await self.run_test_slow_uptrend(decimal.Decimal(str(8.245)), decimal.Decimal(str(2.882))) + + async def test_sharp_uptrend(self): + # market: 30.881852230166828 + # market: 12.28597871355852 + await self.run_test_sharp_uptrend(decimal.Decimal(str(1.418)), decimal.Decimal(str(4.362))) + + async def test_up_then_down(self): + # market: -6.040105108015155 + await self.run_test_up_then_down(decimal.Decimal(str(-0.964))) + + +async def test_default_run(strategy_tester): + await strategy_tester.test_default_run() + + +async def test_slow_downtrend(strategy_tester): + await strategy_tester.test_slow_downtrend() + + +async def test_sharp_downtrend(strategy_tester): + await strategy_tester.test_sharp_downtrend() + + +async def test_flat_markets(strategy_tester): + await strategy_tester.test_flat_markets() + + +async def test_slow_uptrend(strategy_tester): + await strategy_tester.test_slow_uptrend() + + +async def test_sharp_uptrend(strategy_tester): + await strategy_tester.test_sharp_uptrend() + + +async def test_up_then_down(strategy_tester): + await strategy_tester.test_up_then_down() diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/__init__.py b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/__init__.py new file mode 100644 index 0000000000..f13c9a9889 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/__init__.py @@ -0,0 +1 @@ +from .move_signals_strategy import MoveSignalsStrategyEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/config/MoveSignalsStrategyEvaluator.json b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/config/MoveSignalsStrategyEvaluator.json new file mode 100644 index 0000000000..02210a5641 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/config/MoveSignalsStrategyEvaluator.json @@ -0,0 +1,5 @@ +{ + "required_time_frames" : ["30m", "1h", "4h"], + "required_evaluators" : ["InstantFluctuationsEvaluator", "KlingerOscillatorMomentumEvaluator", "BBMomentumEvaluator"], + "default_config" : ["KlingerOscillatorMomentumEvaluator", "BBMomentumEvaluator"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/metadata.json b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/metadata.json new file mode 100644 index 0000000000..c948e884d7 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["MoveSignalsStrategyEvaluator"], + "tentacles-requirements": ["momentum_evaluator.py"] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/move_signals_strategy.py b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/move_signals_strategy.py new file mode 100644 index 0000000000..ad8fe05ce7 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/move_signals_strategy.py @@ -0,0 +1,165 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enum +import octobot_evaluators.api.matrix as evaluators_api +import octobot_evaluators.evaluators.channel as evaluators_channel +import octobot_evaluators.matrix as matrix +import octobot_evaluators.enums as evaluators_enums +import octobot_evaluators.errors as errors +import octobot_evaluators.evaluators as evaluators +import octobot_trading.api as trading_api +import tentacles.Evaluator.TA as TA + + +class MoveSignalsStrategyEvaluator(evaluators.StrategyEvaluator): + SIGNAL_CLASS_NAME = TA.KlingerOscillatorMomentumEvaluator.get_name() + WEIGHT_CLASS_NAME = TA.BBMomentumEvaluator.get_name() + + SHORT_PERIOD_WEIGHT = 4 + MEDIUM_PERIOD_WEIGHT = 3 + LONG_PERIOD_WEIGHT = 3 + + SIGNAL_MINIMUM_THRESHOLD = 0.15 + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.evaluation_time_frames = [commons_enum.TimeFrames.THIRTY_MINUTES.value, + commons_enum.TimeFrames.ONE_HOUR.value, + commons_enum.TimeFrames.FOUR_HOURS.value] + self.weights_and_period_evals = [] + self.short_period_eval = None + self.medium_period_eval = None + self.long_period_eval = None + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + pass + + async def matrix_callback(self, + matrix_id, + evaluator_name, + evaluator_type, + eval_note, + eval_note_type, + eval_note_description, + eval_note_metadata, + exchange_name, + cryptocurrency, + symbol, + time_frame): + if evaluator_type == evaluators_enums.EvaluatorMatrixTypes.REAL_TIME.value: + # trigger re-evaluation + exchange_id = trading_api.get_exchange_id_from_matrix_id(exchange_name, matrix_id) + await evaluators_channel.trigger_technical_evaluators_re_evaluation_with_updated_data(matrix_id, + evaluator_name, + evaluator_type, + exchange_name, + cryptocurrency, + symbol, + exchange_id, + self.strategy_time_frames) + # do not continue this evaluation + return + elif evaluator_type == evaluators_enums.EvaluatorMatrixTypes.TA.value: + try: + TA_by_timeframe = { + available_time_frame: matrix.get_evaluations_by_evaluator( + matrix_id, + exchange_name, + evaluators_enums.EvaluatorMatrixTypes.TA.value, + cryptocurrency, + symbol, + available_time_frame.value, + allow_missing=False, + allowed_values=[commons_constants.START_PENDING_EVAL_NOTE]) + for available_time_frame in self.strategy_time_frames + } + + self._refresh_evaluations(TA_by_timeframe) + self._compute_final_evaluation() + await self.strategy_completed(cryptocurrency, symbol) + + except errors.UnsetTentacleEvaluation as e: + self.logger.debug(f"Tentacles evaluation initialization: not ready yet for a strategy update ({e})") + except KeyError as e: + self.logger.exception(e, True, f"Missing {e} evaluation in matrix for {symbol} on {time_frame}, " + f"did you activate the required evaluator ?") + + def _compute_final_evaluation(self): + weights = 0 + composite_evaluation = 0 + for weight, evaluation in self.weights_and_period_evals: + composite_evaluation += self._compute_fractal_evaluation(evaluation, weight) + weights += weight + self.eval_note = composite_evaluation / weights + + @staticmethod + def _compute_fractal_evaluation(signal_with_weight, multiplier): + if signal_with_weight.signal != commons_constants.START_PENDING_EVAL_NOTE \ + and signal_with_weight.weight != commons_constants.START_PENDING_EVAL_NOTE: + evaluation_sign = signal_with_weight.signal * signal_with_weight.weight + if abs(signal_with_weight.signal) >= MoveSignalsStrategyEvaluator.SIGNAL_MINIMUM_THRESHOLD \ + and evaluation_sign > 0: + eval_side = 1 if signal_with_weight.signal > 0 else -1 + signal_strength = 2 * signal_with_weight.signal * signal_with_weight.weight + weighted_eval = min(signal_strength, 1) + return weighted_eval * multiplier * eval_side + return 0 + + def _refresh_evaluations(self, TA_by_timeframe): + for _, evaluation in self.weights_and_period_evals: + evaluation.refresh_evaluation(TA_by_timeframe) + + def _get_tentacle_registration_topic(self, all_symbols_by_crypto_currencies, time_frames, real_time_time_frames): + currencies, symbols, time_frames = super()._get_tentacle_registration_topic(all_symbols_by_crypto_currencies, + time_frames, + real_time_time_frames) + # register evaluation fractals based on available time frames + self._register_time_frame(commons_enum.TimeFrames.THIRTY_MINUTES, self.SHORT_PERIOD_WEIGHT) + self._register_time_frame(commons_enum.TimeFrames.ONE_HOUR, self.MEDIUM_PERIOD_WEIGHT) + self._register_time_frame(commons_enum.TimeFrames.FOUR_HOURS, self.LONG_PERIOD_WEIGHT) + return currencies, symbols, time_frames + + def _register_time_frame(self, time_frame, weight): + if time_frame in self.strategy_time_frames: + self.weights_and_period_evals.append((weight, + SignalWithWeight(time_frame))) + else: + self.logger.warning(f"Missing {time_frame.value} time frame on {self.exchange_name}, " + f"this strategy will not work at its optimal potential.") + + +class SignalWithWeight: + + def __init__(self, time_frame): + self.time_frame = time_frame + self.signal = commons_constants.START_PENDING_EVAL_NOTE + self.weight = commons_constants.START_PENDING_EVAL_NOTE + + def reset_evaluation(self): + self.signal = commons_constants.START_PENDING_EVAL_NOTE + self.weight = commons_constants.START_PENDING_EVAL_NOTE + + def refresh_evaluation(self, TA_by_timeframe): + self.reset_evaluation() + self.signal = evaluators_api.get_value( + TA_by_timeframe[self.time_frame][MoveSignalsStrategyEvaluator.SIGNAL_CLASS_NAME]) + self.weight = evaluators_api.get_value( + TA_by_timeframe[self.time_frame][MoveSignalsStrategyEvaluator.WEIGHT_CLASS_NAME]) diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/resources/MoveSignalsStrategyEvaluator.md b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/resources/MoveSignalsStrategyEvaluator.md new file mode 100644 index 0000000000..0c53454d7c --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/resources/MoveSignalsStrategyEvaluator.md @@ -0,0 +1,17 @@ +MoveSignalsStrategyEvaluator is a fractal strategy: it is using different time frames to +balance decisions. + +This strategy is using the KlingerOscillatorMomentumEvaluator based on the [Klinger Oscillator](https://www.investopedia.com/terms/k/klingeroscillator.asp) +to know when to start a trade and BBMomentumEvaluator based on [Bollinger Bands](https://www.investopedia.com/terms/b/bollingerbands.asp) +to know how much weight to give to this trade. + +This strategy is updated at the end of each candle on the watched time frame which is each 30 minutes. + +It is also possible to make it trigger +automatically using a real-time evaluator. Using a real time evaluator that signals sudden market changes like the +InstantFluctuationsEvaluator will make MoveSignalsStrategyEvaluator also wake up on such events. + +Used time frames are 30m, 1h and 4h. + +Warning: MoveSignalsStrategyEvaluator only works on liquid markets because the Klinger Oscillator requires enough +volume and candles continuity to be accurate. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/tests/test_move_signals_strategy_evaluator.py b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/tests/test_move_signals_strategy_evaluator.py new file mode 100644 index 0000000000..4837e6dfd4 --- /dev/null +++ b/packages/tentacles/Evaluator/Strategies/move_signals_strategy_evaluator/tests/test_move_signals_strategy_evaluator.py @@ -0,0 +1,106 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import pytest + +import tests.functional_tests.strategy_evaluators_tests.abstract_strategy_test as abstract_strategy_test +import tentacles.Evaluator.Strategies as Strategies +import tentacles.Trading.Mode as Mode + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def strategy_tester(): + strategy_tester_instance = MoveSignalsStrategyEvaluatorTest() + strategy_tester_instance.initialize(Strategies.MoveSignalsStrategyEvaluator, Mode.SignalTradingMode) + return strategy_tester_instance + + +class MoveSignalsStrategyEvaluatorTest(abstract_strategy_test.AbstractStrategyTest): + """ + About using this test framework: + To be called by pytest, tests have to be called manually since the cythonized version of AbstractStrategyTest + creates an __init__() which prevents the default pytest tests collect process + """ + + async def test_default_run(self): + # market: -12.052505966587105 + await self.run_test_default_run(decimal.Decimal(str(-2.549))) + + async def test_slow_downtrend(self): + # market: -12.052505966587105 + # market: -15.195702225633141 + # market: -29.12366137549725 + # market: -32.110091743119256 + await self.run_test_slow_downtrend(decimal.Decimal(str(-2.549)), decimal.Decimal(str(-3.452)), + decimal.Decimal(str(-17.393)), decimal.Decimal(str(-15.761))) + + async def test_sharp_downtrend(self): + # market: -26.07183938094741 + # market: -32.1654501216545 + await self.run_test_sharp_downtrend(decimal.Decimal(str(-12.078)), decimal.Decimal(str(-10.3))) + + async def test_flat_markets(self): + # market: -10.560669456066947 + # market: -3.401191658391241 + # market: -5.7854560064282765 + # market: -8.067940552016978 + await self.run_test_flat_markets(decimal.Decimal(str(-0.200)), decimal.Decimal(str(0.353)), + decimal.Decimal(str(-8.126)), decimal.Decimal(str(-7.038))) + + async def test_slow_uptrend(self): + # market: 17.203948364436457 + # market: 16.19613670133728 + await self.run_test_slow_uptrend(decimal.Decimal(str(10.278)), decimal.Decimal(str(4.299))) + + async def test_sharp_uptrend(self): + # market: 30.881852230166828 + # market: 12.28597871355852 + await self.run_test_sharp_uptrend(decimal.Decimal(str(6.504)), decimal.Decimal(str(5.411))) + + async def test_up_then_down(self): + # market: -6.040105108015155 + await self.run_test_up_then_down(decimal.Decimal(str(-6.691))) + + +async def test_default_run(strategy_tester): + await strategy_tester.test_default_run() + + +async def test_slow_downtrend(strategy_tester): + await strategy_tester.test_slow_downtrend() + + +async def test_sharp_downtrend(strategy_tester): + await strategy_tester.test_sharp_downtrend() + + +async def test_flat_markets(strategy_tester): + await strategy_tester.test_flat_markets() + + +async def test_slow_uptrend(strategy_tester): + await strategy_tester.test_slow_uptrend() + + +async def test_sharp_uptrend(strategy_tester): + await strategy_tester.test_sharp_uptrend() + + +async def test_up_then_down(strategy_tester): + await strategy_tester.test_up_then_down() diff --git a/packages/tentacles/Evaluator/TA/ai_evaluator/__init__.py b/packages/tentacles/Evaluator/TA/ai_evaluator/__init__.py new file mode 100644 index 0000000000..5683510a89 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/ai_evaluator/__init__.py @@ -0,0 +1 @@ +from .ai import GPTEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/ai_evaluator/ai.py b/packages/tentacles/Evaluator/TA/ai_evaluator/ai.py new file mode 100644 index 0000000000..89e5dcd444 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/ai_evaluator/ai.py @@ -0,0 +1,347 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import tulipy +import os + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as commons_enums +import octobot_commons.enums as enums +import octobot_commons.os_util as os_util +import octobot_commons.data_util as data_util +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.util as evaluators_util +import octobot_evaluators.errors as evaluators_errors +import octobot_trading.api as trading_api +import octobot_services.api as services_api +import octobot_services.errors as services_errors +import tentacles.Services.Services_bases + + +def _get_llm_service(): + try: + return tentacles.Services.Services_bases.LLMSignalService + except (AttributeError, ImportError): + raise ImportError("the llm_service tentacle is not installed") + + +class GPTEvaluator(evaluators.TAEvaluator): + GLOBAL_VERSION = 1 + PREPROMPT = "Predict: {up or down} {confidence%} (no other information)" + PASSED_DATA_LEN = 10 + MAX_CONFIDENCE_PERCENT = 100 + HIGH_CONFIDENCE_PERCENT = 80 + MEDIUM_CONFIDENCE_PERCENT = 50 + LOW_CONFIDENCE_PERCENT = 30 + INDICATORS = { + "No indicator: raw candles price data": lambda data, period: data, + "EMA: Exponential Moving Average": tulipy.ema, + "SMA: Simple Moving Average": tulipy.sma, + "Kaufman Adaptive Moving Average": tulipy.kama, + "Hull Moving Average": tulipy.kama, + "RSI: Relative Strength Index": tulipy.rsi, + "Detrended Price Oscillator": tulipy.dpo, + } + SOURCES = ["Open", "High", "Low", "Close", "Volume", "Full candle (For no indicator only)"] + ALLOW_GPT_REEVALUATION_ENV = "ALLOW_GPT_REEVALUATIONS" + SAME_MODEL_AS_SERVICE = "use interface configured model" + GPT_MODELS = [] + ALLOW_TOKEN_LIMIT_UPDATE = False + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.indicator = None + self.source = None + self.period = None + self.min_confidence_threshold = 100 + self.gpt_model = _get_llm_service().DEFAULT_MODEL + self.is_backtesting = False + self.min_allowed_timeframe = os.getenv("MIN_GPT_TIMEFRAME", None) + self.enable_model_selector = os_util.parse_boolean_environment_var("ENABLE_GPT_MODELS_SELECTOR", "True") + self._min_allowed_timeframe_minutes = 0 + try: + if self.min_allowed_timeframe: + self._min_allowed_timeframe_minutes = \ + commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(self.min_allowed_timeframe)] + except ValueError: + self.logger.error(f"Invalid timeframe configuration: unknown timeframe: '{self.min_allowed_timeframe}'") + self.allow_reevaluations = os_util.parse_boolean_environment_var(self.ALLOW_GPT_REEVALUATION_ENV, "True") + self.gpt_tokens_limit = _get_llm_service().NO_TOKEN_LIMIT_VALUE + self.services_config = None + + def enable_reevaluation(self) -> bool: + """ + Override when artificial re-evaluations from the evaluator channel can be disabled + """ + return self.allow_reevaluations + + @classmethod + def get_signals_history_type(cls): + """ + Override when this evaluator uses a specific type of signal history + """ + return commons_enums.SignalHistoryTypes.GPT + + async def load_and_save_user_inputs(self, bot_id: str) -> dict: + """ + instance method API for user inputs + Initialize and save the tentacle user inputs in run data + :return: the filled user input configuration + """ + self.is_backtesting = self._is_in_backtesting() + if self.is_backtesting and not services_api.is_service_used_by_backtestable_feed(_get_llm_service()): + self.logger.error(f"{self.get_name()} is disabled in backtesting. It will only emit neutral evaluations") + await self._init_GPT_models() + return await super().load_and_save_user_inputs(bot_id) + + def init_user_inputs(self, inputs: dict) -> None: + self.indicator = self.UI.user_input( + "indicator", enums.UserInputTypes.OPTIONS, next(iter(self.INDICATORS)), + inputs, options=list(self.INDICATORS), + title="Indicator: the technical indicator to apply and give the result of to chat GPT." + ) + self.source = self.UI.user_input( + "source", enums.UserInputTypes.OPTIONS, self.SOURCES[3], + inputs, options=self.SOURCES, + title="Source: values of candles data to pass to the indicator." + ) + self.period = self.UI.user_input( + "period", enums.UserInputTypes.INT, + self.period, inputs, min_val=1, + title="Period: length of the indicator period or the number of candles to give to ChatGPT." + ) + self.min_confidence_threshold = self.UI.user_input( + "min_confidence_threshold", enums.UserInputTypes.INT, + self.min_confidence_threshold, inputs, min_val=0, max_val=100, + title="Minimum confidence threshold: % confidence value starting from which to return 1 or -1." + ) + if self.enable_model_selector: + current_value = self.specific_config.get("GPT_model") + models = (list(self.GPT_MODELS) or ( + [current_value] if current_value else [_get_llm_service().DEFAULT_MODEL] + )) + [self.SAME_MODEL_AS_SERVICE] + self.gpt_model = self.UI.user_input( + "GPT model", enums.UserInputTypes.OPTIONS, self.SAME_MODEL_AS_SERVICE, + inputs, options=sorted(models), + title="GPT Model: the GPT model to use. Enable the evaluator to load other models." + ) + if os_util.parse_boolean_environment_var(self.ALLOW_GPT_REEVALUATION_ENV, "True"): + self.allow_reevaluations = self.UI.user_input( + "allow_reevaluation", enums.UserInputTypes.BOOLEAN, self.allow_reevaluations, + inputs, + title="Allow Reevaluation: send a ChatGPT request when realtime evaluators trigger a " + "global reevaluation Use latest available value otherwise. " + "Warning: enabling this can lead to a large amount of GPT requests and consumed tokens." + ) + if self.ALLOW_TOKEN_LIMIT_UPDATE: + self.gpt_tokens_limit = self.UI.user_input( + "max_gpt_tokens", enums.UserInputTypes.INT, + self.gpt_tokens_limit, inputs, min_val=_get_llm_service().NO_TOKEN_LIMIT_VALUE, + title=f"OpenAI token limit: maximum daily number of tokens to consume with a given OctoBot instance. " + f"Use {_get_llm_service().NO_TOKEN_LIMIT_VALUE} to remove the limit." + ) + + async def _init_GPT_models(self): + if not self.GPT_MODELS: + self.GPT_MODELS = [_get_llm_service().DEFAULT_MODEL] + if self.enable_model_selector and not self.is_backtesting: + try: + service = await services_api.get_service( + _get_llm_service(), self.is_backtesting, self.services_config + ) + self.GPT_MODELS = service.models + self.ALLOW_TOKEN_LIMIT_UPDATE = service.allow_token_limit_update() + except Exception as err: + self.logger.exception(err, True, f"Impossible to fetch GPT models: {err}") + + async def _init_registered_topics(self, all_symbols_by_crypto_currencies, currencies, symbols, time_frames): + await super()._init_registered_topics(all_symbols_by_crypto_currencies, currencies, symbols, time_frames) + for time_frame in time_frames: + if not self._check_timeframe(time_frame.value): + self.logger.error(f"{time_frame.value} time frame will be ignored for {self.get_name()} " + f"as {time_frame.value} is not allowed in this configuration. " + f"The shortest allowed time frame is {self.min_allowed_timeframe}. {self.get_name()} " + f"will emit neutral evaluations on this time frame.") + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = self.get_candles_data(exchange, exchange_id, symbol, time_frame, inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + async with self.async_evaluation(): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if self._check_timeframe(time_frame): + try: + candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + computed_data = self.call_indicator(candle_data) + formatted_data = self.get_formatted_data(computed_data) + prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time) \ + or "" + cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower() + prediction_side = self._parse_prediction_side(cleaned_prediction) + if prediction_side == 0 and not self.is_backtesting: + self.logger.warning( + f"Ignored ChatGPT answer for {symbol} {time_frame}, answer: '{cleaned_prediction}': " + f"missing prediction or % accuracy." + ) + return + confidence = self._parse_confidence(cleaned_prediction) / 100 + self.eval_note = prediction_side * confidence + except services_errors.InvalidRequestError as e: + self.logger.error(f"Invalid GPT request: {e}") + except services_errors.RateLimitError as e: + self.logger.error(f"Impossible to get ChatGPT evaluation for {symbol} on {time_frame}: " + f"No remaining free tokens for today : {e}. To prevent this, you can reduce the " + f"amount of traded pairs, use larger time frames or increase the maximum " + f"allowed tokens.") + except services_errors.UnavailableInBacktestingError: + # error already logged error for backtesting in use_backtesting_init_timeout + pass + except evaluators_errors.UnavailableEvaluatorError as e: + self.logger.exception(e, True, f"Evaluation error: {e}") + except tulipy.lib.InvalidOptionError as e: + self.logger.warning( + f"Error when computing {self.indicator} on {self.period} period with {len(candle_data)} " + f"candles: {e}" + ) + self.logger.exception(e, False) + else: + self.logger.debug(f"Ignored {time_frame} time frame as the shorted allowed time frame is " + f"{self.min_allowed_timeframe}") + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + def get_formatted_data(self, computed_data) -> str: + if self.source in self.get_unformated_sources(): + return str(computed_data) + reduced_data = computed_data[-self.PASSED_DATA_LEN:] + return ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data) + + async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str: + try: + service = await services_api.get_service( + _get_llm_service(), + self.is_backtesting, + {} if self.is_backtesting else self.services_config + ) + service.apply_daily_token_limit_if_possible(self.gpt_tokens_limit) + model = ( + self.gpt_model + if ( + self.enable_model_selector + and self.gpt_model != self.SAME_MODEL_AS_SERVICE + ) else None + ) + resp = await service.get_chat_completion( + [ + service.create_message("system", preprompt, model=model), + service.create_message("user", inputs, model=model), + ], + model=model, + exchange=self.exchange_name, + symbol=symbol, + time_frame=time_frame, + version=self.get_version(), + candle_open_time=candle_time, + use_stored_signals=self.is_backtesting + ) + self.logger.info( + f"GPT's answer is '{resp}' for {symbol} on {time_frame} with input: {inputs} " + f"and candle_time: {candle_time}" + ) + return resp + except services_errors.CreationError as err: + raise evaluators_errors.UnavailableEvaluatorError(f"Impossible to get ChatGPT prediction: {err}") from err + + def get_version(self): + # later on, identify by its specs + # return f"{self.gpt_model}-{self.source}-{self.indicator}-{self.period}-{self.GLOBAL_VERSION}" + return "0.0.0" + + def call_indicator(self, candle_data): + if self.source in self.get_unformated_sources(): + return candle_data + return data_util.drop_nan(self.INDICATORS[self.indicator](candle_data, self.period)) + + def get_candles_data(self, exchange, exchange_id, symbol, time_frame, inc_in_construction_data): + if self.source in self.get_unformated_sources(): + limit = self.period if inc_in_construction_data else self.period + 1 + full_candles = trading_api.get_candles_as_list( + trading_api.get_symbol_historical_candles( + self.get_exchange_symbol_data(exchange, exchange_id, symbol), time_frame, limit=limit + ) + ) + # remove time value + for candle in full_candles: + candle.pop(commons_enums.PriceIndexes.IND_PRICE_TIME.value) + if inc_in_construction_data: + return full_candles + return full_candles[:-1] + return self.get_candles_data_api()( + self.get_exchange_symbol_data(exchange, exchange_id, symbol), time_frame, + include_in_construction=inc_in_construction_data + ) + + def get_unformated_sources(self): + return (self.SOURCES[5], ) + + def get_candles_data_api(self): + return { + self.SOURCES[0]: trading_api.get_symbol_open_candles, + self.SOURCES[1]: trading_api.get_symbol_high_candles, + self.SOURCES[2]: trading_api.get_symbol_low_candles, + self.SOURCES[3]: trading_api.get_symbol_close_candles, + self.SOURCES[4]: trading_api.get_symbol_volume_candles, + }[self.source] + + def _check_timeframe(self, time_frame): + return commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(time_frame)] >= \ + self._min_allowed_timeframe_minutes + + def _parse_prediction_side(self, cleaned_prediction): + if "down " in cleaned_prediction: + return 1 + elif "up " in cleaned_prediction: + return -1 + return 0 + + def _parse_confidence(self, cleaned_prediction): + """ + possible formats: + up 70% (most common case) + up with 70% confidence + up with high confidence + """ + value = self.LOW_CONFIDENCE_PERCENT + if "%" in cleaned_prediction: + percent_index = cleaned_prediction.index("%") + bracket_index = (cleaned_prediction[:percent_index].rindex("{") + 1) \ + if "{" in cleaned_prediction[:percent_index] else 0 + value = float(cleaned_prediction[bracket_index:percent_index].split(" ")[-1]) + elif "high" in cleaned_prediction: + value = self.HIGH_CONFIDENCE_PERCENT + elif "medium" in cleaned_prediction or "intermediate" in cleaned_prediction: + value = self.MEDIUM_CONFIDENCE_PERCENT + elif "low" in cleaned_prediction: + value = self.LOW_CONFIDENCE_PERCENT + elif not cleaned_prediction: + value = 0 + else: + self.logger.warning(f"Impossible to parse confidence in {cleaned_prediction}. Using low confidence") + if value >= self.min_confidence_threshold: + return self.MAX_CONFIDENCE_PERCENT + return value diff --git a/packages/tentacles/Evaluator/TA/ai_evaluator/config/GPTEvaluator.json b/packages/tentacles/Evaluator/TA/ai_evaluator/config/GPTEvaluator.json new file mode 100644 index 0000000000..4fbbdd47a6 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/ai_evaluator/config/GPTEvaluator.json @@ -0,0 +1,9 @@ +{ + "indicator": "No indicator: raw candles price data", + "period": 2, + "source": "Close", + "min_confidence_threshold": 100, + "allow_reevaluation": false, + "max_gpt_tokens": -1, + "GPT_model": "use interface configured model" +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/ai_evaluator/metadata.json b/packages/tentacles/Evaluator/TA/ai_evaluator/metadata.json new file mode 100644 index 0000000000..316a4c58c9 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/ai_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["GPTEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md b/packages/tentacles/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md new file mode 100644 index 0000000000..868714a92e --- /dev/null +++ b/packages/tentacles/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md @@ -0,0 +1,27 @@ +Uses [Chat GPT](https://chat.openai.com/) to predict the next moves of the market. + +Evaluates between -1 to 1 according to ChatGPT's prediction of the selected data and its confidence. + +Learn more about ChatGPT trading strategies from our + +ChatGPT Trading guide. + +
+
+ + + + +
+ Example of a trading strategy using ChatGPT and the ChatGPTEvaluator +
+ +Any question ? Checkout our [ChatGPT setup guide](https://www.octobot.cloud/en/guides/octobot-interfaces/chatgpt?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=GPTEvaluator) to configure your OctoBot +to use ChatGPT. + +Note: this evaluator can only be used in backtesting for markets where historical ChatGPT data are available. +Find the full list of supported historical markets on our [ChatGPT page](https://www.octobot.cloud/features/chatgpt-trading?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=GPTEvaluator). + + diff --git a/packages/tentacles/Evaluator/TA/ai_evaluator/tests/test_ai.py b/packages/tentacles/Evaluator/TA/ai_evaluator/tests/test_ai.py new file mode 100644 index 0000000000..145573cce3 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/ai_evaluator/tests/test_ai.py @@ -0,0 +1,64 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import types +import mock +import pytest +import numpy + +import tentacles.Evaluator.TA.ai_evaluator as ai_evaluator + + +@pytest.fixture +def GPT_evaluator(): + return ai_evaluator.GPTEvaluator(mock.Mock(is_tentacle_activated=mock.Mock(return_value=True))) + + +def test_indicators(GPT_evaluator): + data = numpy.array([100, 223, 123, 23, 134, 124, 434, 3243, 121, 3242.34, 1212, 87, 232.32]) + for indicator in GPT_evaluator.INDICATORS: + GPT_evaluator.indicator = indicator + GPT_evaluator.period = 2 + assert len(data) - (GPT_evaluator.period + 1) <= len(GPT_evaluator.call_indicator(data)) <= len(data) + + +def test_get_candles_data_api(GPT_evaluator): + for source in GPT_evaluator.SOURCES: + GPT_evaluator.source = source + if GPT_evaluator.source not in GPT_evaluator.get_unformated_sources(): + assert isinstance(GPT_evaluator.get_candles_data_api(), types.FunctionType) + + +def test_parse_prediction_side(GPT_evaluator): + assert GPT_evaluator._parse_prediction_side("up 70%") == -1 + assert GPT_evaluator._parse_prediction_side("plop up 70%") == -1 + assert GPT_evaluator._parse_prediction_side(" up with 70%") == -1 + assert GPT_evaluator._parse_prediction_side("Prediction: up with 70% confidence") == -1 + + assert GPT_evaluator._parse_prediction_side("down 70%") == 1 + assert GPT_evaluator._parse_prediction_side("plop down 70%") == 1 + assert GPT_evaluator._parse_prediction_side(" down with 70%") == 1 + assert GPT_evaluator._parse_prediction_side("Prediction: down with 70% confidence") == 1 + + +def test_parse_confidence(GPT_evaluator): + assert GPT_evaluator._parse_confidence("up 70%") == 70 + assert GPT_evaluator._parse_confidence("up 54.33%") == 54.33 + assert GPT_evaluator._parse_confidence("down 70% confidence blablabla") == 70 + assert GPT_evaluator._parse_confidence("Prediction: down 70%") == 70 + GPT_evaluator.min_confidence_threshold = 60 + assert GPT_evaluator._parse_confidence("up 70%") == 100 + assert GPT_evaluator._parse_confidence("up 60%") == 100 + assert GPT_evaluator._parse_confidence("up 59%") == 59 diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/__init__.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/__init__.py new file mode 100644 index 0000000000..1d674ac90e --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/__init__.py @@ -0,0 +1,3 @@ +from .momentum import RSIMomentumEvaluator, ADXMomentumEvaluator, RSIWeightMomentumEvaluator, \ + BBMomentumEvaluator, MACDMomentumEvaluator, KlingerOscillatorMomentumEvaluator, \ + KlingerOscillatorReversalConfirmationMomentumEvaluator, EMAMomentumEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/ADXMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/ADXMomentumEvaluator.json new file mode 100644 index 0000000000..c321fb0141 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/ADXMomentumEvaluator.json @@ -0,0 +1,3 @@ +{ + "period_length": 14 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/BBMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/BBMomentumEvaluator.json new file mode 100644 index 0000000000..a515eb2a39 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/BBMomentumEvaluator.json @@ -0,0 +1,3 @@ +{ + "period_length": 20 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/EMAMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/EMAMomentumEvaluator.json new file mode 100644 index 0000000000..700363c289 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/EMAMomentumEvaluator.json @@ -0,0 +1,4 @@ +{ + "period_length": 21, + "price_threshold_percent": 2 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/KlingerOscillatorMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/KlingerOscillatorMomentumEvaluator.json new file mode 100644 index 0000000000..aa08f59f1e --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/KlingerOscillatorMomentumEvaluator.json @@ -0,0 +1,5 @@ +{ + "ema_signal_period": 13, + "long_period": 55, + "short_period": 35 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/KlingerOscillatorReversalConfirmationMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/KlingerOscillatorReversalConfirmationMomentumEvaluator.json new file mode 100644 index 0000000000..aa08f59f1e --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/KlingerOscillatorReversalConfirmationMomentumEvaluator.json @@ -0,0 +1,5 @@ +{ + "ema_signal_period": 13, + "long_period": 55, + "short_period": 35 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/MACDMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/MACDMomentumEvaluator.json new file mode 100644 index 0000000000..dfa2ad910c --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/MACDMomentumEvaluator.json @@ -0,0 +1,5 @@ +{ + "long_period_length": 26, + "short_period_length": 12, + "signal_period_length": 9 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/RSIMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/RSIMomentumEvaluator.json new file mode 100644 index 0000000000..a61bbfae5c --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/RSIMomentumEvaluator.json @@ -0,0 +1,6 @@ +{ + "long_threshold": 30, + "period_length": 14, + "short_threshold": 70, + "trend_change_identifier": true +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/config/RSIWeightMomentumEvaluator.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/RSIWeightMomentumEvaluator.json new file mode 100644 index 0000000000..e8ab9223a0 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/config/RSIWeightMomentumEvaluator.json @@ -0,0 +1,127 @@ +{ + "period": 14, + "slow_eval_count": 16, + "fast_eval_count": 4, + "RSI_to_weight": [ + { + "slow_threshold": 30, + "fast_thresholds": + [ + { + "fast_threshold" : 20, + "weights": { + "price": 2, + "volume": 2 + } + }, + { + "fast_threshold" : 30, + "weights": { + "price": 1, + "volume": 1 + } + } + ] + }, + { + "slow_threshold": 35, + "fast_thresholds": + [ + { + "fast_threshold" : 20, + "weights": { + "price": 3, + "volume": 3 + } + }, + { + "fast_threshold" : 35, + "weights": { + "price": 1, + "volume": 1 + } + } + ] + }, + { + "slow_threshold": 45, + "fast_thresholds": + [ + { + "fast_threshold" : 20, + "weights": { + "price": 3, + "volume": 3 + } + }, + { + "fast_threshold" : 40, + "weights": { + "price": 2, + "volume": 1 + } + } + ] + }, + { + "slow_threshold": 55, + "fast_thresholds": + [ + { + "fast_threshold" : 45, + "weights": { + "price": 1, + "volume": 1 + } + } + ] + }, + { + "slow_threshold": 65, + "fast_thresholds": + [ + { + "fast_threshold" : 45, + "weights": { + "price": 1, + "volume": 1 + } + }, + { + "fast_threshold" : 55, + "weights": { + "price": 3, + "volume": 2 + } + }, + { + "fast_threshold" : 60, + "weights": { + "price": 2, + "volume": 1 + } + } + ] + }, + { + "slow_threshold": 70, + "fast_thresholds": + [ + { + "fast_threshold" : 55, + "weights": { + "price": 3, + "volume": 2 + } + }, + { + "fast_threshold" : 70, + "weights": { + "price": 2, + "volume": 2 + } + } + ] + } + ] +} diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/metadata.json b/packages/tentacles/Evaluator/TA/momentum_evaluator/metadata.json new file mode 100644 index 0000000000..ab35785c48 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/metadata.json @@ -0,0 +1,8 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["RSIMomentumEvaluator", "ADXMomentumEvaluator", "RSIWeightMomentumEvaluator", "BBMomentumEvaluator", + "MACDMomentumEvaluator", "KlingerOscillatorMomentumEvaluator", + "KlingerOscillatorReversalConfirmationMomentumEvaluator", "EMAMomentumEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/momentum.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/momentum.py new file mode 100644 index 0000000000..843b240be2 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/momentum.py @@ -0,0 +1,794 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import math +import numpy +import tulipy +import typing + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as enums +import octobot_commons.data_util as data_util +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.util as evaluators_util +import octobot_trading.api as trading_api +import tentacles.Evaluator.Util as EvaluatorUtil + + +class RSIMomentumEvaluator(evaluators.TAEvaluator): + PERIOD_LENGTH = "period_length" + TREND_CHANGE_IDENTIFIER = "trend_change_identifier" + LONG_THRESHOLD = "long_threshold" + SHORT_THRESHOLD = "short_threshold" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.pertinence = 1 + self.period_length = 14 + self.short_threshold = 70 + self.long_threshold = 30 + self.is_trend_change_identifier = True + self.short_term_averages = [7, 5, 4, 3, 2, 1] + self.long_term_averages = [40, 30, 20, 15, 10] + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the evaluator, should define all the evaluator's user inputs + """ + default_config = self.get_default_config() + self.period_length = self.UI.user_input( + self.PERIOD_LENGTH, enums.UserInputTypes.INT, default_config["period_length"], + inputs, min_val=0, title="RSI period length" + ) + + self.is_trend_change_identifier = self.UI.user_input( + self.TREND_CHANGE_IDENTIFIER, enums.UserInputTypes.BOOLEAN, + default_config["trend_change_identifier"], inputs, + title="Trend identifier: Identify RSI trend changes and evaluate the trend changes strength", + ) + self.short_threshold = self.UI.user_input( + self.SHORT_THRESHOLD, enums.UserInputTypes.FLOAT, default_config["short_threshold"], inputs, + min_val=0, + title="Short threshold: RSI value from with to send a short (sell) signal. " + "Evaluates as 1 when the current RSI value is equal or higher.", + editor_options={ + enums.UserInputOtherSchemaValuesTypes.DEPENDENCIES.value: { + "trend_change_identifier": False + } + } + ) + self.long_threshold = self.UI.user_input( + self.LONG_THRESHOLD, enums.UserInputTypes.FLOAT, default_config["long_threshold"], inputs, + min_val=0, + title="Long threshold: RSI value from with to send a long (buy) signal. " + "Evaluates as -1 when the current RSI value is equal or lower.", + editor_options={ + enums.UserInputOtherSchemaValuesTypes.DEPENDENCIES.value: { + "trend_change_identifier": False + } + } + ) + + @classmethod + def get_default_config( + cls, period_length: typing.Optional[float] = None, trend_change_identifier: typing.Optional[bool] = None, + short_threshold: typing.Optional[float] = None, long_threshold: typing.Optional[float] = None + ): + return { + cls.PERIOD_LENGTH: period_length or 14, + cls.TREND_CHANGE_IDENTIFIER: True if trend_change_identifier is None else trend_change_identifier, + cls.SHORT_THRESHOLD: short_threshold or 70, + cls.LONG_THRESHOLD: long_threshold or 30, + } + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + updated_value = False + if candle_data is not None and len(candle_data) > self.period_length: + rsi_v = tulipy.rsi(candle_data, period=self.period_length) + if len(rsi_v) and not math.isnan(rsi_v[-1]): + if self.is_trend_change_identifier: + long_trend = EvaluatorUtil.TrendAnalysis.get_trend(rsi_v, self.long_term_averages) + short_trend = EvaluatorUtil.TrendAnalysis.get_trend(rsi_v, self.short_term_averages) + + # check if trend change + if short_trend > 0 > long_trend: + # trend changed to up + self.set_eval_note(-short_trend) + + elif long_trend > 0 > short_trend: + # trend changed to down + self.set_eval_note(short_trend) + + # use RSI current value + last_rsi_value = rsi_v[-1] + if last_rsi_value > 50: + self.set_eval_note(rsi_v[-1] / 200) + else: + self.set_eval_note((rsi_v[-1] - 100) / 200) + else: + self.eval_note = 0 + if rsi_v[-1] >= self.short_threshold: + self.eval_note = 1 + elif rsi_v[-1] <= self.long_threshold: + self.eval_note = -1 + updated_value = True + if not self.is_trend_change_identifier and not updated_value: + self.eval_note = 0 + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + @classmethod + def get_is_symbol_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not symbol dependant else False + """ + return False + + @classmethod + def get_is_time_frame_wildcard(cls) -> bool: + """ + :return: True if the evaluator is not time_frame dependant else False + """ + return False + + +# double RSI analysis +class RSIWeightMomentumEvaluator(evaluators.TAEvaluator): + PERIOD = "period" + SLOW_EVAL_COUNT = "slow_eval_count" + FAST_EVAL_COUNT = "fast_eval_count" + RSI_TO_WEIGHTS = "RSI_to_weight" + SLOW_THRESHOLD = "slow_threshold" + FAST_THRESHOLD = "fast_threshold" + FAST_THRESHOLDS = "fast_thresholds" + WEIGHTS = "weights" + PRICE = "price" + VOLUME = "volume" + + @staticmethod + def get_eval_type(): + return typing.Dict[str, int] + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.period_length = 14 + self.slow_eval_count = 16 + self.fast_eval_count = 4 + self.weights = [] + + def _init_fast_threshold(self, inputs, indexes, fast_threshold, price_weight, volume_weight): + self.UI.user_input(self.WEIGHTS, enums.UserInputTypes.OBJECT, None, inputs, + parent_input_name=self.FAST_THRESHOLDS, + title="Price and volume weights of this interpretation.", array_indexes=indexes) + return { + self.FAST_THRESHOLD: self.UI.user_input(self.FAST_THRESHOLD, enums.UserInputTypes.INT, fast_threshold, + inputs, min_val=0, parent_input_name=self.FAST_THRESHOLDS, + title="Fast RSI threshold under which this interpretation will " + "be triggered.", array_indexes=indexes), + self.WEIGHTS: { + self.PRICE: self.UI.user_input(self.PRICE, enums.UserInputTypes.OPTIONS, price_weight, + inputs, options=[1, 2, 3], parent_input_name=self.WEIGHTS, + editor_options={"enum_titles": ["Light", "Average", "Heavy"]}, + title="Price weight.", array_indexes=indexes), + self.VOLUME: self.UI.user_input(self.VOLUME, enums.UserInputTypes.OPTIONS, volume_weight, + inputs, options=[1, 2, 3], parent_input_name=self.WEIGHTS, + editor_options={"enum_titles": ["Light", "Average", "Heavy"]}, + title="Volume weight.", array_indexes=indexes), + } + } + + def _init_RSI_to_weight(self, inputs, slow_threshold, fast_thresholds): + self.UI.user_input(self.FAST_THRESHOLDS, enums.UserInputTypes.OBJECT_ARRAY, fast_thresholds, inputs, + item_title="Fast RSI interpretation", + other_schema_values={"minItems": 1, "uniqueItems": True}, + parent_input_name=self.RSI_TO_WEIGHTS, + title="Interpretations on this slow threshold trigger case."), + return { + self.SLOW_THRESHOLD: self.UI.user_input(self.SLOW_THRESHOLD, enums.UserInputTypes.INT, slow_threshold, + inputs, + min_val=0, parent_input_name=self.RSI_TO_WEIGHTS, + title="Slow RSI threshold under which this interpretation will " + "be triggered.", array_indexes=[0]), + self.FAST_THRESHOLDS: [ + self._init_fast_threshold(inputs, [0, index], *fast_threshold) + for index, fast_threshold in enumerate(fast_thresholds) + ], + } + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + self.period_length = self.UI.user_input("period", enums.UserInputTypes.INT, self.period_length, + inputs, min_val=1, + title="Period: RSI period length.") + self.slow_eval_count = self.UI.user_input("slow_eval_count", enums.UserInputTypes.INT, self.slow_eval_count, + inputs, min_val=1, + title="Number of recent RSI values to consider to get the current slow " + "moving market sentiment.") + self.fast_eval_count = self.UI.user_input("fast_eval_count", enums.UserInputTypes.INT, self.fast_eval_count, + inputs, min_val=1, + title="Number of recent RSI values to consider to get the current fast " + "moving market sentiment.") + weights = [] + self.weights = sorted( + self.UI.user_input(self.RSI_TO_WEIGHTS, enums.UserInputTypes.OBJECT_ARRAY, weights, inputs, + item_title="Slow RSI interpretation", + other_schema_values={"minItems": 1, "uniqueItems": True}, + title="RSI values and interpretations."), + key=lambda a: a[self.SLOW_THRESHOLD] + ) + # init one user input to generate user input schema and default values + weights.append(self._init_RSI_to_weight(inputs, 30, [[20, 2, 2]])) + + for i, fast_threshold in enumerate(self.weights): + fast_threshold[self.FAST_THRESHOLDS] = sorted(fast_threshold[self.FAST_THRESHOLDS], + key=lambda a: a[self.FAST_THRESHOLD]) + + def _get_rsi_averages(self, symbol_candles, time_frame, include_in_construction): + # compute the slow and fast RSI average + candle_data = trading_api.get_symbol_close_candles(symbol_candles, time_frame, + include_in_construction=include_in_construction) + if len(candle_data) > self.period_length: + rsi_v = tulipy.rsi(candle_data, period=self.period_length) + rsi_v = data_util.drop_nan(rsi_v) + if len(rsi_v): + slow_average = numpy.mean(rsi_v[-self.slow_eval_count:]) + fast_average = numpy.mean(rsi_v[-self.fast_eval_count:]) + return slow_average, fast_average, rsi_v + return None, None, None + + @staticmethod + def _check_inferior(bound, val1, val2): + return val1 < bound and val2 < bound + + def _analyse_dip_weight(self, slow_rsi, fast_rsi, current_rsi): + # returns price weight, volume weight + try: + for slow_rsi_weight in self.weights: + if slow_rsi < slow_rsi_weight[self.SLOW_THRESHOLD]: + for fast_rsi_weight in slow_rsi_weight[self.FAST_THRESHOLDS]: + if self._check_inferior(fast_rsi_weight[self.FAST_THRESHOLD], fast_rsi, current_rsi): + return fast_rsi_weight[self.WEIGHTS][self.PRICE], \ + fast_rsi_weight[self.WEIGHTS][self.VOLUME] + # exit loop since the target RSI has been found + break + except KeyError as e: + self.logger.error(f"Error when reading from config file: missing {e}") + return None, None + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + try: + symbol_candles = self.get_exchange_symbol_data(exchange, exchange_id, symbol) + # compute the slow and fast RSI average + slow_rsi, fast_rsi, rsi_v = self._get_rsi_averages(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + current_candle_time = trading_api.get_symbol_time_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data)[ + -1] + await self.evaluate(cryptocurrency, symbol, time_frame, slow_rsi, + fast_rsi, rsi_v, current_candle_time, candle) + except IndexError: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + + async def evaluate(self, cryptocurrency, symbol, time_frame, slow_rsi, + fast_rsi, rsi_v, current_candle_time, candle): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if slow_rsi is not None and fast_rsi is not None and rsi_v is not None: + last_rsi_values_to_consider = 5 + analysed_rsi = rsi_v[-last_rsi_values_to_consider:] + peak_reached = EvaluatorUtil.TrendAnalysis.min_has_just_been_reached(analysed_rsi, acceptance_window=0.95, + delay=2) + if peak_reached: + price_weight, volume_weight = self._analyse_dip_weight(slow_rsi, fast_rsi, rsi_v[-1]) + if price_weight is not None and volume_weight is not None: + self.eval_note = { + "price_weight": price_weight, + "volume_weight": volume_weight, + "current_candle_time": current_candle_time + } + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + +# bollinger_bands +class BBMomentumEvaluator(evaluators.TAEvaluator): + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.period_length = 20 + + def init_user_inputs(self, inputs: dict) -> None: + self.period_length = self.UI.user_input("period_length", enums.UserInputTypes.INT, self.period_length, + inputs, min_val=1, + title="Period: Bollinger bands period length.") + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + self.period_length, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(candle_data) >= self.period_length: + # compute bollinger bands + lower_band, middle_band, upper_band = tulipy.bbands(candle_data, self.period_length, 2) + + # if close to lower band => low value => bad, + # therefore if close to middle, value is keeping up => good + # finally if up the middle one or even close to the upper band => very good + + current_value = candle_data[-1] + current_up = upper_band[-1] + current_middle = middle_band[-1] + current_low = lower_band[-1] + delta_up = current_up - current_middle + delta_low = current_middle - current_low + + # its exactly on all bands + if current_up == current_low: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + + # exactly on the middle + elif current_value == current_middle: + self.eval_note = 0 + + # up the upper band + elif current_value > current_up: + self.eval_note = 1 + + # down the lower band + elif current_value < current_low: + self.eval_note = -1 + + # regular values case: use parabolic factor all the time + else: + + # up the middle band + if current_middle < current_value: + self.eval_note = math.pow((current_value - current_middle) / delta_up, 2) + + # down the middle band + elif current_middle > current_value: + self.eval_note = -1 * math.pow((current_middle - current_value) / delta_low, 2) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + +# EMA +class EMAMomentumEvaluator(evaluators.TAEvaluator): + PERIOD_LENGTH = "period_length" + PRICE_THRESHOLD_PERCENT = "price_threshold_percent" + REVERSE_SIGNAL = "reverse_signal" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.period_length = 21 + self.price_threshold_percent = 2 + self.price_threshold_multiplier = self.price_threshold_percent / 100 + self.reverse_signal = False + + def init_user_inputs(self, inputs: dict) -> None: + default_config = self.get_default_config() + self.period_length = self.UI.user_input( + self.PERIOD_LENGTH, enums.UserInputTypes.INT, default_config["period_length"], inputs, + min_val=1, title="Period: Moving Average period length." + ) + self.price_threshold_percent = self.UI.user_input( + self.PRICE_THRESHOLD_PERCENT, enums.UserInputTypes.FLOAT, + default_config["price_threshold_percent"], inputs, + min_val=0, + title="Price threshold: Percent difference between the current price and current EMA value from " + "which to trigger a long or short signal. " + "Example with EMA value=200, Price threshold=5: a short signal will fire when price is above or " + "equal to 210 and a long signal will when price is bellow or equal to 190", + ) + self.reverse_signal = self.UI.user_input( + self.REVERSE_SIGNAL, enums.UserInputTypes.BOOLEAN, default_config["reverse_signal"], inputs, + title="Reverse signal: when enabled, emits a short signal when the current price is bellow the EMA " + "value and long signal when the current price is above the EMA value.", + ) + self.price_threshold_multiplier = self.price_threshold_percent / 100 + + @classmethod + def get_default_config( + cls, + period_length: typing.Optional[int] = None, price_threshold_percent: typing.Optional[float] = None, + reverse_signal: typing.Optional[bool] = False, + ) -> dict: + return { + cls.PERIOD_LENGTH: period_length or 21, + cls.PRICE_THRESHOLD_PERCENT: 2 if price_threshold_percent is None else price_threshold_percent, + cls.REVERSE_SIGNAL: reverse_signal or False, + } + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + self.period_length, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + self.eval_note = 0 + if len(candle_data) >= self.period_length: + # compute ema + ema_values = tulipy.ema(candle_data, self.period_length) + is_price_above_ema_threshold = candle_data[-1] >= (ema_values[-1] * (1 + self.price_threshold_multiplier)) + is_price_bellow_ema_threshold = candle_data[-1] <= (ema_values[-1] * (1 - self.price_threshold_multiplier)) + if is_price_above_ema_threshold: + self.eval_note = 1 + elif is_price_bellow_ema_threshold: + self.eval_note = -1 + if self.reverse_signal: + self.eval_note = -1 * self.eval_note + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + +# ADX --> trend_strength +class ADXMomentumEvaluator(evaluators.TAEvaluator): + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.period_length = 14 + + def init_user_inputs(self, inputs: dict) -> None: + self.period_length = self.UI.user_input("period_length", enums.UserInputTypes.INT, self.period_length, + inputs, min_val=1, + title="Period: ADX period length.") + + def _get_minimal_data(self): + # 26 minimal_data length required for 14 period_length + return self.period_length + 12 + + # implementation according to: https://www.investopedia.com/articles/technical/02/041002.asp => length = 14 and + # exponential moving average = 20 in a uptrend market + # idea: adx > 30 => strong trend, < 20 => trend change to come + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + symbol_candles = self.get_exchange_symbol_data(exchange, exchange_id, symbol) + close_candles = trading_api.get_symbol_close_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + if len(close_candles) > self._get_minimal_data(): + high_candles = trading_api.get_symbol_high_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + low_candles = trading_api.get_symbol_low_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, close_candles, high_candles, low_candles, candle) + else: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + async def evaluate(self, cryptocurrency, symbol, time_frame, close_candles, high_candles, low_candles, candle): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(close_candles) >= self._get_minimal_data(): + min_adx = 7.5 + max_adx = 45 + neutral_adx = 25 + adx = tulipy.adx(high_candles, low_candles, close_candles, self.period_length) + instant_ema = data_util.drop_nan(tulipy.ema(close_candles, 2)) + slow_ema = data_util.drop_nan(tulipy.ema(close_candles, 20)) + adx = data_util.drop_nan(adx) + + if len(adx): + current_adx = adx[-1] + current_slows_ema = slow_ema[-1] + current_instant_ema = instant_ema[-1] + + multiplier = -1 if current_instant_ema < current_slows_ema else 1 + + # strong adx => strong trend + if current_adx > neutral_adx: + # if max adx already reached => when ADX forms a top and begins to turn down, you should look for a + # retracement that causes the price to move toward its 20-day exponential moving average (EMA). + adx_last_values = adx[-15:] + adx_last_value = adx_last_values[-1] + + local_max_adx = adx_last_values.max() + # max already reached => trend will slow down + if adx_last_value < local_max_adx: + + self.eval_note = multiplier * (current_adx - neutral_adx) / (local_max_adx - neutral_adx) + + # max not reached => trend will continue, return chances to be max now + else: + crossing_indexes = EvaluatorUtil.TrendAnalysis.get_threshold_change_indexes(adx, neutral_adx) + chances_to_be_max = \ + EvaluatorUtil.TrendAnalysis.get_estimation_of_move_state_relatively_to_previous_moves_length( + crossing_indexes, adx) if len(crossing_indexes) > 2 else 0.75 + proximity_to_max = min(1, current_adx / max_adx) + self.eval_note = multiplier * proximity_to_max * chances_to_be_max + + # weak adx => change to come + else: + self.eval_note = multiplier * min(1, ((neutral_adx - current_adx) / (neutral_adx - min_adx))) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + +class MACDMomentumEvaluator(evaluators.TAEvaluator): + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.previous_note = None + self.long_period_length = 26 + self.short_period_length = 12 + self.signal_period_length = 9 + + def init_user_inputs(self, inputs: dict) -> None: + self.short_period_length = self.UI.user_input( + "short_period_length", enums.UserInputTypes.INT, self.short_period_length, inputs, + min_val=1, title="MACD fast period length." + ) + self.long_period_length = self.UI.user_input( + "long_period_length", enums.UserInputTypes.INT, self.long_period_length, inputs, + min_val=1, title="MACD slow period length." + ) + self.signal_period_length = self.UI.user_input( + "signal_period_length", enums.UserInputTypes.INT, self.signal_period_length, inputs, + min_val=1, title="MACD signal period." + ) + + def _analyse_pattern(self, pattern, macd_hist, zero_crossing_indexes, price_weight, + pattern_move_time, sign_multiplier): + # add pattern's strength + weight = price_weight * EvaluatorUtil.PatternAnalyser.get_pattern_strength(pattern) + + average_pattern_period = 0.7 + if len(zero_crossing_indexes) > 1: + # compute chances to be after average pattern period + patterns = [EvaluatorUtil.PatternAnalyser.get_pattern( + macd_hist[zero_crossing_indexes[i]:zero_crossing_indexes[i + 1]]) + for i in range(len(zero_crossing_indexes) - 1) + ] + if 0 != zero_crossing_indexes[0]: + patterns.append(EvaluatorUtil.PatternAnalyser.get_pattern(macd_hist[0:zero_crossing_indexes[0]])) + if len(macd_hist) - 1 != zero_crossing_indexes[-1]: + patterns.append(EvaluatorUtil.PatternAnalyser.get_pattern(macd_hist[zero_crossing_indexes[-1]:])) + double_patterns_count = patterns.count("W") + patterns.count("M") + + average_pattern_period = EvaluatorUtil.TrendAnalysis. \ + get_estimation_of_move_state_relatively_to_previous_moves_length( + zero_crossing_indexes, + macd_hist, + pattern_move_time, + double_patterns_count) + + # if we have few data but wave is growing => set higher value + if len(zero_crossing_indexes) <= 1 and price_weight == 1: + if self.previous_note is not None: + average_pattern_period = 0.95 + self.previous_note = sign_multiplier * weight * average_pattern_period + else: + self.previous_note = None + + self.eval_note = sign_multiplier * weight * average_pattern_period + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(candle_data) > self.long_period_length: + macd, macd_signal, macd_hist = tulipy.macd(candle_data, self.short_period_length, + self.long_period_length, self.signal_period_length) + + # on macd hist => M pattern: bearish movement, W pattern: bullish movement + # max on hist: optimal sell or buy + macd_hist = data_util.drop_nan(macd_hist) + zero_crossing_indexes = EvaluatorUtil.TrendAnalysis.get_threshold_change_indexes(macd_hist, 0) + last_index = len(macd_hist) - 1 + pattern, start_index, end_index = EvaluatorUtil.PatternAnalyser.find_pattern(macd_hist, + zero_crossing_indexes, + last_index) + + if pattern != EvaluatorUtil.PatternAnalyser.UNKNOWN_PATTERN: + + # set sign (-1 buy or 1 sell) + sign_multiplier = -1 if pattern == "W" or pattern == "V" else 1 + + # set pattern time frame => W and M are on 2 time frames, others 1 + pattern_move_time = 2 if (pattern == "W" or pattern == "M") and end_index == last_index else 1 + + # set weight according to the max value of the pattern and the current value + current_pattern_start = start_index + price_weight = macd_hist[-1] / macd_hist[current_pattern_start:].max() if sign_multiplier == 1 \ + else macd_hist[-1] / macd_hist[current_pattern_start:].min() + + if not math.isnan(price_weight): + self._analyse_pattern(pattern, macd_hist, zero_crossing_indexes, price_weight, + pattern_move_time, sign_multiplier) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + +class KlingerOscillatorMomentumEvaluator(evaluators.TAEvaluator): + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.short_period = 35 # standard with klinger + self.long_period = 55 # standard with klinger + self.ema_signal_period = 13 # standard ema signal for klinger + + def init_user_inputs(self, inputs: dict) -> None: + self.short_period = self.UI.user_input("short_period", enums.UserInputTypes.INT, self.short_period, + inputs, min_val=1, + title="Short period: length of the short klinger period (standard is 35).") + self.long_period = self.UI.user_input("long_period", enums.UserInputTypes.INT, self.long_period, + inputs, min_val=1, + title="Long period: length of the long klinger period (standard is 55).") + self.ema_signal_period = self.UI.user_input("ema_signal_period", enums.UserInputTypes.INT, + self.ema_signal_period, + inputs, min_val=1, + title="Long period: length of the exponential moving average used " + "to apply on the klinger results (standard is 13).") + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + symbol_candles = self.get_exchange_symbol_data(exchange, exchange_id, symbol) + high_candles = trading_api.get_symbol_high_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + if len(high_candles) >= self.short_period: + low_candles = trading_api.get_symbol_low_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + close_candles = trading_api.get_symbol_close_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + volume_candles = trading_api.get_symbol_volume_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, high_candles, low_candles, + close_candles, volume_candles, candle) + else: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + async def evaluate(self, cryptocurrency, symbol, time_frame, high_candles, low_candles, + close_candles, volume_candles, candle): + eval_proposition = commons_constants.START_PENDING_EVAL_NOTE + kvo = tulipy.kvo(high_candles, + low_candles, + close_candles, + volume_candles, + self.short_period, + self.long_period) + kvo = data_util.drop_nan(kvo) + if len(kvo) >= self.ema_signal_period: + kvo_ema = tulipy.ema(kvo, self.ema_signal_period) + + ema_difference = kvo - kvo_ema + + if len(ema_difference) > 1: + zero_crossing_indexes = EvaluatorUtil.TrendAnalysis.get_threshold_change_indexes(ema_difference, 0) + + current_difference = ema_difference[-1] + significant_move_threshold = numpy.std(ema_difference) + + factor = 0.2 + + if EvaluatorUtil.TrendAnalysis.peak_has_been_reached_already( + ema_difference[zero_crossing_indexes[-1]:]): + if abs(current_difference) > significant_move_threshold: + factor = 1 + else: + factor = 0.5 + + eval_proposition = current_difference * factor / significant_move_threshold + + if abs(eval_proposition) > 1: + eval_proposition = 1 if eval_proposition > 0 else -1 + self.eval_note = eval_proposition + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + +class KlingerOscillatorReversalConfirmationMomentumEvaluator(evaluators.TAEvaluator): + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.short_period = 35 # standard with klinger + self.long_period = 55 # standard with klinger + self.ema_signal_period = 13 # standard ema signal for klinger + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the tentacle, should define all the tentacle's user inputs unless + those are defined somewhere else. + """ + self.short_period = self.UI.user_input("short_period", enums.UserInputTypes.INT, self.short_period, + inputs, min_val=1, + title="Short period: length of the short klinger period (standard is 35).") + self.long_period = self.UI.user_input("long_period", enums.UserInputTypes.INT, self.long_period, + inputs, min_val=1, + title="Long period: length of the long klinger period (standard is 55).") + self.ema_signal_period = self.UI.user_input("ema_signal_period", enums.UserInputTypes.INT, + self.ema_signal_period, + inputs, min_val=1, + title="Long period: length of the exponential moving average used " + "to apply on the klinger results (standard is 13).") + + @staticmethod + def get_eval_type(): + return bool + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + symbol_candles = self.get_exchange_symbol_data(exchange, exchange_id, symbol) + high_candles = trading_api.get_symbol_high_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + if len(high_candles) >= self.short_period: + low_candles = trading_api.get_symbol_low_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + close_candles = trading_api.get_symbol_close_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + volume_candles = trading_api.get_symbol_volume_candles(symbol_candles, time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, high_candles, low_candles, + close_candles, volume_candles, candle) + else: + self.eval_note = False + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + async def evaluate(self, cryptocurrency, symbol, time_frame, high_candles, low_candles, + close_candles, volume_candles, candle): + if len(high_candles) >= self.short_period: + kvo = tulipy.kvo(high_candles, + low_candles, + close_candles, + volume_candles, + self.short_period, + self.long_period) + kvo = data_util.drop_nan(kvo) + if len(kvo) >= self.ema_signal_period: + + kvo_ema = tulipy.ema(kvo, self.ema_signal_period) + ema_difference = kvo - kvo_ema + + if len(ema_difference) > 1: + zero_crossing_indexes = EvaluatorUtil.TrendAnalysis.get_threshold_change_indexes(ema_difference, 0) + max_elements = 7 + to_consider_kvo = min(max_elements, len(ema_difference) - zero_crossing_indexes[-1]) + self.eval_note = EvaluatorUtil.TrendAnalysis.min_has_just_been_reached( + ema_difference[-to_consider_kvo:], + acceptance_window=0.9, delay=1) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/ADXMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/ADXMomentumEvaluator.md new file mode 100644 index 0000000000..1194bb882b --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/ADXMomentumEvaluator.md @@ -0,0 +1,7 @@ +Uses the [Average Directional Index](https://www.investopedia.com/terms/a/adx.asp) +to find reversals. The default implementation is according to +[Investopedia's ADX: The Trend Strength Indicator](https://www.investopedia.com/articles/technical/02/041002.asp). + +Evaluates -1 to 1 according to the current price using the +[Exponential Moving Average](https://www.investopedia.com/terms/e/ema.asp) with a length of 20 coupled with +the [ADX](https://www.investopedia.com/terms/a/adx.asp). diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/BBMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/BBMomentumEvaluator.md new file mode 100644 index 0000000000..1d3304a6b5 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/BBMomentumEvaluator.md @@ -0,0 +1,2 @@ +Uses the [Bollinger bands](https://www.investopedia.com/terms/b/bollingerbands.asp) to evaluate a value from -1 to 1 according to the current price +distance from to the [Bollinger bands](https://www.investopedia.com/terms/b/bollingerbands.asp) values. diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/EMAMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/EMAMomentumEvaluator.md new file mode 100644 index 0000000000..aa75d68e02 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/EMAMomentumEvaluator.md @@ -0,0 +1,3 @@ +Uses [exponential moving averages](https://www.investopedia.com/terms/m/movingaverage.asp) to find signal when the current price exceeds the average value. + +Evaluates -1 or 1 when the current price is far enough from the EMA. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/KlingerOscillatorMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/KlingerOscillatorMomentumEvaluator.md new file mode 100644 index 0000000000..d93b58fb7e --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/KlingerOscillatorMomentumEvaluator.md @@ -0,0 +1,3 @@ +Uses [Klinger Oscillator](https://www.investopedia.com/terms/k/klingeroscillator.asp) to find reversals. + +Evaluates -1 to 1 using [Klinger](https://www.investopedia.com/terms/k/klingeroscillator.asp) reversal estimation \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/KlingerOscillatorReversalConfirmationMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/KlingerOscillatorReversalConfirmationMomentumEvaluator.md new file mode 100644 index 0000000000..b37b842f5c --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/KlingerOscillatorReversalConfirmationMomentumEvaluator.md @@ -0,0 +1,3 @@ +Uses [Klinger Oscillator](https://www.investopedia.com/terms/k/klingeroscillator.asp) to find reversals. + +Returns True on reversal confirmation. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/MACDMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/MACDMomentumEvaluator.md new file mode 100644 index 0000000000..db231320ba --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/MACDMomentumEvaluator.md @@ -0,0 +1,3 @@ +Uses the [Moving Average Convergence Divergence](https://www.investopedia.com/terms/m/macd.asp) to find reversals. + +This evaluator will try to find patterns in the [MACD](https://www.investopedia.com/terms/m/macd.asp) histogram and returns -1 to 1 according to the price and identified pattern strength. diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/RSIMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/RSIMomentumEvaluator.md new file mode 100644 index 0000000000..7cd8bc87a2 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/RSIMomentumEvaluator.md @@ -0,0 +1,3 @@ +Uses the [Relative Strength Index](https://www.investopedia.com/terms/r/rsi.asp) to find trend reversals. + +When found, evaluates -1 to 1 according to the strength of the [RSI](https://www.investopedia.com/terms/r/rsi.asp). \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/RSIWeightMomentumEvaluator.md b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/RSIWeightMomentumEvaluator.md new file mode 100644 index 0000000000..9313960f71 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/resources/RSIWeightMomentumEvaluator.md @@ -0,0 +1 @@ +Uses the [Relative Strength Index](https://www.investopedia.com/terms/r/rsi.asp) to find dips and give them weight according to the trend diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_adx_momentum_evaluator.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_adx_momentum_evaluator.py new file mode 100644 index 0000000000..5b6f9552fb --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_adx_momentum_evaluator.py @@ -0,0 +1,73 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio + +import tests.functional_tests.evaluators_tests.abstract_TA_test as abstract_TA_test +import tentacles.Evaluator.TA as TA + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def evaluator_tester(): + evaluator_tester_instance = TestADXTAEvaluator() + evaluator_tester_instance.TA_evaluator_class = TA.ADXMomentumEvaluator + return evaluator_tester_instance + + +class TestADXTAEvaluator(abstract_TA_test.AbstractTATest): + + @staticmethod + async def test_stress_test(evaluator_tester): + await evaluator_tester.run_stress_test_without_exceptions(0.7) + + @staticmethod + async def test_reactions_to_dump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_dump(0.2, 0.35, -0.2, -0.1, 0) + + @staticmethod + async def test_reactions_to_pump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_pump(0, 0.1, 0.45, 0.7, 0.6, 0.65, 0.75) + + @staticmethod + async def test_reaction_to_rise_after_over_sold(evaluator_tester): + await evaluator_tester.run_test_reactions_to_rise_after_over_sold(0.8, -0.1, -0.5, -0.52, 0.8) + + @staticmethod + async def test_reaction_to_over_bought_then_dip(evaluator_tester): + await evaluator_tester.run_test_reactions_to_over_bought_then_dip(0.1, 0.1, 0.3, 0.4, -0.4, 0.2) + + @staticmethod + async def test_reaction_to_flat_trend(evaluator_tester): + await evaluator_tester.run_test_reactions_to_flat_trend( + # eval_start_move_ending_up_in_a_rise, + 0.4, + # eval_reaches_flat_trend, eval_first_micro_up_p1, eval_first_micro_up_p2, + 0.1, 0.4, 0.45, + # eval_micro_down1, eval_micro_up1, eval_micro_down2, eval_micro_up2, + 1, 0.6, 0.1, 0.4, + # eval_micro_down3, eval_back_normal3, eval_micro_down4, eval_back_normal4, + -0.4, 0.5, -0.7, 0.8, + # eval_micro_down5, eval_back_up5, eval_micro_up6, eval_back_down6, + -0.1, -0.5, 0.25, 0.35, + # eval_back_normal6, eval_micro_down7, eval_back_up7, eval_micro_down8, + 0.3, -0.5, -0.6, -0.45, + # eval_back_up8, eval_micro_down9, eval_back_up9 + -0.35, -0.1, 0.1) diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_bollinger_bands_momentum_TA_evaluator.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_bollinger_bands_momentum_TA_evaluator.py new file mode 100644 index 0000000000..fe355afbe1 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_bollinger_bands_momentum_TA_evaluator.py @@ -0,0 +1,74 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio + + +import tests.functional_tests.evaluators_tests.abstract_TA_test as abstract_TA_test +import tentacles.Evaluator.TA as TA + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def evaluator_tester(): + evaluator_tester_instance = TestBollingerBandsMomentumeEvaluator() + evaluator_tester_instance.TA_evaluator_class = TA.BBMomentumEvaluator + return evaluator_tester_instance + + +class TestBollingerBandsMomentumeEvaluator(abstract_TA_test.AbstractTATest): + + @staticmethod + async def test_stress_test(evaluator_tester): + await evaluator_tester.run_stress_test_without_exceptions() + + @staticmethod + async def test_reactions_to_dump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_dump(0.7, 0.2, -1, -1, -1) + + @staticmethod + async def test_reactions_to_pump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_pump(0.4, 0.5, 1, 1, 1, 1, 0.1) + + @staticmethod + async def test_reaction_to_rise_after_over_sold(evaluator_tester): + await evaluator_tester.run_test_reactions_to_rise_after_over_sold(-0.1, -0.99, -0.99, -0.5, 1) + + @staticmethod + async def test_reaction_to_over_bought_then_dip(evaluator_tester): + await evaluator_tester.run_test_reactions_to_over_bought_then_dip(0, 1, 1, 0.95, -0.3, -0.1) + + @staticmethod + async def test_reaction_to_flat_trend(evaluator_tester): + await evaluator_tester.run_test_reactions_to_flat_trend( + # eval_start_move_ending_up_in_a_rise, + 1, + # eval_reaches_flat_trend, eval_first_micro_up_p1, eval_first_micro_up_p2, + 1, 0.8, 0.4, + # eval_micro_down1, eval_micro_up1, eval_micro_down2, eval_micro_up2, + 0.1, 1, -0.3, 0.1, + # eval_micro_down3, eval_back_normal3, eval_micro_down4, eval_back_normal4, + -0.6, 0.5, 0, 0.5, + # eval_micro_down5, eval_back_up5, eval_micro_up6, eval_back_down6, + -1, -0.15, 1, 0.1, + # eval_back_normal6, eval_micro_down7, eval_back_up7, eval_micro_down8, + 0.4, -0.1, 0, -1, + # eval_back_up8, eval_micro_down9, eval_back_up9 + -0.05, -1, 0.5) diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_klinger_TA_evaluator.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_klinger_TA_evaluator.py new file mode 100644 index 0000000000..e6c217852b --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_klinger_TA_evaluator.py @@ -0,0 +1,75 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio + + +import tests.functional_tests.evaluators_tests.abstract_TA_test as abstract_TA_test +import tentacles.Evaluator.TA as TA + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def evaluator_tester(): + evaluator_tester_instance = TestKlingerEvaluator() + evaluator_tester_instance.TA_evaluator_class = TA.KlingerOscillatorMomentumEvaluator + return evaluator_tester_instance + + +class TestKlingerEvaluator(abstract_TA_test.AbstractTATest): + + @staticmethod + async def test_stress_test(evaluator_tester): + await evaluator_tester.run_stress_test_without_exceptions(0.7, False, skip_long_time_frames=True) + + @staticmethod + async def test_reactions_to_dump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_dump(0, 0, -0.2, -0.4, -0.55) + + @staticmethod + async def test_reactions_to_pump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_pump(-0.1, -0.1, 0, 0.1, 0.2, + 0, -0.5) + + @staticmethod + async def test_reaction_to_rise_after_over_sold(evaluator_tester): + await evaluator_tester.run_test_reactions_to_rise_after_over_sold(-0.2, -0.6, -1, -1, 0.1) + + @staticmethod + async def test_reaction_to_over_bought_then_dip(evaluator_tester): + await evaluator_tester.run_test_reactions_to_over_bought_then_dip(-1, 0, 0.5, 0.5, -0.8, -1) + + @staticmethod + async def test_reaction_to_flat_trend(evaluator_tester): + await evaluator_tester.run_test_reactions_to_flat_trend( + # eval_start_move_ending_up_in_a_rise, + 0.9, + # eval_reaches_flat_trend, eval_first_micro_up_p1, eval_first_micro_up_p2, + 0.7, 0.55, 0.3, + # eval_micro_down1, eval_micro_up1, eval_micro_down2, eval_micro_up2, + -0.3, -0.25, -0.4, -0.1, + # eval_micro_down3, eval_back_normal3, eval_micro_down4, eval_back_normal4, + 0, -0.1, 0.1, 0.1, + # eval_micro_down5, eval_back_up5, eval_micro_up6, eval_back_down6, + -0.1, -0.1, 0.1, 0.25, + # eval_back_normal6, eval_micro_down7, eval_back_up7, eval_micro_down8, + 0, 0.1, -0.2, 0, + # eval_back_up8, eval_micro_down9, eval_back_up9 + 0, 0, 0.1) diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_macd_TA_evaluator.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_macd_TA_evaluator.py new file mode 100644 index 0000000000..89c266188a --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_macd_TA_evaluator.py @@ -0,0 +1,74 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio + + +import tests.functional_tests.evaluators_tests.abstract_TA_test as abstract_TA_test +import tentacles.Evaluator.TA as TA + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def evaluator_tester(): + evaluator_tester_instance = TestMACDEvaluator() + evaluator_tester_instance.TA_evaluator_class = TA.MACDMomentumEvaluator + return evaluator_tester_instance + + +class TestMACDEvaluator(abstract_TA_test.AbstractTATest): + + @staticmethod + async def test_stress_test(evaluator_tester): + await evaluator_tester.run_stress_test_without_exceptions(0.6) + + @staticmethod + async def test_reactions_to_dump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_dump(0.3, 0.25, -0.15, -0.3, -0.5) + + @staticmethod + async def test_reactions_to_pump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_pump(0.3, 0.4, 0.75, 0.75, 0.75, 0.75, 0.2) + + @staticmethod + async def test_reaction_to_rise_after_over_sold(evaluator_tester): + await evaluator_tester.run_test_reactions_to_rise_after_over_sold(0, -0.5, -0.65, -0.4, -0.08) + + @staticmethod + async def test_reaction_to_over_bought_then_dip(evaluator_tester): + await evaluator_tester.run_test_reactions_to_over_bought_then_dip(-0.6, 0.1, 0.6, 0.7, -0.35, -0.65) + + @staticmethod + async def test_reaction_to_flat_trend(evaluator_tester): + await evaluator_tester.run_test_reactions_to_flat_trend( + # eval_start_move_ending_up_in_a_rise, + 0.75, + # eval_reaches_flat_trend, eval_first_micro_up_p1, eval_first_micro_up_p2, + 0.6, 0.7, 0.45, + # eval_micro_down1, eval_micro_up1, eval_micro_down2, eval_micro_up2, + -0.1, -0.6, -0.55, -0.4, + # eval_micro_down3, eval_back_normal3, eval_micro_down4, eval_back_normal4, + -0.25, -0.1, -0.1, 0.2, + # eval_micro_down5, eval_back_up5, eval_micro_up6, eval_back_down6, + -0.5, -0.6, 0.24, 0.35, + # eval_back_normal6, eval_micro_down7, eval_back_up7, eval_micro_down8, + 0.49, -0.1, -0.4, -0.26, + # eval_back_up8, eval_micro_down9, eval_back_up9 + -0.31, -0.7, 0.1) diff --git a/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_rsi_TA_evaluator.py b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_rsi_TA_evaluator.py new file mode 100644 index 0000000000..f749c5fa39 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/momentum_evaluator/tests/test_rsi_TA_evaluator.py @@ -0,0 +1,73 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio + +import tests.functional_tests.evaluators_tests.abstract_TA_test as abstract_TA_test +import tentacles.Evaluator.TA as TA + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def evaluator_tester(): + evaluator_tester_instance = TestRSIEvaluator() + evaluator_tester_instance.TA_evaluator_class = TA.RSIMomentumEvaluator + return evaluator_tester_instance + + +class TestRSIEvaluator(abstract_TA_test.AbstractTATest): + + @staticmethod + async def test_stress_test(evaluator_tester): + await evaluator_tester.run_stress_test_without_exceptions(0.7, False) + + @staticmethod + async def test_reactions_to_dump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_dump(0.3, -0.2, -0.8, -1, -1) + + @staticmethod + async def test_reactions_to_pump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_pump(0.3, 0.6, 1, 1, 1, 1, 0.5) + + @staticmethod + async def test_reaction_to_rise_after_over_sold(evaluator_tester): + await evaluator_tester.run_test_reactions_to_rise_after_over_sold(-1, -1, -1, -1, -0.7) + + @staticmethod + async def test_reaction_to_over_bought_then_dip(evaluator_tester): + await evaluator_tester.run_test_reactions_to_over_bought_then_dip(0.1, 0.4, 0.85, 1, 0.75, 0.8) + + @staticmethod + async def test_reaction_to_flat_trend(evaluator_tester): + await evaluator_tester.run_test_reactions_to_flat_trend( + # eval_start_move_ending_up_in_a_rise, + 0.4, + # eval_reaches_flat_trend, eval_first_micro_up_p1, eval_first_micro_up_p2, + 0.55, 0.9, 1, + # eval_micro_down1, eval_micro_up1, eval_micro_down2, eval_micro_up2, + 0.5, 0.8, 1, 0.7, + # eval_micro_down3, eval_back_normal3, eval_micro_down4, eval_back_normal4, + 0.55, -0.1, 0.75, 0, + # eval_micro_down5, eval_back_up5, eval_micro_up6, eval_back_down6, + 0.2, -0.6, -0.45, 0.1, + # eval_back_normal6, eval_micro_down7, eval_back_up7, eval_micro_down8, + 0, 0.75, 0.25, 0, + # eval_back_up8, eval_micro_down9, eval_back_up9 + -1, -1, -0.75) diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/__init__.py b/packages/tentacles/Evaluator/TA/trend_evaluator/__init__.py new file mode 100644 index 0000000000..47c15e129a --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/__init__.py @@ -0,0 +1 @@ +from .trend import DoubleMovingAverageTrendEvaluator, EMADivergenceTrendEvaluator, DeathAndGoldenCrossEvaluator, SuperTrendEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/config/DeathAndGoldenCrossEvaluator.json b/packages/tentacles/Evaluator/TA/trend_evaluator/config/DeathAndGoldenCrossEvaluator.json new file mode 100644 index 0000000000..923a3dd11c --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/config/DeathAndGoldenCrossEvaluator.json @@ -0,0 +1,6 @@ +{ + "fast_length": 50, + "slow_length": 200, + "slow_ma_type": "SMA", + "fast_ma_type": "SMA" +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/config/DoubleMovingAverageTrendEvaluator.json b/packages/tentacles/Evaluator/TA/trend_evaluator/config/DoubleMovingAverageTrendEvaluator.json new file mode 100644 index 0000000000..7416a48f29 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/config/DoubleMovingAverageTrendEvaluator.json @@ -0,0 +1,4 @@ +{ + "long_period_length": 10, + "short_period_length": 5 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/config/EMADivergenceTrendEvaluator.json b/packages/tentacles/Evaluator/TA/trend_evaluator/config/EMADivergenceTrendEvaluator.json new file mode 100644 index 0000000000..6402ea3177 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/config/EMADivergenceTrendEvaluator.json @@ -0,0 +1,5 @@ +{ + "size": 50, + "short": -2, + "long": 2 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/config/SuperTrendEvaluator.json b/packages/tentacles/Evaluator/TA/trend_evaluator/config/SuperTrendEvaluator.json new file mode 100644 index 0000000000..2e75a7a464 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/config/SuperTrendEvaluator.json @@ -0,0 +1,4 @@ +{ + "factor": 3, + "length": 10 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/metadata.json b/packages/tentacles/Evaluator/TA/trend_evaluator/metadata.json new file mode 100644 index 0000000000..edd35859c4 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["DoubleMovingAverageTrendEvaluator", "EMADivergenceTrendEvaluator", "DeathAndGoldenCrossEvaluator", "SuperTrendEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/resources/DeathAndGoldenCrossEvaluator.md b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/DeathAndGoldenCrossEvaluator.md new file mode 100644 index 0000000000..916ede4330 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/DeathAndGoldenCrossEvaluator.md @@ -0,0 +1,7 @@ +DeathAndGoldenCrossEvaluator is based on two [moving averages](https://www.investopedia.com/terms/m/movingaverage.asp), by default one of **50** periods and other one of **200**. + +If the fast moving average is above the slow moving average, this indicates a bull market (signal: -1) When this happens it's called a [Golden Cross](https://www.investopedia.com/terms/g/goldencross.asp). +Inversely, if it's the fast moving average which is above the slow moving average this indicates a bear market (signal: 1). When this happens it's called a [Death Cross](https://www.investopedia.com/terms/d/deathcross.asp) + +This evaluator will always produce a value of `0` except right after a golden or death cross +is found, in this case a `-1` or `1` value will be produced. diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/resources/DoubleMovingAverageTrendEvaluator.md b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/DoubleMovingAverageTrendEvaluator.md new file mode 100644 index 0000000000..e1dd47d6c0 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/DoubleMovingAverageTrendEvaluator.md @@ -0,0 +1,4 @@ +Uses two [moving averages](https://www.investopedia.com/terms/m/movingaverage.asp) (a slow and a fast one) to find reversals. + +Evaluates from -1 to 1 relatively to the computed reversal probability and the current price distance from +[moving averages](https://www.investopedia.com/terms/m/movingaverage.asp). \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/resources/EMADivergenceTrendEvaluator.md b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/EMADivergenceTrendEvaluator.md new file mode 100644 index 0000000000..5ca9dc9e6b --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/EMADivergenceTrendEvaluator.md @@ -0,0 +1,3 @@ +Uses [exponential moving averages](https://www.investopedia.com/terms/e/ema.asp) to find price divergences. + +Evaluates from -1 to 1 relatively to the computed divergence strength. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/resources/SuperTrendEvaluator.md b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/SuperTrendEvaluator.md new file mode 100644 index 0000000000..57b7425176 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/resources/SuperTrendEvaluator.md @@ -0,0 +1,3 @@ +SuperTrendEvaluator is a trend-following indicator based on Average True Range [ATR](https://www.tradingview.com/scripts/averagetruerange/). The calculation of its single line combines trend detection and volatility. It can be used to detect changes in trend direction and to position stops. + +Evaluates -1 on an upwards trend and 1 if the trend is downwards. diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/tests/__init__.py b/packages/tentacles/Evaluator/TA/trend_evaluator/tests/__init__.py new file mode 100644 index 0000000000..974dd1623a --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/tests/__init__.py @@ -0,0 +1,15 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/tests/test_double_moving_averages_TA_evaluator.py b/packages/tentacles/Evaluator/TA/trend_evaluator/tests/test_double_moving_averages_TA_evaluator.py new file mode 100644 index 0000000000..de17bcd2c8 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/tests/test_double_moving_averages_TA_evaluator.py @@ -0,0 +1,74 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import pytest_asyncio + + +from tests.functional_tests.evaluators_tests.abstract_TA_test import AbstractTATest +from tentacles.Evaluator.TA import DoubleMovingAverageTrendEvaluator + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def evaluator_tester(): + evaluator_tester_instance = TestDoubleMovingAveragesEvaluator() + evaluator_tester_instance.TA_evaluator_class = DoubleMovingAverageTrendEvaluator + return evaluator_tester_instance + + +class TestDoubleMovingAveragesEvaluator(AbstractTATest): + + @staticmethod + async def test_stress_test(evaluator_tester): + await evaluator_tester.run_stress_test_without_exceptions(0.8) + + @staticmethod + async def test_reactions_to_dump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_dump(0.15, 0.15, -0.35, -0.75, -1) + + @staticmethod + async def test_reactions_to_pump(evaluator_tester): + await evaluator_tester.run_test_reactions_to_pump(0.1, 0.4, 1, 1, 1, 0.96, -0.45) + + @staticmethod + async def test_reaction_to_rise_after_over_sold(evaluator_tester): + await evaluator_tester.run_test_reactions_to_rise_after_over_sold(-0.7, -0.99, -0.99, -0.5, 0.85) + + @staticmethod + async def test_reaction_to_over_bought_then_dip(evaluator_tester): + await evaluator_tester.run_test_reactions_to_over_bought_then_dip(0, 0.4, 0.7, 0.6, -0.88, -0.1) + + @staticmethod + async def test_reaction_to_flat_trend(evaluator_tester): + await evaluator_tester.run_test_reactions_to_flat_trend( + # eval_start_move_ending_up_in_a_rise, + 0.45, + # eval_reaches_flat_trend, eval_first_micro_up_p1, eval_first_micro_up_p2, + 1, 0.65, 0.2, + # eval_micro_down1, eval_micro_up1, eval_micro_down2, eval_micro_up2, + -0.25, 0, -0.1, 0, + # eval_micro_down3, eval_back_normal3, eval_micro_down4, eval_back_normal4, + -0.1, 0, -0.1, 0, + # eval_micro_down5, eval_back_up5, eval_micro_up6, eval_back_down6, + 0.2, -0.10, 0, 0.1, + # eval_back_normal6, eval_micro_down7, eval_back_up7, eval_micro_down8, + -0.05, -0.1, -0.1, -0.15, + # eval_back_up8, eval_micro_down9, eval_back_up9 + 0, -0.1, 0.1) diff --git a/packages/tentacles/Evaluator/TA/trend_evaluator/trend.py b/packages/tentacles/Evaluator/TA/trend_evaluator/trend.py new file mode 100644 index 0000000000..c367ef0e78 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/trend_evaluator/trend.py @@ -0,0 +1,351 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import math + +import tulipy +import numpy + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as enums +import octobot_commons.data_util as data_util +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.util as evaluators_util +import octobot_trading.api as trading_api +import tentacles.Evaluator.Util as EvaluatorUtil + + +class SuperTrendEvaluator(evaluators.TAEvaluator): + FACTOR = "factor" + LENGTH = "length" + PREV_UPPER_BAND = "prev_upper_band" + PREV_LOWER_BAND = "prev_lower_band" + PREV_SUPERTREND = "prev_supertrend" + PREV_ATR = "prev_atr" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.factor = 3 + self.length = 10 + self.reversals_only = False + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + self.previous_value = {} + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the evaluator, should define all the evaluator's user inputs + """ + self.factor = self.UI.user_input("factor", enums.UserInputTypes.FLOAT, self.factor, + inputs, min_val=0, title="Factor multiplier of the ATR") + self.length = self.UI.user_input("length", enums.UserInputTypes.INT, self.length, + inputs, min_val=1, title="Length of the ATR") + self.reversals_only = self.UI.user_input( + "reversals_only", enums.UserInputTypes.BOOLEAN, self.reversals_only, inputs, + title="Reversals only: evaluates -1 and 1 only on trend reversals, 0 otherwise" + ) + + async def ohlcv_callback(self, exchange: str, exchange_id: str, cryptocurrency: str, + symbol: str, time_frame, candle, inc_in_construction_data): + exchange_symbol_data = self.get_exchange_symbol_data(exchange, exchange_id, symbol) + high = trading_api.get_symbol_high_candles(exchange_symbol_data, time_frame, + include_in_construction=inc_in_construction_data) + low = trading_api.get_symbol_low_candles(exchange_symbol_data, time_frame, + include_in_construction=inc_in_construction_data) + close = trading_api.get_symbol_close_candles(exchange_symbol_data, time_frame, + include_in_construction=inc_in_construction_data) + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(close) > self.length: + await self.evaluate(cryptocurrency, symbol, time_frame, candle, high, low, close) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle, high, low, close): + hl2 = EvaluatorUtil.CandlesUtil.HL2(high, low)[-1] + atr = tulipy.atr(high, low, close, self.length)[-1] + + previous_value = self.get_previous_value(symbol, time_frame) + + upper_band = hl2 + self.factor * atr + lower_band = hl2 - self.factor * atr + prev_upper_band = previous_value.get(self.PREV_UPPER_BAND, 0) + prev_lower_band = previous_value.get(self.PREV_LOWER_BAND, 0) + + # compute latest lower and upper band values + latest_lower_band = lower_band if (lower_band > prev_lower_band or close[-2] < prev_lower_band) else prev_lower_band + latest_upper_band = upper_band if (upper_band < prev_upper_band or close[-2] > prev_upper_band) else prev_upper_band + + prev_super_trend = previous_value.get(self.PREV_SUPERTREND, 0) + + signal = -1 + is_reversal = False + if previous_value.get(self.PREV_ATR, None) is None: + # not enough data to compute supertrend evaluation + signal = -1 + else: + # there is a previous value: check if the latest close is above or below ATR + # and select the correct band to use + if prev_super_trend == prev_upper_band: + # previous bearish trend: previous super trend used the upper band + # bullish if the latest close is above latest upper band + bullish_switch = close[-1] > latest_upper_band + if bullish_switch: + # bullish switch of the trend + signal = -1 + is_reversal = True + else: + # bearish continuation of the trend + signal = 1 + else: + # previous bullish trend: previous super trend used the lower band + # bearsish if the latest close is bellow latest lower band + bearish_switch = close[-1] < latest_lower_band + if bearish_switch: + # bearish switch of the trend + signal = 1 + is_reversal = True + else: + # bullish continuation of the trend + signal = -1 + + previous_value[self.PREV_ATR] = atr + previous_value[self.PREV_UPPER_BAND] = latest_upper_band + previous_value[self.PREV_LOWER_BAND] = latest_lower_band + # store the latest used super trend band: bullish = lower band, bearish = upper band + previous_value[self.PREV_SUPERTREND] = latest_lower_band if signal == -1 else latest_upper_band + self.eval_note = signal if is_reversal or not self.reversals_only else commons_constants.START_PENDING_EVAL_NOTE + + def get_previous_value(self, symbol, time_frame): + try: + previous_symbol_value = self.previous_value[symbol] + except KeyError: + self.previous_value[symbol] = {} + previous_symbol_value = self.previous_value[symbol] + try: + return previous_symbol_value[time_frame] + except KeyError: + previous_symbol_value[time_frame] = {} + return previous_symbol_value[time_frame] + + +class DeathAndGoldenCrossEvaluator(evaluators.TAEvaluator): + FAST_LENGTH = "fast_length" + SLOW_LENGTH = "slow_length" + SLOW_MA_TYPE = "slow_ma_type" + FAST_MA_TYPE = "fast_ma_type" + MA_TYPES = ["EMA", "WMA", "SMA", "LSMA", "KAMA", "DEMA", "TEMA", "VWMA"] + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.fast_length = 50 + self.slow_length = 200 + self.fast_ma_type = "sma" + self.slow_ma_type = "sma" + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the evaluator, should define all the evaluator's user inputs + """ + self.fast_length = self.UI.user_input(self.FAST_LENGTH, enums.UserInputTypes.INT, self.fast_length, + inputs, min_val=1, title="Fast MA length") + self.slow_length = self.UI.user_input(self.SLOW_LENGTH, enums.UserInputTypes.INT, self.slow_length, + inputs, min_val=1, title="Slow MA length") + self.fast_ma_type = self.UI.user_input(self.FAST_MA_TYPE, enums.UserInputTypes.OPTIONS, self.fast_ma_type, + inputs, options=self.MA_TYPES, title="Fast MA type").lower() + self.slow_ma_type = self.UI.user_input(self.SLOW_MA_TYPE, enums.UserInputTypes.OPTIONS, self.slow_ma_type, + inputs, options=self.MA_TYPES, title="Slow MA type").lower() + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + + close = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + volume = trading_api.get_symbol_volume_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(close) > max(self.slow_length, self.fast_length): + await self.evaluate(cryptocurrency, symbol, time_frame, candle, close, volume) + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle, candle_data, volume_data): + if self.fast_ma_type == "vwma": + fast_ma = tulipy.vwma(candle_data, volume_data, self.fast_length) + elif self.fast_ma_type == "lsma": + fast_ma = tulipy.linreg(candle_data, self.fast_length) + else: + fast_ma = getattr(tulipy, self.fast_ma_type)(candle_data, self.fast_length) + + if self.slow_ma_type == "vwma": + slow_ma = tulipy.vwma(candle_data, volume_data, self.slow_length) + elif self.slow_ma_type == "lsma": + slow_ma = tulipy.linreg(candle_data, self.slow_length) + else: + slow_ma = getattr(tulipy, self.slow_ma_type)(candle_data, self.slow_length) + + if min(len(fast_ma), len(slow_ma)) < 2: + # can't compute crosses: not enough data + self.logger.debug(f"Not enough data to compute crosses, skipping {symbol} {time_frame} evaluation") + return + + just_crossed = ( + fast_ma[-1] > slow_ma[-1] and fast_ma[-2] < slow_ma[-2] + ) or ( + fast_ma[-1] < slow_ma[-1] and fast_ma[-2] > slow_ma[-2] + ) + if just_crossed: + # crosses happen when the fast_ma and fast_ma just crossed, therefore when it happened on the last candle + if fast_ma[-1] > slow_ma[-1]: + # golden cross + self.eval_note = -1 + elif fast_ma[-1] < slow_ma[-1]: + # death cross + self.eval_note = 1 + + +# evaluates position of the current (2 unit) average trend relatively to the 5 units average and 10 units average trend +class DoubleMovingAverageTrendEvaluator(evaluators.TAEvaluator): + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.slow_period_length = 10 + self.fast_period_length = 5 + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the evaluator, should define all the evaluator's user inputs + """ + self.slow_period_length = self.UI.user_input("long_period_length", enums.UserInputTypes.INT, + self.slow_period_length, + inputs, min_val=1, title="Slow SMA length") + self.fast_period_length = self.UI.user_input("short_period_length", enums.UserInputTypes.INT, + self.fast_period_length, + inputs, min_val=1, title="Fast SMA length") + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(candle_data) >= max(self.slow_period_length, self.fast_period_length): + current_moving_average = tulipy.sma(candle_data, 2) + results = [self.get_moving_average_analysis(candle_data, current_moving_average, time_unit) + for time_unit in (self.fast_period_length, self.slow_period_length)] + if len(results): + self.eval_note = numpy.mean(results) + else: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + + if self.eval_note == 0: + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) + + # < 0 --> Current average bellow other one (computed using time_period) + # > 0 --> Current average above other one (computed using time_period) + @staticmethod + def get_moving_average_analysis(data, current_moving_average, time_period): + + time_period_unit_moving_average = tulipy.sma(data, time_period) + + # equalize array size + min_len_arrays = min(len(time_period_unit_moving_average), len(current_moving_average)) + + # compute difference between 1 unit values and others ( >0 means currently up the other one) + values_difference = \ + (current_moving_average[-min_len_arrays:] - time_period_unit_moving_average[-min_len_arrays:]) + values_difference = data_util.drop_nan(values_difference) + + if len(values_difference): + # indexes where current_unit_moving_average crosses time_period_unit_moving_average + crossing_indexes = EvaluatorUtil.TrendAnalysis.get_threshold_change_indexes(values_difference, 0) + + multiplier = 1 if values_difference[-1] > 0 else -1 + + # check at least some data crossed 0 + if crossing_indexes: + normalized_data = data_util.normalize_data(values_difference) + current_value = min(abs(normalized_data[-1]) * 2, 1) + if math.isnan(current_value): + return 0 + # check <= values_difference.count()-1if current value is max/min + if current_value == 0 or current_value == 1: + chances_to_be_max = EvaluatorUtil.TrendAnalysis.get_estimation_of_move_state_relatively_to_previous_moves_length( + crossing_indexes, + values_difference) + return multiplier * current_value * chances_to_be_max + # other case: maxima already reached => return distance to max + else: + return multiplier * current_value + + # just crossed the average => neutral + return 0 + + +# evaluates position of the current ema to detect divergences +class EMADivergenceTrendEvaluator(evaluators.TAEvaluator): + EMA_SIZE = "size" + SHORT_VALUE = "short" + LONG_VALUE = "long" + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.period = 50 + self.long_value = 2 + self.short_value = -2 + + def init_user_inputs(self, inputs: dict) -> None: + """ + Called right before starting the evaluator, should define all the evaluator's user inputs + """ + self.period = self.UI.user_input(self.EMA_SIZE, enums.UserInputTypes.INT, self.period, + inputs, min_val=1, title="EMA period length") + self.long_value = self.UI.user_input("long_value", enums.UserInputTypes.INT, self.long_value, + inputs, title="Long threshold: Minimum % price difference from EMA " + "consider a long signal. Should be positive in most cases") + self.short_value = self.UI.user_input("short_value", enums.UserInputTypes.INT, self.short_value, + inputs, title="Short threshold: Minimum % price difference from EMA " + "consider a short signal. Should be negative in most cases") + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if len(candle_data) >= self.period: + current_ema = tulipy.ema(candle_data, self.period)[-1] + current_price_close = candle_data[-1] + diff = (current_price_close / current_ema * 100) - 100 + + if diff <= self.long_value: + self.eval_note = -1 + elif diff >= self.short_value: + self.eval_note = 1 + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) diff --git a/packages/tentacles/Evaluator/TA/volatility_evaluator/__init__.py b/packages/tentacles/Evaluator/TA/volatility_evaluator/__init__.py new file mode 100644 index 0000000000..86d6f10a43 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/volatility_evaluator/__init__.py @@ -0,0 +1 @@ +from .volatility import StochasticRSIVolatilityEvaluator \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/volatility_evaluator/config/StochasticRSIVolatilityEvaluator.json b/packages/tentacles/Evaluator/TA/volatility_evaluator/config/StochasticRSIVolatilityEvaluator.json new file mode 100644 index 0000000000..0dcd4f7088 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/volatility_evaluator/config/StochasticRSIVolatilityEvaluator.json @@ -0,0 +1,5 @@ +{ + "period": 14, + "low_level": 1, + "high_level": 98 +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/volatility_evaluator/metadata.json b/packages/tentacles/Evaluator/TA/volatility_evaluator/metadata.json new file mode 100644 index 0000000000..0f1ab12969 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/volatility_evaluator/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["StochasticRSIVolatilityEvaluator"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/volatility_evaluator/resources/StochasticRSIVolatilityEvaluator.md b/packages/tentacles/Evaluator/TA/volatility_evaluator/resources/StochasticRSIVolatilityEvaluator.md new file mode 100644 index 0000000000..a47e824fb8 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/volatility_evaluator/resources/StochasticRSIVolatilityEvaluator.md @@ -0,0 +1,3 @@ +Uses the [Stochastic RSI](https://www.investopedia.com/terms/s/stochrsi.asp) as a volatilty evaluator to identify trends. + +When found, evaluates from -1 to 1 according to the strength of the trend. \ No newline at end of file diff --git a/packages/tentacles/Evaluator/TA/volatility_evaluator/volatility.py b/packages/tentacles/Evaluator/TA/volatility_evaluator/volatility.py new file mode 100644 index 0000000000..cd7bde6721 --- /dev/null +++ b/packages/tentacles/Evaluator/TA/volatility_evaluator/volatility.py @@ -0,0 +1,78 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import tulipy + +import octobot_commons.constants as commons_constants +import octobot_commons.enums as enums +import octobot_commons.data_util as data_util +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.util as evaluators_util +import octobot_trading.api as trading_api + + +class StochasticRSIVolatilityEvaluator(evaluators.TAEvaluator): + STOCHRSI_PERIOD = "period" + HIGH_LEVEL = "high_level" + LOW_LEVEL = "low_level" + TULIPY_INDICATOR_MULTIPLICATOR = 100 + + def __init__(self, tentacles_setup_config): + super().__init__(tentacles_setup_config) + self.period = 14 + self.low_level = 1 + self.high_level = 98 + + def init_user_inputs(self, inputs: dict) -> None: + self.period = self.UI.user_input(self.STOCHRSI_PERIOD, enums.UserInputTypes.INT, + self.period, inputs, min_val=2, + title="Period: length of the stochastic RSI period.") + self.low_level = self.UI.user_input(self.LOW_LEVEL, enums.UserInputTypes.FLOAT, + self.low_level, inputs, min_val=0, + title="Low threshold: stochastic RSI level from which evaluation " + "is considered a buy signal.") + self.high_level = self.UI.user_input(self.HIGH_LEVEL, enums.UserInputTypes.FLOAT, + self.high_level, inputs, min_val=0, + title="High threshold: stochastic RSI level from which evaluation " + "is considered a sell signal.") + + async def ohlcv_callback(self, exchange: str, exchange_id: str, + cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): + candle_data = trading_api.get_symbol_close_candles(self.get_exchange_symbol_data(exchange, exchange_id, symbol), + time_frame, + include_in_construction=inc_in_construction_data) + await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) + + async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): + try: + if len(candle_data) >= self.period * 2: + stochrsi_value = tulipy.stochrsi(data_util.drop_nan(candle_data), self.period)[-1] + + if stochrsi_value * self.TULIPY_INDICATOR_MULTIPLICATOR >= self.high_level: + self.eval_note = 1 + elif stochrsi_value * self.TULIPY_INDICATOR_MULTIPLICATOR <= self.low_level: + self.eval_note = -1 + else: + self.eval_note = stochrsi_value - 0.5 + except tulipy.lib.InvalidOptionError as e: + message = "" + if self.period <= 1: + message = " period should be higher than 1." + self.logger.warning(f"Error when computing StochasticRSIVolatilityEvaluator: {e}{message}") + self.logger.exception(e, False) + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) diff --git a/packages/tentacles/Evaluator/Util/candles_util/__init__.py b/packages/tentacles/Evaluator/Util/candles_util/__init__.py new file mode 100644 index 0000000000..2221a4954c --- /dev/null +++ b/packages/tentacles/Evaluator/Util/candles_util/__init__.py @@ -0,0 +1 @@ +from .candles_util import CandlesUtil \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/candles_util/candles_util.pxd b/packages/tentacles/Evaluator/Util/candles_util/candles_util.pxd new file mode 100644 index 0000000000..9a755417d5 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/candles_util/candles_util.pxd @@ -0,0 +1,24 @@ +# cython: language_level=3 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +cimport numpy as np +from math cimport mean + +cpdef object HL2(object high, object low) +cpdef object HLC3(object high, object low, object close) +cpdef object OHLC4(object open, object high, object low, object close) +cpdef tuple HeikinAshi(object open, object high, object low, object close) \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/candles_util/candles_util.py b/packages/tentacles/Evaluator/Util/candles_util/candles_util.py new file mode 100644 index 0000000000..cd6eeb984f --- /dev/null +++ b/packages/tentacles/Evaluator/Util/candles_util/candles_util.py @@ -0,0 +1,83 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import numpy as np +from octobot_commons.data_util import mean + +class CandlesUtil: + + @staticmethod + def HL2(candles_high, candles_low): + """ + Return a list of HL2 value (high + low ) / 2 + :param high: list of high + :param low: list of low + :return: list of HL2 + """ + return np.array(list(map((lambda candles_high, candles_low: mean([candles_high, candles_low])), + candles_high, candles_low))) + + @staticmethod + def HLC3(candles_high, candles_low, candles_close): + """ + Return a list of HLC3 values (high + low + close) / 3 + :param high: list of high + :param low: list of low + :param close: list of close + :return: list of HLC3 + """ + return np.array(list(map((lambda candles_high, candles_low, candles_close: + mean([candles_high, candles_low, candles_close])), + candles_high, candles_low, candles_close))) + + @staticmethod + def OHLC4(candles_open, candles_high, candles_low, candles_close): + """ + Return a list of OHLC4 value (open + high + low + close) / 4 + :param open: list of open + :param high: list of high + :param low: list of low + :param close: list of close + :return: list of OHLC4 + """ + return np.array(list(map((lambda candles_open, candles_high, candles_low, candles_close: + mean([candles_open, candles_high, candles_low, candles_close])), + candles_open, candles_high, candles_low, candles_close))) + + @staticmethod + def HeikinAshi(candles_open, candles_high, candles_low, candles_close): + """ + Return HeikinAshi array of the given candles + :param open: list of open + :param high: list of high + :param low: list of low + :param close: list of close + :return: HAopen, HAhigh, HAlow, HAclose + """ + haOpen, haHigh, haLow, haClose = [np.array([]) for i in range(4)] + for i, (open_value, high_value, low_value, close_value) \ + in enumerate(zip(candles_open, candles_high, candles_low, candles_close)): + if i == 0: + haOpen = np.append(haOpen, open_value) + haHigh = np.append(haHigh, high_value) + haLow = np.append(haLow, low_value) + haClose = np.append(haClose, close_value) + continue + haOpen = np.append(haOpen, mean([candles_open[i-1], candles_close[i-1]])) + haHigh = np.append(haHigh, high_value) + haLow = np.append(haLow, low_value) + haClose = np.append(haClose, mean([open_value, high_value, low_value, close_value])) + return haOpen, haHigh, haLow, haClose \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/candles_util/metadata.json b/packages/tentacles/Evaluator/Util/candles_util/metadata.json new file mode 100644 index 0000000000..2b9eae0460 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/candles_util/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["CandlesUtil"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/candles_util/tests/test_candles_util.py b/packages/tentacles/Evaluator/Util/candles_util/tests/test_candles_util.py new file mode 100644 index 0000000000..7054f56d1c --- /dev/null +++ b/packages/tentacles/Evaluator/Util/candles_util/tests/test_candles_util.py @@ -0,0 +1,95 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import numpy as np + +from tentacles.Evaluator.Util import CandlesUtil + + +def test_HL2(): + candles_high = np.array([10, 12, np.nan, 45, 5.67, 6.54, 75, 8.01, 9]) + candles_low = np.array([9, 8, 7, 6, 5, 4, 3, 2, 1]) + np.testing.assert_array_equal(CandlesUtil.HL2(candles_high, candles_low), + np.array([9.5, 10, np.nan, 25.5, 5.335, 5.27, 39.0, 5.005, 5.0], dtype=np.float64)) + + candles_high = np.array([120, 123, 54, 45, 210.54, 546.21, 981.2, .958, 65.7]) + candles_low = np.array([887.592, 896.519, 97.416, 233.987, 846.789, 713.054, 856.985, 421.17, 874.296]) + np.testing.assert_array_equal(CandlesUtil.HL2(candles_high, candles_low), + np.array([503.796, 509.7595, 75.708, 139.49349999999998, 528.6645, 629.6320000000001, + 919.0925, 211.06400000000002, 469.99800000000005], dtype=np.float64)) + +def test_HLC3(): + candles_high = np.array([9, 13, np.nan, 45, 5.67, 6.54, 75, 8.01, 9]) + candles_low = np.array([19, 25, 17, 36, 45, 84, 31, 21, 10]) + candles_close = np.array([2, 4, 4, 4, 6, 7, 8, 9, 10]) + np.testing.assert_array_equal(CandlesUtil.HLC3(candles_high, candles_low, candles_close), + np.array([10, 14, np.nan, 28.333333333333332, 18.89, + 32.513333333333335, 38, 12.67, 9.666666666666666], dtype=np.float64)) + + candles_high = np.array([733.985, 86.751, 388.834, 630.849, 231.102, 224.815, 430.74, 776.919, 209.207]) + candles_low = np.array([145.747, 829.698, 534.426, 879.53, 187.895, 698.515, 822.942, 532.641, 626.917]) + candles_close = np.array([811.199, 278.313, 817.295, 315.199, 974.104, 775.321, 979.139, 790.477, 518.736]) + np.testing.assert_array_equal(CandlesUtil.HLC3(candles_high, candles_low, candles_close), + np.array([563.6436666666667, 398.25399999999996, 580.185, 608.526, 464.367, + 566.217, 744.2736666666666, 700.0123333333332, 451.62000000000006], dtype=np.float64)) + +def test_OHLC4(): + candles_open = np.array([251.613, 259.098, 247.819, 140.73, 237.547, 830.611, 433.168, 404.026, 403.538]) + candles_high = np.array([980.99, 403.92, 698.072, 658.647, 245.151, 480.9, 621.35, 429.109, 637.439]) + candles_low = np.array([658.777, 101.13, 549.588, 28.624, 132.07, 813.572, 366.478, 619.649, 371.696]) + candles_close = np.array([812.829, 880.456, 406.039, 39.224, 917.386, 707.281, 737.851, 330.262, 258.689]) + np.testing.assert_array_equal(CandlesUtil.OHLC4(candles_open, candles_high, candles_low, candles_close), + np.array([676.05225, 411.151, 475.37949999999995, 216.80625000000003, + 383.0385, 708.091, 539.71175, 445.7615, 417.84049999999996], dtype=np.float64)) + + candles_open = np.array([345.468, 484.778, 332.855, 401.893, 41.936, 333.738, 983.158, 996.979, 807.855]) + candles_high = np.array([547.277, 856.206, 439.542, 921.475, 778.994, 156.285, 653.31, 534.865, 427.64]) + candles_low = np.array([328.444, 593.535, 4.243, 83.902, 811.859, 396.442, 433.552, 127.624, 314.613]) + candles_close = np.array([905.792, 382.98, 135.529, 494.942, 510.52, 399.78, 897.088, 192.068, 771.189]) + np.testing.assert_array_equal(CandlesUtil.OHLC4(candles_open, candles_high, candles_low, candles_close), + np.array([531.74525, 579.37475, 228.04225, 475.553, 535.82725, + 321.56125, 741.777, 462.884, 580.32425], dtype=np.float64)) + +def test_HeikinAshi(): + candles_open = np.array([977.88, 573.634, 816.233, 846.748, 184.114, 35.742, 598.653, 745.916, 854.334]) + candles_high = np.array([4.757, 499.759, 602.794, 179.313, 802.019, 384.307, 637.378, 161.048, 366.51]) + candles_low = np.array([903.152, 877.832, 966.154, 104.582, 837.638, 568.788, 788.584, 510.926, 608.184]) + candles_close = np.array([405.527, 685.962, 495.698, 271.687, 573.667, 891.018, 445.342, 344.928, 894.279]) + + haOpen, haHigh, haLow, haClose = CandlesUtil.HeikinAshi(candles_open, candles_high, candles_low, candles_close) + np.testing.assert_array_equal(haOpen, np.array([977.88, 691.7035, 629.798, 655.9655, 559.2175, + 378.89050000000003, 463.38, 521.9975, 545.422], dtype=np.float64)) + np.testing.assert_array_equal(haHigh, np.array([4.757, 499.759, 602.794, 179.313, 802.019, + 384.307, 637.378, 161.048, 366.51], dtype=np.float64)) + np.testing.assert_array_equal(haLow, np.array([903.152, 877.832, 966.154, 104.582, 837.638, + 568.788, 788.584, 510.926, 608.184], dtype=np.float64)) + np.testing.assert_array_equal(haClose, np.array([405.527, 659.29675, 720.21975, 350.5825, 599.3595, + 469.96375, 617.48925, 440.70450000000005, 680.82675], dtype=np.float64)) + + candles_open = np.array([188.539, 334.682, 495.604, 638.736, 632.213, 705.675, 876.735, 69.951, 909.477]) + candles_high = np.array([259.316, 843.705, 170.388, 318.961, 918.236, 585.595, 23.266, 657.422, 270.557]) + candles_low = np.array([652.361, 293.607, 295.191, 893.255, 819.447, 647.016, 330.303, 472.415, 617.705]) + candles_close = np.array([968.007, 114.792, 680.216, 168.147, 478.577, 437.676, 299.474, 208.601, 333.237]) + + haOpen, haHigh, haLow, haClose = CandlesUtil.HeikinAshi(candles_open, candles_high, candles_low, candles_close) + np.testing.assert_array_equal(haOpen, np.array([188.539, 578.2729999999999, 224.73700000000002, 587.91, + 403.4415, 555.395, 571.6754999999999, 588.1045, 139.276], dtype=np.float64)) + np.testing.assert_array_equal(haHigh, np.array([259.316, 843.705, 170.388, 318.961, 918.236, 585.595, + 23.266, 657.422, 270.557], dtype=np.float64)) + np.testing.assert_array_equal(haLow, np.array([652.361, 293.607, 295.191, 893.255, 819.447, 647.016, + 330.303, 472.415, 617.705], dtype=np.float64)) + np.testing.assert_array_equal(haClose, np.array([968.007, 396.6965, 410.34975, 504.77475, 712.11825, + 593.9905, 382.4445, 352.09725000000003, 532.744], dtype=np.float64)) \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/overall_state_analysis/__init__.py b/packages/tentacles/Evaluator/Util/overall_state_analysis/__init__.py new file mode 100644 index 0000000000..4c07c3f841 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/overall_state_analysis/__init__.py @@ -0,0 +1 @@ +from .overall_state_analysis import OverallStateAnalyser \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/overall_state_analysis/metadata.json b/packages/tentacles/Evaluator/Util/overall_state_analysis/metadata.json new file mode 100644 index 0000000000..f80f57e265 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/overall_state_analysis/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["OverallStateAnalyser"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/overall_state_analysis/overall_state_analysis.py b/packages/tentacles/Evaluator/Util/overall_state_analysis/overall_state_analysis.py new file mode 100644 index 0000000000..0621cce3f7 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/overall_state_analysis/overall_state_analysis.py @@ -0,0 +1,51 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import numpy + +import octobot_commons.constants as commons_constants + + +class OverallStateAnalyser: + def __init__(self): + self.overall_state = commons_constants.START_PENDING_EVAL_NOTE + self.evaluation_count = 0 + self.evaluations = [] + + # evaluation: number between -1 and 1 + # weight: integer between 0 (not even taken into account) and X + def add_evaluation(self, evaluation, weight, refresh_overall_state=True): + self.evaluations.append(StateEvaluation(evaluation, weight)) + if refresh_overall_state: + self._refresh_overall_state() + + def get_overall_state_after_refresh(self, refresh_overall_state=True): + if refresh_overall_state: + self._refresh_overall_state() + return self.overall_state + + # computes self.overall_state using self.evaluations values and weights + def _refresh_overall_state(self): + if self.evaluations: + self.overall_state = numpy.mean( + [evaluation.value for evaluation in self.evaluations for _ in range(evaluation.weight)] + ) + + +class StateEvaluation: + def __init__(self, value, weight): + self.value = value + self.weight = weight diff --git a/packages/tentacles/Evaluator/Util/pattern_analysis/__init__.py b/packages/tentacles/Evaluator/Util/pattern_analysis/__init__.py new file mode 100644 index 0000000000..437fd34564 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/pattern_analysis/__init__.py @@ -0,0 +1 @@ +from .pattern_analysis import PatternAnalyser \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/pattern_analysis/metadata.json b/packages/tentacles/Evaluator/Util/pattern_analysis/metadata.json new file mode 100644 index 0000000000..ec2a97f981 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/pattern_analysis/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["PatternAnalyser"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/pattern_analysis/pattern_analysis.py b/packages/tentacles/Evaluator/Util/pattern_analysis/pattern_analysis.py new file mode 100644 index 0000000000..7a4df99ddb --- /dev/null +++ b/packages/tentacles/Evaluator/Util/pattern_analysis/pattern_analysis.py @@ -0,0 +1,93 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import numpy as np +import math + + +class PatternAnalyser: + + UNKNOWN_PATTERN = "?" + + # returns the starting and ending index of the pattern if it's found + # supported patterns: + # W, M, N and V (ex: for macd) + # return boolean (pattern found or not), start index and end index + @staticmethod + def find_pattern(data, zero_crossing_indexes, data_frame_max_index): + if len(zero_crossing_indexes) > 1: + + last_move_data = data[zero_crossing_indexes[-1]:] + + # if last_move_data is shaped in W + shape = PatternAnalyser.get_pattern(last_move_data) + + if shape == "N" or shape == "V": + # check presence of W or M with insignificant move in the other direction + backwards_index = 2 + while backwards_index < len(zero_crossing_indexes) and \ + zero_crossing_indexes[-1*backwards_index] - zero_crossing_indexes[-1*backwards_index-1] < 4: + backwards_index += 1 + extended_last_move_data = data[zero_crossing_indexes[-1 * backwards_index]:] + extended_shape = PatternAnalyser.get_pattern(extended_last_move_data) + + if extended_shape == "W" or extended_shape == "M": + # check that values are on the same side (< or >0) + first_part = data[zero_crossing_indexes[-1 * backwards_index]: + zero_crossing_indexes[-1*backwards_index+1]] + second_part = data[zero_crossing_indexes[-1]:] + if np.mean(first_part)*np.mean(second_part) > 0: + return extended_shape, zero_crossing_indexes[-1*backwards_index], zero_crossing_indexes[-1] + + return shape, zero_crossing_indexes[-1], data_frame_max_index + else: + # if very few data: proceed with basic analysis + + # if last_move_data is shaped in W + start_pattern_index = 0 if not zero_crossing_indexes else zero_crossing_indexes[0] + shape = PatternAnalyser.get_pattern(data[start_pattern_index:]) + return shape, start_pattern_index, data_frame_max_index + + @staticmethod + def get_pattern(data): + if len(data) > 0: + mean_value = np.mean(data) * 0.7 + else: + mean_value = math.nan + if math.isnan(mean_value): + return PatternAnalyser.UNKNOWN_PATTERN + indexes_under_mean_value = np.where(data > mean_value)[0] \ + if mean_value < 0 \ + else np.where(data < mean_value)[0] + + nb_gaps = 0 + for i in range(len(indexes_under_mean_value)-1): + if indexes_under_mean_value[i+1]-indexes_under_mean_value[i] > 3: + nb_gaps += 1 + + if nb_gaps > 1: + return "W" if mean_value < 0 else "M" + else: + return "V" if mean_value < 0 else "N" + + # returns a value 0 < value < 1: the higher the stronger is the pattern + @staticmethod + def get_pattern_strength(pattern): + if pattern == "W" or pattern == "M": + return 1 + elif pattern == "N" or pattern == "V": + return 0.75 + return 0 diff --git a/packages/tentacles/Evaluator/Util/statistics_analysis/__init__.py b/packages/tentacles/Evaluator/Util/statistics_analysis/__init__.py new file mode 100644 index 0000000000..ac164fdb12 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/statistics_analysis/__init__.py @@ -0,0 +1 @@ +from .statistics_analysis import StatisticAnalysis \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/statistics_analysis/metadata.json b/packages/tentacles/Evaluator/Util/statistics_analysis/metadata.json new file mode 100644 index 0000000000..b362306e56 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/statistics_analysis/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["StatisticAnalysis"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/statistics_analysis/statistics_analysis.py b/packages/tentacles/Evaluator/Util/statistics_analysis/statistics_analysis.py new file mode 100644 index 0000000000..a58d831421 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/statistics_analysis/statistics_analysis.py @@ -0,0 +1,75 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import tulipy +import numpy + +import octobot_commons.constants as commons_constants + + +class StatisticAnalysis: + + # Return linear proximity to the lower or the upper band relatively to the middle band. + # Linearly compute proximity between middle and delta before linear: + @staticmethod + def analyse_recent_trend_changes(data, delta_function): + # compute bollinger bands + lower_band, middle_band, upper_band = tulipy.bbands(data, 20, 2) + # if close to lower band => low value => bad, + # therefore if close to middle, value is keeping up => good + # finally if up the middle one or even close to the upper band => very good + + current_value = data[-1] + current_up = upper_band[-1] + current_middle = middle_band[-1] + current_low = lower_band[-1] + delta_up = current_up - current_middle + delta_low = current_middle - current_low + + # its exactly on all bands + if current_up == current_low: + return commons_constants.START_PENDING_EVAL_NOTE + + # exactly on the middle + elif current_value == current_middle: + return 0 + + # up the upper band + elif current_value > current_up: + return -1 + + # down the lower band + elif current_value < current_low: + return 1 + + # delta given: use parabolic factor after delta, linear before + delta = delta_function(numpy.mean([delta_up, delta_low])) + + micro_change = ((current_value / current_middle) - 1) / 2 + + # approximately on middle band + if current_middle + delta >= current_value >= current_middle - delta: + return micro_change + + # up the middle area + elif current_middle + delta < current_value: + return -1 * max(micro_change, (current_value - current_middle) / delta_up) + + # down the middle area + elif current_middle - delta > current_value: + return max(micro_change, (current_middle - current_value) / delta_low) + + # should not happen + return 0 diff --git a/packages/tentacles/Evaluator/Util/text_analysis/__init__.py b/packages/tentacles/Evaluator/Util/text_analysis/__init__.py new file mode 100644 index 0000000000..4c1d3a97fd --- /dev/null +++ b/packages/tentacles/Evaluator/Util/text_analysis/__init__.py @@ -0,0 +1 @@ +from .text_analysis import TextAnalysis \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/text_analysis/metadata.json b/packages/tentacles/Evaluator/Util/text_analysis/metadata.json new file mode 100644 index 0000000000..b0caab70ff --- /dev/null +++ b/packages/tentacles/Evaluator/Util/text_analysis/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["TextAnalysis"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/text_analysis/text_analysis.py b/packages/tentacles/Evaluator/Util/text_analysis/text_analysis.py new file mode 100644 index 0000000000..72e2f813ca --- /dev/null +++ b/packages/tentacles/Evaluator/Util/text_analysis/text_analysis.py @@ -0,0 +1,110 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.constants as commons_constants +try: + import vaderSentiment.vaderSentiment as vaderSentiment +except ImportError: + if commons_constants.USE_MINIMAL_LIBS: + # mock vaderSentiment imports + class VaderSentimentImportMock: + class SentimentIntensityAnalyzer: + def __init__(self, *args): + raise ImportError("vaderSentiment not installed") + vaderSentiment = VaderSentimentImportMock() + + +class TextAnalysis: + IMAGE_ENDINGS = ["png", "jpg", "jpeg", "gif", "jfif", "tiff", "bmp", "ppm", "pgm", "pbm", "pnm", "webp", "hdr", + "heif", + "bat", "bpg", "svg", "cgm"] + + def __init__(self): + super().__init__() + self.analyzer = vaderSentiment.SentimentIntensityAnalyzer() + # self.test() + + def analyse(self, text): + # The compound score is computed by summing the valence scores of each word in the lexicon, adjusted according + # to the rules, and then normalized to be between -1 (most extreme negative) and +1 (most extreme positive). + # https://github.com/cjhutto/vaderSentiment + return self.analyzer.polarity_scores(text)["compound"] + + # return a list of high influential value websites + @staticmethod + def get_high_value_websites(): + return [ + "https://www.youtube.com" + ] + + @staticmethod + def is_analysable_url(url): + url_ending = str(url).split(".")[-1] + return url_ending.lower() not in TextAnalysis.IMAGE_ENDINGS + + # official account tweets that can be used for testing purposes + def test(self): + texts = [ + "Have you read about VeChain and INPI ASIA's integration to bring nanotechnology for digital identity to " + "the VeChainThor blockchain? NDCodes resist high temperature, last over 100 years, are incredibly durable " + "and invisible to the naked eye", + "A scientific hypothesis about how cats, infected with toxoplasmosis, are making humans buy Bitcoin was " + "presented at last night's BAHFest at MIT.", + "Net Neutrality Ends! Substratum Update 4.23.18", + "One more test from @SubstratumNet for today. :)", + "Goldman Sachs hires crypto trader as head of digital assets markets", + "Big news coming! Scheduled to be 27th/28th April... Have a guess...", + "This week's Theta Surge on http://SLIVER.tv isn't just for virtual items... five PlayStation 4s will " + "be given out to viewers that use Theta Tokens to reward the featured #Fortnite streamer! Tune in this " + "Friday at 1pm PST to win!", + "The European Parliament has voted for regulations to prevent the use of cryptocurrencies in money " + "laundering and terrorism financing. As long as they have good intention i don' t care.. but how " + "much can we trust them??!?!" + "By partnering with INPI ASIA, the VeChainThor Platform incorporates nanotechnology with digital " + "identification to provide solutions to some of the worlds most complex IoT problems.", + "Thanks to the China Academy of Information and Communication Technology, IPRdaily and Nashwork for " + "organizing the event.", + "Delivered a two hour open course last week in Beijing. You can tell the awareness of blockchain is " + "drastically increasing by the questions asked by the audience. But people need hand holding and " + "business friendly features to adopt the tech.", + "Introducing the first Oracle Enabler tool of the VeChainThor Platform: Multi-Party Payment Protocol " + "(MPP).", + "An open letter from Sunny Lu (CEO) on VeChainThor Platform.", + "VeChain has finished the production of digital intellectual property services with partner iTaotaoke. " + "This solution provides a competitive advantage for an industry in need of trust-free reporting and " + "content protections.#GoVeChain", + "Special thanks to @GaboritMickael to have invited @vechainofficial to present our solution and make " + "a little demo to @AccentureFrance", + "VeChain will pitch their solutions potentially landing a co-development product with LVMH. In " + "attendance will be CEOs Bill McDermott (SAP), Chuck Robbins (CISCO), Ginni Rometty (IBM), and Stephane " + "Richard (Orange) as speakers -", + "As the only blockchain company selected, VeChain is among 30 of 800+ hand-picked startups to compete " + "for the second edition of the LVMH Innovation Award. As a result, VeChain has been invited to join the " + "Luxury Lab LVMH at Viva Technology in Paris from May 24-26, 2018.", + "VeChain to further its partnership with RFID leader Xiamen Innov and newly announced top enterprise " + "solution provider CoreLink by deploying a VeChainThor enterprise level decentralized application - " + "AssetLink.", + "Today, a group of senior leaders from TCL's Eagle Talent program visited the VeChain SH office. " + "@VeChain_GU demonstrated our advanced enterprise solutions and it's relation to TCL's market. As a " + "result, we're exploring new developments within TCL related to blockchain technology.", + "We are glad to be recognized as Top 10 blockchain technology solution providers in 2018. outprovides a " + "platform for CIOs and decision makers to share their experiences, wisdom and advice. Read the full " + "version article via", + "Talked about TOTO at the blockchain seminar in R University of Science and Technology business school " + "last Saturday. It covered 3000 MBA students across business schools in China." + ] + for text in texts: + print(str(self.analyse(text)) + " => "+str(text.encode("utf-8", "ignore"))) diff --git a/packages/tentacles/Evaluator/Util/trend_analysis/__init__.py b/packages/tentacles/Evaluator/Util/trend_analysis/__init__.py new file mode 100644 index 0000000000..908732f472 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/trend_analysis/__init__.py @@ -0,0 +1 @@ +from .trend_analysis import TrendAnalysis \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/trend_analysis/metadata.json b/packages/tentacles/Evaluator/Util/trend_analysis/metadata.json new file mode 100644 index 0000000000..ce5adfbd54 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/trend_analysis/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["TrendAnalysis"], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Evaluator/Util/trend_analysis/trend_analysis.py b/packages/tentacles/Evaluator/Util/trend_analysis/trend_analysis.py new file mode 100644 index 0000000000..1b7ce49851 --- /dev/null +++ b/packages/tentacles/Evaluator/Util/trend_analysis/trend_analysis.py @@ -0,0 +1,149 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import numpy as np + + +class TrendAnalysis: + + # trend < 0 --> Down trend + # trend > 0 --> Up trend + @staticmethod + def get_trend(data, averages_to_use): + trend = 0 + inc = round(1 / len(averages_to_use), 2) + averages = [] + + # Get averages + for average_to_use in averages_to_use: + data_to_mean = data[-average_to_use:] + if len(data_to_mean): + averages.append(np.mean(data_to_mean)) + else: + averages.append(0) + + for a in range(0, len(averages) - 1): + if averages[a] - averages[a + 1] > 0: + trend -= inc + else: + trend += inc + + return trend + + @staticmethod + def peak_has_been_reached_already(data, neutral_val=0): + if len(data) > 1: + min_val = min(data) + max_val = max(data) + current_val = data[-1] / 0.8 + if current_val > neutral_val: + return current_val < max_val + else: + return current_val > min_val + else: + return False + + @staticmethod + def min_has_just_been_reached(data, acceptance_window=0.8, delay=1): + if len(data) > 1: + min_val = min(data) + current_val = data[-1] / acceptance_window + accepted_delayed_min = data[-(delay+1):] + return bool(min_val in accepted_delayed_min and current_val > min_val) + else: + return False + + @staticmethod + # TODO + def detect_divergence(data_frame, indicator_data_frame): + pass + # candle_data = data_frame.tail(DIVERGENCE_USED_VALUE) + # indicator_data = indicator_data_frame.tail(DIVERGENCE_USED_VALUE) + # + # total_delta = [] + # + # for i in range(0, DIVERGENCE_USED_VALUE - 1): + # candle_delta = candle_data.values[i] - candle_data.values[i + 1] + # indicator_delta = indicator_data.values[i] - indicator_data.values[i + 1] + # total_delta.append(candle_delta - indicator_delta) + + @staticmethod + def get_estimation_of_move_state_relatively_to_previous_moves_length(mean_crossing_indexes, + current_trend, + pattern_move_size=1, + double_size_patterns_count=0): + + if mean_crossing_indexes: + # compute average move size + time_averages = [(lambda a: mean_crossing_indexes[a+1]-mean_crossing_indexes[a])(a) + for a in range(len(mean_crossing_indexes)-1)] + # add 1st length + if 0 != mean_crossing_indexes[0]: + time_averages.append(mean_crossing_indexes[0]) + + # take double_size_patterns_count into account + time_averages += [0]*double_size_patterns_count + + time_average = np.mean(time_averages)*pattern_move_size if time_averages else 0 + + current_move_length = len(current_trend) - mean_crossing_indexes[-1] + # higher than time_average => high chances to be at half of the move already + if current_move_length > time_average/2: + return 1 + else: + return current_move_length / (time_average/2) + else: + return 0 + + @staticmethod + def get_threshold_change_indexes(data, threshold): + + # sub threshold values + sub_threshold_indexes = np.where(data <= threshold)[0] + + # remove consecutive sub-threshold values because they are not crosses + threshold_crossing_indexes = [] + current_move_size = 1 + for i, index in enumerate(sub_threshold_indexes): + if not len(threshold_crossing_indexes): + threshold_crossing_indexes.append(index) + else: + if threshold_crossing_indexes[-1] == index - current_move_size: + current_move_size += 1 + else: + if sub_threshold_indexes[i-1] not in threshold_crossing_indexes: + threshold_crossing_indexes.append(sub_threshold_indexes[i-1]) + if index not in threshold_crossing_indexes: + threshold_crossing_indexes.append(index) + current_move_size = 1 + # add last index if data_frame ends above threshold and last threshold_crossing_indexes inferior + # to data_frame size + if len(sub_threshold_indexes) > 0 \ + and sub_threshold_indexes[-1] < len(data) \ + and data[-1] > threshold \ + and sub_threshold_indexes[-1]+1 not in threshold_crossing_indexes: + threshold_crossing_indexes.append(sub_threshold_indexes[-1]+1) + + return threshold_crossing_indexes + + @staticmethod + def have_just_crossed_over(list_1, list_2): + # returns True if the last value of list_1 is higher than the last value of list_2 but the immediately + # preceding list_1 value is lower than the one from list_2 + try: + return list_1[-1] > list_2[-1] and list_1[-2] < list_2[-2] + except KeyError: + return False diff --git a/packages/tentacles/LICENSE b/packages/tentacles/LICENSE new file mode 100644 index 0000000000..0a041280bd --- /dev/null +++ b/packages/tentacles/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/packages/tentacles/Meta/DSL_operators/automation_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/automation_operators/__init__.py new file mode 100644 index 0000000000..0516a710f1 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/automation_operators/__init__.py @@ -0,0 +1,24 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.automation_operators.automation_management +from tentacles.Meta.DSL_operators.automation_operators.automation_management import ( + StopAutomationOperator, +) +__all__ = [ + "StopAutomationOperator", +] \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/automation_operators/automation_management.py b/packages/tentacles/Meta/DSL_operators/automation_operators/automation_management.py new file mode 100644 index 0000000000..22c85551b0 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/automation_operators/automation_management.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter as dsl_interpreter + +import octobot_flow.entities + + +class StopAutomationOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 0 + MAX_PARAMS = 0 + DESCRIPTION = "Signals the automation to stop." + EXAMPLE = "stop_automation()" + + @staticmethod + def get_name() -> str: + return "stop_automation" + + def compute(self) -> dict: + return { + octobot_flow.entities.PostIterationActionsDetails.__name__: + octobot_flow.entities.PostIterationActionsDetails( + stop_automation=True + ).to_dict(include_default_values=False) + } diff --git a/packages/tentacles/Meta/DSL_operators/automation_operators/metadata.json b/packages/tentacles/Meta/DSL_operators/automation_operators/metadata.json new file mode 100644 index 0000000000..319240c835 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/automation_operators/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/automation_operators/tests/test_automation_management.py b/packages/tentacles/Meta/DSL_operators/automation_operators/tests/test_automation_management.py new file mode 100644 index 0000000000..f222f098a8 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/automation_operators/tests/test_automation_management.py @@ -0,0 +1,68 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.errors +import octobot_flow.entities + +import tentacles.Meta.DSL_operators.automation_operators.automation_management as automation_management + + +@pytest.fixture +def interpreter(): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + ) + + +def _assert_stop_automation_result(result): + assert isinstance(result, dict) + assert octobot_flow.entities.PostIterationActionsDetails.__name__ in result + details = octobot_flow.entities.PostIterationActionsDetails.from_dict( + result[octobot_flow.entities.PostIterationActionsDetails.__name__] + ) + assert details.stop_automation is True + + +@pytest.mark.asyncio +async def test_stop_automation_call_as_dsl(interpreter): + assert "stop_automation" in interpreter.operators_by_name + + result = await interpreter.interprete("stop_automation()") + _assert_stop_automation_result(result) + + +def test_stop_automation_operator_compute(): + operator = automation_management.StopAutomationOperator() + result = operator.compute() + _assert_stop_automation_result(result) + + +@pytest.mark.asyncio +async def test_stop_automation_operator_invalid_parameters(interpreter): + with pytest.raises( + octobot_commons.errors.InvalidParametersError, + match="supports up to 0 parameters", + ): + await interpreter.interprete("stop_automation(1)") + + +def test_stop_automation_operator_docs(): + docs = automation_management.StopAutomationOperator.get_docs() + assert docs.name == "stop_automation" + assert "stop" in docs.description.lower() + assert docs.example == "stop_automation()" diff --git a/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/__init__.py new file mode 100644 index 0000000000..0e02768af7 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/__init__.py @@ -0,0 +1,26 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.blockchain_wallet_operators.blockchain_wallet_ops +from tentacles.Meta.DSL_operators.blockchain_wallet_operators.blockchain_wallet_ops import ( + create_blockchain_wallet_operators, + CREATED_TRANSACTIONS_KEY, +) +__all__ = [ + "create_blockchain_wallet_operators", + "CREATED_TRANSACTIONS_KEY", +] \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/blockchain_wallet_ops.py b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/blockchain_wallet_ops.py new file mode 100644 index 0000000000..16a74f8332 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/blockchain_wallet_ops.py @@ -0,0 +1,150 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import dataclasses +import decimal + +import octobot_commons.dataclasses +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.exchanges +import octobot_trading.api +import octobot_trading.enums +import octobot_trading.constants +import octobot_trading.blockchain_wallets as blockchain_wallets + + +@dataclasses.dataclass +class BlockchainWalletBalanceParams(octobot_commons.dataclasses.FlexibleDataclass): + blockchain_descriptor: blockchain_wallets.BlockchainDescriptor # descriptor of the blockchain to use + wallet_descriptor: blockchain_wallets.WalletDescriptor # descriptor of the wallet to use + asset: str + + +@dataclasses.dataclass +class TransferFundsParams(octobot_commons.dataclasses.FlexibleDataclass): + blockchain_descriptor: blockchain_wallets.BlockchainDescriptor # descriptor of the blockchain to use + wallet_descriptor: blockchain_wallets.WalletDescriptor # descriptor of the wallet to use + asset: str + amount: float + address: typing.Optional[str] = None # recipient address of the transfer + destination_exchange: typing.Optional[str] = None # recipient address of the transfer on the exchange + + +BLOCKCHAIN_WALLET_LIBRARY = "blockchain_wallet" + +CREATED_TRANSACTIONS_KEY = "created_transactions" + + +class BlockchainWalletOperator(dsl_interpreter.PreComputingCallOperator): + @staticmethod + def get_library() -> str: + # this is a contextual operator, so it should not be included by default in the get_all_operators function return values + return BLOCKCHAIN_WALLET_LIBRARY + + @classmethod + def get_blockchain_wallet_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="blockchain_descriptor", description="descriptor of the blockchain to use as in octobot_trading.blockchain_wallets.BlockchainDescriptor", required=True, type=dict), + dsl_interpreter.OperatorParameter(name="wallet_descriptor", description="descriptor of the wallet to use as in octobot_trading.blockchain_wallets.WalletDescriptor", required=True, type=dict), + ] + + +def create_blockchain_wallet_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], +) -> typing.List[type[BlockchainWalletOperator]]: + + class _BlockchainWalletBalanceOperator(BlockchainWalletOperator): + DESCRIPTION = "Returns the balance of the asset in the blockchain wallet" + EXAMPLE = "blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, 'BTC')" + + @staticmethod + def get_name() -> str: + return "blockchain_wallet_balance" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + *cls.get_blockchain_wallet_parameters(), + dsl_interpreter.OperatorParameter(name="asset", description="the asset to get the balance for", required=True, type=str), + ] + + async def pre_compute(self) -> None: + param_by_name = self.get_computed_value_by_parameter() + blockchain_wallet_balance_params = BlockchainWalletBalanceParams.from_dict(param_by_name) + async with octobot_trading.api.blockchain_wallet_context( + blockchain_wallets.BlockchainWalletParameters( + blockchain_descriptor=blockchain_wallet_balance_params.blockchain_descriptor, + wallet_descriptor=blockchain_wallet_balance_params.wallet_descriptor, + ), + exchange_manager.trader if exchange_manager else None + ) as wallet: + wallet_balance = await wallet.get_balance() + self.value = float( + wallet_balance[blockchain_wallet_balance_params.asset][ + octobot_trading.constants.CONFIG_PORTFOLIO_FREE + ] if blockchain_wallet_balance_params.asset in wallet_balance else octobot_trading.constants.ZERO + ) + + class _BlockchainWalletTransferOperator(BlockchainWalletOperator): + DESCRIPTION = "Withdraws an asset from the exchange's portfolio. requires ALLOW_FUNDS_TRANSFER env to be True (disabled by default to protect funds)" + EXAMPLE = "blockchain_wallet_transfer({blockchain_descriptor}, {wallet_descriptor}, 'BTC', 0.1, '{address}')" + + @staticmethod + def get_name() -> str: + return "blockchain_wallet_transfer" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + *cls.get_blockchain_wallet_parameters(), + dsl_interpreter.OperatorParameter(name="asset", description="the asset to transfer", required=True, type=str), + dsl_interpreter.OperatorParameter(name="amount", description="the amount to transfer", required=True, type=float), + dsl_interpreter.OperatorParameter(name="address", description="the address to transfer to", required=False, type=str, default=None), + dsl_interpreter.OperatorParameter(name="destination_exchange", description="the exchange to transfer to", required=False, type=str, default=None), + ] + + async def pre_compute(self) -> None: + await super().pre_compute() + param_by_name = self.get_computed_value_by_parameter() + transfer_funds_params = TransferFundsParams.from_dict(param_by_name) + async with octobot_trading.api.blockchain_wallet_context( + blockchain_wallets.BlockchainWalletParameters( + blockchain_descriptor=transfer_funds_params.blockchain_descriptor, + wallet_descriptor=transfer_funds_params.wallet_descriptor, + ), + exchange_manager.trader if exchange_manager else None + ) as wallet: + if transfer_funds_params.address: + address = transfer_funds_params.address + elif transfer_funds_params.destination_exchange == exchange_manager.exchange_name: + address = ( + await exchange_manager.trader.get_deposit_address(transfer_funds_params.asset) + )[octobot_trading.enums.ExchangeConstantsDepositAddressColumns.ADDRESS.value] + else: + raise octobot_commons.errors.DSLInterpreterError( + f"Unsupported destination exchange: {transfer_funds_params.destination_exchange}" + ) + # requires ALLOW_FUNDS_TRANSFER env to be True (disabled by default to protect funds) + created_transaction = await wallet.withdraw( + transfer_funds_params.asset, + decimal.Decimal(str(transfer_funds_params.amount)), + transfer_funds_params.blockchain_descriptor.network, + address, + ) + self.value = {CREATED_TRANSACTIONS_KEY: [created_transaction]} + + return [_BlockchainWalletBalanceOperator, _BlockchainWalletTransferOperator] diff --git a/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/metadata.json b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/metadata.json new file mode 100644 index 0000000000..319240c835 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/tests/test_blockchain_wallet_ops.py b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/tests/test_blockchain_wallet_ops.py new file mode 100644 index 0000000000..8db3585b0f --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/blockchain_wallet_operators/tests/test_blockchain_wallet_ops.py @@ -0,0 +1,212 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import pytest +import pytest_asyncio + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.constants +import octobot_trading.enums +import octobot_trading.errors +import octobot_trading.blockchain_wallets as blockchain_wallets +import octobot_trading.blockchain_wallets.simulator.blockchain_wallet_simulator as blockchain_wallet_simulator + +import tentacles.Meta.DSL_operators.blockchain_wallet_operators.blockchain_wallet_ops as blockchain_wallet_ops + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + + +BLOCKCHAIN_DESCRIPTOR = { + "blockchain": blockchain_wallets.BlockchainWalletSimulator.BLOCKCHAIN, + "network": octobot_trading.constants.SIMULATED_BLOCKCHAIN_NETWORK, + "native_coin_symbol": "ETH", +} +WALLET_DESCRIPTOR = {"address": "0x1234567890123456789012345678901234567890"} + + +def _wallet_descriptor_with_eth_balance(amount: float): + return { + **WALLET_DESCRIPTOR, + "specific_config": { + blockchain_wallet_simulator.BlockchainWalletSimulatorConfigurationKeys.ASSETS.value: [ + { + blockchain_wallet_simulator.BlockchainWalletSimulatorConfigurationKeys.ASSET.value: "ETH", + blockchain_wallet_simulator.BlockchainWalletSimulatorConfigurationKeys.AMOUNT.value: amount, + } + ] + }, + } + + +@pytest_asyncio.fixture +async def blockchain_wallet_operators(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return blockchain_wallet_ops.create_blockchain_wallet_operators(exchange_manager) + + +@pytest_asyncio.fixture +async def interpreter(blockchain_wallet_operators): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + blockchain_wallet_operators + ) + + +class TestBlockchainWalletBalanceOperator: + @pytest.mark.asyncio + async def test_pre_compute(self, blockchain_wallet_operators): + balance_op_class, _ = blockchain_wallet_operators + + operator = balance_op_class( + BLOCKCHAIN_DESCRIPTOR, + _wallet_descriptor_with_eth_balance(1.5), + "ETH", + ) + await operator.pre_compute() + assert operator.value == 1.5 + + @pytest.mark.asyncio + async def test_pre_compute_asset_not_in_balance(self, blockchain_wallet_operators): + balance_op_class, _ = blockchain_wallet_operators + + operator = balance_op_class( + BLOCKCHAIN_DESCRIPTOR, + _wallet_descriptor_with_eth_balance(10.0), + "BTC", + ) + await operator.pre_compute() + assert operator.value == float(octobot_trading.constants.ZERO) + + def test_compute_without_pre_compute(self, blockchain_wallet_operators): + balance_op_class, _ = blockchain_wallet_operators + operator = balance_op_class(BLOCKCHAIN_DESCRIPTOR, WALLET_DESCRIPTOR, "BTC") + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_blockchain_wallet_balance_call_as_dsl(self, interpreter): + blockchain_descriptor = BLOCKCHAIN_DESCRIPTOR + wallet_descriptor = _wallet_descriptor_with_eth_balance(1.5) + assert await interpreter.interprete( + f"blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, 'ETH')" + ) == 1.5 + assert await interpreter.interprete( + f"blockchain_wallet_balance({blockchain_descriptor}, {wallet_descriptor}, 'BTC')" + ) == 0.0 + + +class TestBlockchainWalletTransferOperator: + @pytest.mark.asyncio + async def test_pre_compute_with_address(self, blockchain_wallet_operators): + _, transfer_op_class = blockchain_wallet_operators + + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True + operator = transfer_op_class( + BLOCKCHAIN_DESCRIPTOR, + _wallet_descriptor_with_eth_balance(10.0), + "ETH", + 0.1, + address="0xrecipient123", + ) + await operator.pre_compute() + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = False + + assert operator.value is not None + assert isinstance(operator.value, dict) + assert "created_transactions" in operator.value + assert len(operator.value["created_transactions"]) == 1 + tx = operator.value["created_transactions"][0] + assert octobot_trading.enums.ExchangeConstantsTransactionColumns.TXID.value in tx + + @pytest.mark.asyncio + async def test_pre_compute_with_destination_exchange(self, blockchain_wallet_operators): + _, transfer_op_class = blockchain_wallet_operators + + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True + operator = transfer_op_class( + BLOCKCHAIN_DESCRIPTOR, + _wallet_descriptor_with_eth_balance(10.0), + "ETH", + 0.5, + destination_exchange="binanceus", + ) + await operator.pre_compute() + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = False + + assert operator.value is not None + assert isinstance(operator.value, dict) + assert "created_transactions" in operator.value + assert len(operator.value["created_transactions"]) == 1 + tx = operator.value["created_transactions"][0] + assert octobot_trading.enums.ExchangeConstantsTransactionColumns.TXID.value in tx + + @pytest.mark.asyncio + async def test_pre_compute_unsupported_destination_exchange(self, blockchain_wallet_operators): + _, transfer_op_class = blockchain_wallet_operators + + operator = transfer_op_class( + BLOCKCHAIN_DESCRIPTOR, + WALLET_DESCRIPTOR, + "BTC", + 0.1, + destination_exchange="unknown_exchange", + ) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="Unsupported destination exchange: unknown_exchange", + ): + await operator.pre_compute() + + @pytest.mark.asyncio + async def test_blockchain_wallet_transfer_call_as_dsl(self, interpreter): + blockchain_descriptor = BLOCKCHAIN_DESCRIPTOR + wallet_descriptor = _wallet_descriptor_with_eth_balance(1.5) + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = False + with pytest.raises( + octobot_trading.errors.DisabledFundsTransferError, + match="Funds transfer is not enabled", + ): + await interpreter.interprete( + f"blockchain_wallet_transfer({blockchain_descriptor}, {wallet_descriptor}, 'ETH', 0.1, address='0xrecipient123')" + ) + octobot_trading.constants.ALLOW_FUNDS_TRANSFER = True + result = await interpreter.interprete( + f"blockchain_wallet_transfer({blockchain_descriptor}, {wallet_descriptor}, 'ETH', 0.1, address='0xrecipient123')" + ) + assert "created_transactions" in result + assert len(result["created_transactions"]) == 1 + tx = result["created_transactions"][0] + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.TXID.value] + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.ADDRESS_FROM.value] == "0x1234567890123456789012345678901234567890" + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.ADDRESS_TO.value] == "0xrecipient123" + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.AMOUNT.value] == decimal.Decimal('0.1') + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.CURRENCY.value] == "ETH" + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.FEE.value] is None + assert tx[octobot_trading.enums.ExchangeConstantsTransactionColumns.INTERNAL.value] is False + result = await interpreter.interprete( + f"blockchain_wallet_transfer({blockchain_descriptor}, {wallet_descriptor}, 'ETH', 0.1, destination_exchange='binanceus')" + ) + assert result and isinstance(result, dict) + assert "created_transactions" in result diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/__init__.py new file mode 100644 index 0000000000..c8d929c329 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/__init__.py @@ -0,0 +1,56 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators import ( + OHLCVOperator, + ExchangeDataDependency, + create_ohlcv_operators, + create_ticker_operators, + create_symbol_operators, +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators import ( + create_portfolio_operators, + create_cancel_order_operators, + create_fetch_order_operators, + create_create_order_operators, + create_futures_contracts_operators, + create_copy_exchange_account_operators, + CREATED_ORDERS_KEY, + CANCELLED_ORDERS_KEY, + CREATED_WITHDRAWALS_KEY, + CopyExchangeAccountOperatorNames, +) + +__all__ = [ + "OHLCVOperator", + "ExchangeDataDependency", + "create_ohlcv_operators", + "create_ticker_operators", + "create_symbol_operators", + "create_portfolio_operators", + "create_cancel_order_operators", + "create_fetch_order_operators", + "create_create_order_operators", + "create_futures_contracts_operators", + "create_copy_exchange_account_operators", + "CREATED_ORDERS_KEY", + "CANCELLED_ORDERS_KEY", + "CREATED_WITHDRAWALS_KEY", + "CopyExchangeAccountOperatorNames", +] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_operator.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_operator.py new file mode 100644 index 0000000000..c644012fc5 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_operator.py @@ -0,0 +1,30 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter + + +EXCHANGE_LIBRARY = "exchange" + + +class ExchangeOperator(octobot_commons.dsl_interpreter.PreComputingCallOperator): + + @staticmethod + def get_library() -> str: + """ + Get the library of the operator. + """ + return EXCHANGE_LIBRARY diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/__init__.py new file mode 100644 index 0000000000..f83febba5b --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/__init__.py @@ -0,0 +1,54 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.portfolio_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.portfolio_operators import ( + create_portfolio_operators, + CREATED_WITHDRAWALS_KEY, +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.cancel_order_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.cancel_order_operators import ( + create_cancel_order_operators, + CANCELLED_ORDERS_KEY, +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.fetch_order_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.fetch_order_operators import ( + create_fetch_order_operators, +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.create_order_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.create_order_operators import ( + create_create_order_operators, CREATED_ORDERS_KEY +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.futures_contracts_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.futures_contracts_operators import ( + create_futures_contracts_operators, +) +from tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.copy_exchange_account_operators import ( + create_copy_exchange_account_operators, + CopyExchangeAccountOperatorNames, +) +__all__ = [ + "create_portfolio_operators", + "create_cancel_order_operators", + "create_fetch_order_operators", + "create_create_order_operators", + "create_futures_contracts_operators", + "create_copy_exchange_account_operators", + "CREATED_ORDERS_KEY", + "CANCELLED_ORDERS_KEY", + "CREATED_WITHDRAWALS_KEY", +] \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/cancel_order_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/cancel_order_operators.py new file mode 100644 index 0000000000..e8334b86c4 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/cancel_order_operators.py @@ -0,0 +1,112 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.signals +import octobot_trading.exchanges +import octobot_trading.enums +import octobot_trading.errors +import octobot_trading.modes.abstract_trading_mode +import octobot_trading.dsl + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +CANCELLED_ORDERS_KEY = "cancelled_orders" + + +def create_cancel_order_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], + trading_mode: typing.Optional[octobot_trading.modes.abstract_trading_mode.AbstractTradingMode] = None, + dependencies: typing.Optional[octobot_commons.signals.SignalDependencies] = None, + wait_for_cancelling: bool = True, +) -> list: + + class _CancelOrderOperator(exchange_operator.ExchangeOperator): + DESCRIPTION = "Cancels one or many orders" + EXAMPLE = "cancel_order('BTC/USDT', side='buy')" + + @staticmethod + def get_name() -> str: + return "cancel_order" + + @staticmethod + def get_library() -> str: + # this is a contextual operator, so it should not be included by default in the get_all_operators function return values + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="symbol", description="the symbol of the orders to cancel", required=True, type=str, default=None), + dsl_interpreter.OperatorParameter(name="side", description="the side of the orders to cancel", required=False, type=str, default=None), + dsl_interpreter.OperatorParameter(name="tag", description="the tag of the orders to cancel", required=False, type=str, default=None), + dsl_interpreter.OperatorParameter(name="exchange_order_ids", description="the exchange id of the orders to cancel", required=False, type=list[str], default=None), + ] + + def get_dependencies(self) -> typing.List[dsl_interpreter.InterpreterDependency]: + local_dependencies = [] + if symbol := self.get_input_value_by_parameter().get("symbol"): + local_dependencies.append(octobot_trading.dsl.SymbolDependency(symbol=symbol)) + return super().get_dependencies() + local_dependencies + + async def pre_compute(self) -> None: + await super().pre_compute() + if exchange_manager is None: + raise octobot_commons.errors.DSLInterpreterError( + "exchange_manager is required for cancel_order operator" + ) + cancelled_order_ids = [] + param_by_name = self.get_computed_value_by_parameter() + if side := param_by_name.get("side"): + side = octobot_trading.enums.TradeOrderSide(side) + exchange_order_ids = param_by_name.get("exchange_order_ids") + to_cancel = [ + order + for order in exchange_manager.exchange_personal_data.orders_manager.get_open_orders( + symbol=param_by_name.get("symbol"), tag=param_by_name.get("tag"), active=None + ) + if ( + not (order.is_cancelled() or order.is_closed()) + and (side is None or (side is order.side)) + and (exchange_order_ids is None or (order.exchange_order_id in exchange_order_ids)) # type: ignore + ) + ] + for order in to_cancel: + if trading_mode: + cancelled, _ = await trading_mode.cancel_order( + order, wait_for_cancelling=wait_for_cancelling, dependencies=dependencies + ) + else: + cancelled = await exchange_manager.trader.cancel_order( + order, wait_for_cancelling=wait_for_cancelling + ) + if cancelled: + cancelled_order_ids.append(order.exchange_order_id) + if not cancelled_order_ids: + description = {k: v for k, v in param_by_name.items() if v} + raise octobot_trading.errors.OrderDescriptionNotFoundError( + f"No [{exchange_manager.exchange_name}] order found matching {description}" + ) + self.value = {CANCELLED_ORDERS_KEY: cancelled_order_ids} + + + return [ + _CancelOrderOperator, + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/copy_exchange_account_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/copy_exchange_account_operators.py new file mode 100644 index 0000000000..485abcacba --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/copy_exchange_account_operators.py @@ -0,0 +1,191 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import typing +import time +import enum + +import octobot_commons.constants as commons_constants +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.errors as commons_errors + +import octobot_commons.symbols.symbol_util as symbol_util + +import octobot_trading.dsl as trading_dsl +import octobot_trading.exchanges +import octobot_trading.modes + +import octobot_copy.copiers +import octobot_copy.entities +import octobot_copy.constants + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.create_order_operators as create_order_operators + + +class CopyExchangeAccountOperatorNames(enum.StrEnum): + COPY_EXCHANGE_ACCOUNT = "copy_exchange_account" + + +def create_copy_exchange_account_operators( + copier_exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager] = None, + copier_trading_mode: typing.Optional[octobot_trading.modes.AbstractTradingMode] = None, +) -> list[type[dsl_interpreter.PreComputingCallOperator]]: + class _CopyExchangeAccountOperator(dsl_interpreter.PreComputingCallOperator, dsl_interpreter.ReCallableOperatorMixin): + DESCRIPTION = ( + "Rebalances the copier exchange toward the reference account allocation. " + "strategy_id identifies the copied community strategy (first parameter; used for copy-trading dependencies). " + "reference_account is JSON for octobot_copy.entities.Account (portfolio content, orders, positions). " + "account_copy_settings is optional JSON for AccountCopySettings; " + "reference market comes from the copier portfolio." + ) + EXAMPLE = ( + r"""copy_exchange_account(strategy_id='community-strategy-1', reference_market='USDT', """ + r"""reference_account='{"content":{"BTC":{"available":"0.01","total":"0.01"}}}', """ + r"""account_copy_settings='{"reference_market_ratio":"1","allow_skip_asset":false}')""" + ) + + @staticmethod + def get_library() -> str: + return commons_constants.CONTEXTUAL_OPERATORS_LIBRARY + + @staticmethod + def get_name() -> str: + return CopyExchangeAccountOperatorNames.COPY_EXCHANGE_ACCOUNT.value + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter( + name="strategy_id", + description=( + "Identifier of the copied community strategy." + ), + required=True, + type=str, + ), + dsl_interpreter.OperatorParameter( + name="reference_market", + description="Quote asset symbol for rebalance (e.g. USDT).", + required=True, + type=str, + ), + dsl_interpreter.OperatorParameter( + name="reference_account", + description=( + "JSON string for Account: fields content (asset -> available/total amounts), " + "optional orders and positions lists." + ), + required=True, + type=str, + ), + dsl_interpreter.OperatorParameter( + name="account_copy_settings", + description=( + "JSON string for AccountCopySettings: optional keys " + "synchronization_policy, rebalance_trigger_min_ratio, " + "quote_asset_rebalance_ratio_threshold, reference_market_ratio, " + "sell_untargeted_traded_coins, min_order_size_margin, allow_skip_asset " + "(omit keys to use defaults)." + ), + required=False, + type=str, + ), + ] + super().get_re_callable_parameters() + + def _parse_reference_account(self, raw: typing.Any) -> octobot_copy.entities.Account: + if raw is None: + raise commons_errors.InvalidParameterFormatError("reference_account is required") + if isinstance(raw, dict): + payload = raw + elif isinstance(raw, str): + try: + payload = json.loads(raw) + except json.JSONDecodeError as err: + raise commons_errors.InvalidParameterFormatError( + f"Invalid reference_account JSON: {err}" + ) from err + else: + raise commons_errors.InvalidParameterFormatError( + f"reference_account must be a JSON string or object, got {type(raw).__name__}" + ) + if not isinstance(payload, dict): + raise commons_errors.InvalidParameterFormatError( + "reference_account JSON must deserialize to an object" + ) + return octobot_copy.entities.Account.from_dict(payload) + + def get_dependencies(self) -> list[dsl_interpreter.InterpreterDependency]: + dependencies = super().get_dependencies() + params = self.get_computed_value_by_parameter() + try: + reference_account = self._parse_reference_account(params.get("reference_account")) + except commons_errors.InvalidParameterFormatError: + reference_account = None + if strategy_id := params.get("strategy_id"): + # no reference account: a refresh is required to fetch it + refresh_required = reference_account is None + dependencies.append(trading_dsl.CopyTradingDependency( + strategy_id=str(strategy_id), refresh_required=refresh_required + )) + if reference_account is None: + return dependencies + # there are account details: add symbol dependencies + ref_market = params.get("reference_market") + seen: set[str] = set() + for asset in reference_account.content: + if asset == ref_market: + continue + symbol = symbol_util.merge_currencies(asset, ref_market) + if symbol not in seen: + seen.add(symbol) + dependencies.append(trading_dsl.SymbolDependency(symbol=symbol)) + return dependencies + + async def pre_compute(self) -> None: + await super().pre_compute() + execution_time = time.time() + if copier_exchange_manager is None: + raise commons_errors.DSLInterpreterError( + "copier_exchange_manager is required in context to execute copy_exchange_account" + ) + params = self.get_computed_value_by_parameter() + reference_account = self._parse_reference_account(params.get("reference_account")) + copy_settings = octobot_copy.entities.parse_account_copy_settings( + params.get("account_copy_settings") + ) + account_copier = octobot_copy.copiers.create_account_copier( + reference_account, + copy_settings, + copier_exchange_manager, + copier_trading_mode, + ) + copy_result = await account_copier.copy_account() + self.value = self.create_re_callable_result_dict( + keyword=self.get_name(), + waiting_time=octobot_copy.constants.DEFAULT_COPY_WAITING_TIME, + last_execution_time=execution_time, + state={ + create_order_operators.CREATED_ORDERS_KEY: [ + order.to_dict() for order in copy_result.created_orders + ], + }, + ) + + return [_CopyExchangeAccountOperator] + + +__all__ = ["CopyExchangeAccountOperatorNames", "create_copy_exchange_account_operators"] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/create_order_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/create_order_operators.py new file mode 100644 index 0000000000..9649c67e97 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/create_order_operators.py @@ -0,0 +1,256 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import asyncio +import json + +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.signals as commons_signals +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.tentacles_management as tentacles_management + +import octobot_trading.personal_data +import octobot_trading.exchanges +import octobot_trading.enums +import octobot_trading.modes +import octobot_trading.errors +import octobot_trading.dsl + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +CREATED_ORDERS_KEY = "created_orders" + + +_CANCEL_POLICIES_CACHE = {} +def _parse_cancel_policy(kwargs: dict) -> typing.Optional[octobot_trading.personal_data.OrderCancelPolicy]: + if policy := kwargs.get("cancel_policy"): + lowercase_policy = policy.casefold() + if not _CANCEL_POLICIES_CACHE: + _CANCEL_POLICIES_CACHE.update({ + policy.__name__.casefold(): policy + for policy in tentacles_management.get_all_classes_from_parent(octobot_trading.personal_data.OrderCancelPolicy) + }) + try: + policy_class = _CANCEL_POLICIES_CACHE[lowercase_policy] + policy_params = kwargs.get("cancel_policy_params") + parsed_policy_params = json.loads(policy_params.replace("'", '"')) if isinstance(policy_params, str) else policy_params + return policy_class(**(parsed_policy_params or {})) # type: ignore + except KeyError: + raise octobot_commons.errors.InvalidParametersError( + f"Unknown cancel policy: {policy}. Available policies: {', '.join(_CANCEL_POLICIES_CACHE.keys())}" + ) + return None + + +class CreateOrderOperator(exchange_operator.ExchangeOperator): + def __init__(self, *parameters: dsl_interpreter.OperatorParameterType, **kwargs: typing.Any): + super().__init__(*parameters, **kwargs) + self.param_by_name: dict[str, dsl_interpreter.ComputedOperatorParameterType] = dsl_interpreter.UNINITIALIZED_VALUE # type: ignore + + @staticmethod + def get_library() -> str: + # this is a contextual operator, so it should not be included by default in the get_all_operators function return values + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return ( + cls.get_first_required_parameters() + + cls.get_second_required_parameters() + + cls.get_last_parameters() + ) + + @classmethod + def get_first_required_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="side", description="the side of the order", required=True, type=str), + dsl_interpreter.OperatorParameter(name="symbol", description="the symbol of the order", required=True, type=str), + dsl_interpreter.OperatorParameter(name="amount", description="the amount of the order", required=True, type=float), + ] + + @classmethod + def get_second_required_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [] + + @classmethod + def get_last_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="reduce_only", description="whether the order is reduce only", required=False, type=bool), + dsl_interpreter.OperatorParameter(name="tag", description="the tag of the order", required=False, type=str), + dsl_interpreter.OperatorParameter(name="take_profit_prices", description="the price or price offset of the take profit order(s)", required=False, type=list[str]), + dsl_interpreter.OperatorParameter(name="take_profit_volume_percents", description="% volume of the entry for each take profit", required=False, type=list[float]), + dsl_interpreter.OperatorParameter(name="stop_loss_price", description="the stop loss price or price offset of the order", required=False, type=str), + dsl_interpreter.OperatorParameter(name="trailing_profile", description="the trailing profile of the order", required=False, type=dict), + dsl_interpreter.OperatorParameter(name="cancel_policy", description="the cancel policy of the order", required=False, type=str), + dsl_interpreter.OperatorParameter(name="cancel_policy_params", description="the cancel policy params of the order", required=False, type=dict), + dsl_interpreter.OperatorParameter(name="active_order_swap_strategy", description="the type of the active order swap strategy", required=False, type=str), + dsl_interpreter.OperatorParameter(name="active_order_swap_strategy_params", description="the params of the active order swap strategy", required=False, type=dict), + dsl_interpreter.OperatorParameter(name="params", description="additional params for the order", required=False, type=dict), + dsl_interpreter.OperatorParameter(name="allow_holdings_adaptation", description="allow reducing the order amount to account for available holdings", required=False, type=bool), + ] + + def get_dependencies(self) -> typing.List[dsl_interpreter.InterpreterDependency]: + local_dependencies = [] + if symbol := self.get_input_value_by_parameter().get("symbol"): + local_dependencies.append(octobot_trading.dsl.SymbolDependency(symbol=symbol)) + return super().get_dependencies() + local_dependencies + + async def create_base_orders_and_associated_elements(self) -> list[octobot_trading.personal_data.Order]: + order_factory = self.get_order_factory() + maybe_cancel_policy = _parse_cancel_policy(self.param_by_name) + try: + amount = self.param_by_name["amount"] + if not amount: + raise octobot_commons.errors.InvalidParameterFormatError("amount is missing") + orders = await order_factory.create_base_orders_and_associated_elements( + order_type=self.param_by_name["order_type"], + symbol=self.param_by_name["symbol"], + side=octobot_trading.enums.TradeOrderSide(self.param_by_name["side"]), + amount=amount, + price=self.param_by_name.get("price", None), + reduce_only=self.param_by_name.get("reduce_only", False), + allow_holdings_adaptation=self.param_by_name.get("allow_holdings_adaptation", False), + tag=self.param_by_name.get("tag", None), + exchange_creation_params=self.param_by_name.get("params", None), + cancel_policy=maybe_cancel_policy, + stop_loss_price=self.param_by_name.get("stop_loss_price", None), + take_profit_prices=self.param_by_name.get("take_profit_prices", None), + take_profit_volume_percents=self.param_by_name.get("take_profit_volume_percents", None), + trailing_profile_type=self.param_by_name.get("trailing_profile", None), + active_order_swap_strategy_type=self.param_by_name.get( + "active_order_swap_strategy", octobot_trading.personal_data.StopFirstActiveOrderSwapStrategy.__name__ + ), + active_order_swap_strategy_params=self.param_by_name.get("active_order_swap_strategy_params", {}), + ) + except octobot_trading.errors.UnSupportedSymbolError as e: + raise octobot_commons.errors.InvalidParametersError( + f"Invalid parameters: {e}" + ) from e + except octobot_trading.errors.InvalidArgumentError as e: + raise octobot_commons.errors.InvalidParameterFormatError(e) from e + except asyncio.TimeoutError as e: + raise octobot_commons.errors.DSLInterpreterError( + f"Impossible to create order for {self.param_by_name["symbol"]} on {order_factory.exchange_manager.exchange_name}: {e} and is necessary to compute the order details." + ) + return orders + + async def pre_compute(self) -> None: + await super().pre_compute() + self.param_by_name = self.get_computed_value_by_parameter() + self.param_by_name["order_type"] = self.get_order_type() + order_factory = self.get_order_factory() + orders = await self.create_base_orders_and_associated_elements() + created_orders = [] + for order in orders: + created_order = await order_factory.create_order_on_exchange(order) + if created_order is None: + raise octobot_commons.errors.DSLInterpreterError( + f"Failed to create {order.symbol} {order.order_type.name} order on {order.exchange_manager.exchange_name}" + ) + else: + created_orders.append(created_order) + self.value = {CREATED_ORDERS_KEY: [order.to_dict() for order in created_orders]} + + def get_order_type(self) -> octobot_trading.enums.TraderOrderType: + raise NotImplementedError("get_order_type must be implemented") + + def get_order_factory(self) -> octobot_trading.personal_data.OrderFactory: + raise NotImplementedError("get_order_factory must be implemented") + +def create_create_order_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], + trading_mode: typing.Optional[octobot_trading.modes.AbstractTradingMode] = None, + dependencies: typing.Optional[commons_signals.SignalDependencies] = None, + wait_for_creation: bool = True, + try_to_handle_unconfigured_symbol: bool = False, +) -> list[type[CreateOrderOperator]]: + _order_factory = octobot_trading.personal_data.OrderFactory( + exchange_manager, trading_mode, dependencies, wait_for_creation, try_to_handle_unconfigured_symbol + ) + + class _FactoryMixin: + def get_order_factory(self) -> octobot_trading.personal_data.OrderFactory: + try: + _order_factory.validate() + except ValueError as e: + raise octobot_commons.errors.DSLInterpreterError(e) from e + return _order_factory + + class _MarketOrderOperator(_FactoryMixin, CreateOrderOperator): + DESCRIPTION = "Creates a market order" + EXAMPLE = "market('buy', 'BTC/USDT', 0.01)" + + @staticmethod + def get_name() -> str: + return "market" + + def get_order_type(self) -> octobot_trading.enums.TraderOrderType: + return ( + octobot_trading.enums.TraderOrderType.BUY_MARKET + if self.param_by_name["side"] == octobot_trading.enums.TradeOrderSide.BUY.value else octobot_trading.enums.TraderOrderType.SELL_MARKET + ) + + class _LimitOrderOperator(_FactoryMixin, CreateOrderOperator): + DESCRIPTION = "Creates a limit order" + EXAMPLE = "limit('buy', 'BTC/USDT', 0.01, price='-1%')" + + @staticmethod + def get_name() -> str: + return "limit" + + @classmethod + def get_second_required_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="price", description="the limit price of the order: a flat or offset price", required=True, type=str), + ] + + def get_order_type(self) -> octobot_trading.enums.TraderOrderType: + return ( + octobot_trading.enums.TraderOrderType.BUY_LIMIT + if self.param_by_name["side"] == octobot_trading.enums.TradeOrderSide.BUY.value else octobot_trading.enums.TraderOrderType.SELL_LIMIT + ) + + class _StopLossOrderOperator(_FactoryMixin, CreateOrderOperator): + DESCRIPTION = "Creates a stop market order" + EXAMPLE = "stop_loss('buy', 'BTC/USDT', 0.01, price='-1%')" + + @staticmethod + def get_name() -> str: + return "stop_loss" + + + async def pre_compute(self) -> None: + self.get_order_factory()._ensure_supported_order_type( + octobot_trading.enums.TraderOrderType.STOP_LOSS + ) + return await super().pre_compute() + + @classmethod + def get_second_required_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="price", description="the trigger price of the order: a flat or offset price", required=True, type=str), + ] + + def get_order_type(self) -> octobot_trading.enums.TraderOrderType: + return octobot_trading.enums.TraderOrderType.STOP_LOSS + + return [ + _MarketOrderOperator, + _LimitOrderOperator, + _StopLossOrderOperator, + ] \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/fetch_order_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/fetch_order_operators.py new file mode 100644 index 0000000000..9cc40e8ee0 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/fetch_order_operators.py @@ -0,0 +1,191 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.exchanges +import octobot_trading.personal_data as personal_data +import octobot_trading.dsl + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +def _trade_to_order_dict_for_fetch(order_trade: personal_data.Trade) -> dict: + order_dict = dict(order_trade.to_dict()) + order_dict[trading_enums.ExchangeConstantsOrderColumns.FILLED.value] = ( + order_trade.executed_quantity if order_trade.has_been_executed() else trading_constants.ZERO + ) + return order_dict + + +def _try_simulated_fetch_order_from_trades( + exchange_manager: octobot_trading.exchanges.ExchangeManager, + symbol: str, + exchange_order_id: str, +) -> typing.Optional[dict]: + matching_trades = [ + trade_item + for trade_item in exchange_manager.exchange_personal_data.trades_manager.get_trades( + exchange_order_id=exchange_order_id + ) + if trade_item.symbol == symbol + ] + if not matching_trades: + return None + selected_trade = max(matching_trades, key=lambda trade_item: trade_item.get_time()) + return personal_data.create_order_from_dict( + exchange_manager.trader, + _trade_to_order_dict_for_fetch(selected_trade), + ).to_dict() + + +def _resolve_simulated_fetch_order_dict( + exchange_manager: octobot_trading.exchanges.ExchangeManager, + symbol: str, + exchange_order_id: str, + raise_if_not_found: bool = False, +) -> typing.Optional[dict]: + orders_manager = exchange_manager.exchange_personal_data.orders_manager + try: + managed_order = orders_manager.get_order(None, exchange_order_id=exchange_order_id) + except KeyError: + from_trades = _try_simulated_fetch_order_from_trades( + exchange_manager, symbol, exchange_order_id + ) + if from_trades is not None: + return from_trades + if raise_if_not_found: + raise octobot_commons.errors.InvalidParametersError( + f"No [{exchange_manager.exchange_name}] order found for symbol={symbol!r} " + f"exchange_order_id={exchange_order_id!r}" + ) from None + return None + if managed_order.symbol != symbol: + raise octobot_commons.errors.InvalidParametersError( + f"Order exchange_order_id={exchange_order_id!r} is for symbol " + f"{managed_order.symbol!r}, not {symbol!r}" + ) + return managed_order.to_dict() + + +async def _resolve_real_trading_fetch_order_dict( + exchange_manager: octobot_trading.exchanges.ExchangeManager, + symbol: str, + exchange_order_id: str, + raise_if_not_found: bool = False, +) -> typing.Optional[dict]: + if raw_order := await exchange_manager.exchange.get_order(exchange_order_id, symbol=symbol): + return personal_data.create_order_instance_from_raw( + exchange_manager.trader, raw_order + ).to_dict() + if raise_if_not_found: + raise octobot_commons.errors.InvalidParametersError( + f"No [{exchange_manager.exchange_name}] order found for symbol={symbol!r} " + f"exchange_order_id={exchange_order_id!r}" + ) from None + return None + + +def create_fetch_order_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager] +) -> list: + + class _FetchOrderOperator(exchange_operator.ExchangeOperator): + DESCRIPTION = ( + "Fetches one order from the exchange by symbol and exchange order id. " + "When the order cannot be resolved (simulated: not in orders or matching trades; " + "real: exchange returns no order), the result is None unless raise_if_not_found is True, " + "in which case an error is raised." + ) + EXAMPLE = ( + "fetch_order('BTC/USDT', exchange_order_id='12345') " + "or fetch_order('BTC/USDT', exchange_order_id='12345', raise_if_not_found=True)" + ) + + @staticmethod + def get_name() -> str: + return "fetch_order" + + @staticmethod + def get_library() -> str: + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter( + name="symbol", description="the symbol of the order", required=True, type=str, default=None + ), + dsl_interpreter.OperatorParameter( + name="exchange_order_id", + description="the exchange id of the order", + required=True, + type=str, + default=None, + ), + dsl_interpreter.OperatorParameter( + name="raise_if_not_found", + description=( + "if True, raise when the order cannot be resolved; if False, return None." + ), + required=False, + type=bool, + default=False, + ), + ] + + def get_dependencies(self) -> typing.List[dsl_interpreter.InterpreterDependency]: + local_dependencies = [] + if symbol := self.get_input_value_by_parameter().get("symbol"): + local_dependencies.append(octobot_trading.dsl.SymbolDependency(symbol=symbol)) + return super().get_dependencies() + local_dependencies + + async def pre_compute(self) -> None: + await super().pre_compute() + if exchange_manager is None or exchange_manager.trader is None: + raise octobot_commons.errors.DSLInterpreterError( + "exchange_manager and exchange_manager.trader are required for fetch_order operator" + ) + param_by_name = self.get_computed_value_by_parameter() + symbol = param_by_name.get("symbol") + exchange_order_id = param_by_name.get("exchange_order_id") + raise_if_not_found = bool(param_by_name.get("raise_if_not_found", False)) + if not symbol or not exchange_order_id: + raise octobot_commons.errors.DSLInterpreterError( + "symbol and exchange_order_id are required for fetch_order operator" + ) + if exchange_manager.is_trader_simulated: + self.value = _resolve_simulated_fetch_order_dict( + exchange_manager, + symbol, + exchange_order_id, + raise_if_not_found=raise_if_not_found, + ) + else: + self.value = await _resolve_real_trading_fetch_order_dict( + exchange_manager, + symbol, + exchange_order_id, + raise_if_not_found=raise_if_not_found, + ) + + return [ + _FetchOrderOperator, + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/futures_contracts_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/futures_contracts_operators.py new file mode 100644 index 0000000000..a484b72858 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/futures_contracts_operators.py @@ -0,0 +1,69 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import decimal + +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.exchanges + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +class FuturesContractsOperator(exchange_operator.ExchangeOperator): + @staticmethod + def get_library() -> str: + # this is a contextual operator, so it should not be included by default in the get_all_operators function return values + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + +def create_futures_contracts_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], +) -> typing.List[type[FuturesContractsOperator]]: + + class _SetLeverageOperator(FuturesContractsOperator): + DESCRIPTION = "Sets the leverage for the futures contract" + EXAMPLE = "set_leverage('BTC/USDT:USDT', 10)" + + @staticmethod + def get_name() -> str: + return "set_leverage" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="symbol", description="the symbol of the futures contract", required=True, type=str), + dsl_interpreter.OperatorParameter(name="leverage", description="the leverage to set", required=True, type=float), + ] + + async def pre_compute(self) -> None: + await super().pre_compute() + if exchange_manager is None: + raise octobot_commons.errors.DSLInterpreterError( + "exchange_manager is required for set_leverage operator" + ) + param_by_name = self.get_computed_value_by_parameter() + leverage = decimal.Decimal(str(param_by_name["leverage"])) + await exchange_manager.trader.set_leverage( + param_by_name["symbol"], + None, + leverage, + ) + self.value = float(leverage) + + + return [_SetLeverageOperator] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/portfolio_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/portfolio_operators.py new file mode 100644 index 0000000000..7464a4e48b --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_personal_data_operators/portfolio_operators.py @@ -0,0 +1,135 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import dataclasses +import decimal + +import octobot_commons.dataclasses +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.personal_data +import octobot_trading.exchanges +import octobot_trading.api + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +CREATED_WITHDRAWALS_KEY = "created_withdrawals" + + +@dataclasses.dataclass +class WithdrawFundsParams(octobot_commons.dataclasses.FlexibleDataclass): + asset: str + network: str # network to withdraw to + address: str # recipient address of the withdrawal + amount: typing.Optional[float] = None # defaults to all available balance if unspecified + tag: str = "" + params: dict = dataclasses.field(default_factory=dict) # extra parameters specific to the exchange API endpoint + + +class PortfolioOperator(exchange_operator.ExchangeOperator): + @staticmethod + def get_library() -> str: + # this is a contextual operator, so it should not be included by default in the get_all_operators function return values + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="asset", description="the asset to get the value for", required=False, type=str), + ] + + +def create_portfolio_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], +) -> typing.List[type[PortfolioOperator]]: + + def _get_asset_holdings(asset: str) -> octobot_trading.personal_data.Asset: + if exchange_manager is None: + raise octobot_commons.errors.DSLInterpreterError( + "exchange_manager is required for portfolio operators" + ) + return octobot_trading.api.get_portfolio_currency(exchange_manager, asset) + + class _TotalOperator(PortfolioOperator): + DESCRIPTION = "Returns the total holdings of the asset in the portfolio" + EXAMPLE = "total('BTC')" + + @staticmethod + def get_name() -> str: + return "total" + + async def pre_compute(self) -> None: + await super().pre_compute() + asset = self.get_computed_parameters()[0] + self.value = float(_get_asset_holdings(asset).total) + + class _AvailableOperator(PortfolioOperator): + DESCRIPTION = "Returns the available holdings of the asset in the portfolio" + EXAMPLE = "available('BTC')" + + @staticmethod + def get_name() -> str: + return "available" + + async def pre_compute(self) -> None: + await super().pre_compute() + asset = self.get_computed_parameters()[0] + self.value = float(_get_asset_holdings(asset).available) + + class _WithdrawOperator(PortfolioOperator): + DESCRIPTION = "Withdraws an asset from the exchange's portfolio. requires ALLOW_FUNDS_TRANSFER env to be True (disabled by default to protect funds)" + EXAMPLE = "withdraw('BTC', 'ethereum', '0x1234567890abcdef1234567890abcdef12345678', 0.1)" + + @staticmethod + def get_name() -> str: + return "withdraw" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="asset", description="the asset to withdraw", required=True, type=str), + dsl_interpreter.OperatorParameter(name="network", description="the network to withdraw to", required=True, type=str), + dsl_interpreter.OperatorParameter(name="address", description="the address to withdraw to", required=True, type=str), + dsl_interpreter.OperatorParameter(name="amount", description="the amount to withdraw", required=False, type=float, default=None), + dsl_interpreter.OperatorParameter(name="tag", description="a tag to associate with the withdrawal", required=False, type=str, default=None), + dsl_interpreter.OperatorParameter(name="params", description="extra parameters specific to the exchange API endpoint", required=False, type=dict), + ] + + + async def pre_compute(self) -> None: + await super().pre_compute() + if exchange_manager is None: + raise octobot_commons.errors.DSLInterpreterError( + "exchange_manager is required for withdraw operator" + ) + param_by_name = self.get_computed_value_by_parameter() + withdraw_funds_params = WithdrawFundsParams.from_dict(param_by_name) + amount = withdraw_funds_params.amount or ( + octobot_trading.api.get_portfolio_currency(exchange_manager, withdraw_funds_params.asset).available + ) + created_withdrawal = await exchange_manager.trader.withdraw( + withdraw_funds_params.asset, + decimal.Decimal(str(amount)), + withdraw_funds_params.network, + withdraw_funds_params.address, + tag=withdraw_funds_params.tag, + params=withdraw_funds_params.params + ) + self.value = {CREATED_WITHDRAWALS_KEY: [created_withdrawal]} + + return [_TotalOperator, _AvailableOperator, _WithdrawOperator] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/__init__.py new file mode 100644 index 0000000000..5e93951893 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/__init__.py @@ -0,0 +1,39 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.ohlcv_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.ohlcv_operators import ( + OHLCVOperator, + ExchangeDataDependency, + create_ohlcv_operators, +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.ticker_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.ticker_operators import ( + create_ticker_operators, +) +import tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.symbol_operators +from tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.symbol_operators import ( + create_symbol_operators, +) + +__all__ = [ + "OHLCVOperator", + "ExchangeDataDependency", + "create_ohlcv_operators", + "create_ticker_operators", + "create_symbol_operators", +] \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/ohlcv_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/ohlcv_operators.py new file mode 100644 index 0000000000..34e54d40c8 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/ohlcv_operators.py @@ -0,0 +1,257 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import dataclasses +import numpy as np + +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.logging +import octobot_commons.enums as commons_enums +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.exchanges +import octobot_trading.exchange_data +import octobot_trading.api +import octobot_trading.constants +import octobot_trading.dsl + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +@dataclasses.dataclass +class ExchangeDataDependency(octobot_trading.dsl.SymbolDependency): + data_source: str = octobot_trading.constants.OHLCV_CHANNEL + + def __hash__(self) -> int: + return hash((self.symbol, self.time_frame, self.data_source)) + + +class OHLCVOperator(exchange_operator.ExchangeOperator): + @staticmethod + def get_library() -> str: + # this is a contextual operator, so it should not be included by default in the get_all_operators function return values + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="symbol", description="the symbol to get the OHLCV data for", required=False, type=str), + dsl_interpreter.OperatorParameter(name="time_frame", description="the time frame to get the OHLCV data for", required=False, type=str), + ] + + def get_symbol_and_time_frame(self) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: + if parameters := self.get_computed_parameters(): + symbol = parameters[0] if len(parameters) > 0 else None + time_frame = parameters[1] if len(parameters) > 1 else None + return ( + str(symbol) if symbol is not None else None, + str(time_frame) if time_frame is not None else None + ) + return None, None + +def create_ohlcv_operators( + exchange_manager: typing.Optional[octobot_trading.exchanges.ExchangeManager], + symbol: typing.Optional[str], + time_frame: typing.Optional[str], + candle_manager_by_time_frame_by_symbol: typing.Optional[ + typing.Dict[str, typing.Dict[str, octobot_trading.exchange_data.CandlesManager]] + ] = None +) -> typing.List[type[OHLCVOperator]]: + + def _get_candles_values_with_latest_kline_if_available( + input_symbol: typing.Optional[str], input_time_frame: typing.Optional[str], + value_type: commons_enums.PriceIndexes, limit: int = -1 + ) -> np.ndarray: + if exchange_manager is None and candle_manager_by_time_frame_by_symbol is None: + raise octobot_commons.errors.DSLInterpreterError( + "exchange_manager or candle_manager_by_time_frame_by_symbol must be provided" + ) + _symbol = input_symbol or symbol + _time_frame = input_time_frame or time_frame + if exchange_manager is None: + if candle_manager_by_time_frame_by_symbol is not None: + candles_manager = candle_manager_by_time_frame_by_symbol[_time_frame][_symbol] + symbol_data = None + else: + symbol_data = octobot_trading.api.get_symbol_data( + exchange_manager, _symbol, allow_creation=False + ) + candles_manager = octobot_trading.api.get_symbol_candles_manager( + symbol_data, _time_frame + ) + candles_values = _get_candles_values(candles_manager, value_type, limit) + if symbol_data is not None and (kline := _get_kline(symbol_data, _time_frame)): + kline_time = kline[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + last_candle_time = candles_manager.time_candles[candles_manager.time_candles_index - 1] + if kline_time == last_candle_time: + # kline is an update of the last candle + return _adapt_last_candle_value(candles_manager, value_type, candles_values, kline) + else: + tf_seconds = commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(_time_frame)] * octobot_commons.constants.MINUTE_TO_SECONDS + if kline_time == last_candle_time + tf_seconds: + # kline is a new candle + kline_value = kline[value_type.value] + return np.append(candles_values[1:], kline_value) + else: + octobot_commons.logging.get_logger(OHLCVOperator.__name__).error( + f"{exchange_manager.exchange_name + '' if exchange_manager is not None else ''}{_symbol} {_time_frame} " + f"kline time ({kline_time}) is not equal to last candle time not the last time + {_time_frame} " + f"({last_candle_time} + {tf_seconds}) seconds. Kline has been ignored." + ) + return candles_values + + def _static_get_dependencies() -> typing.List[ExchangeDataDependency]: + return [ + ExchangeDataDependency( + symbol=symbol, + time_frame=time_frame, + ) + ] if symbol else [] + + class _LocalOHLCVOperator(OHLCVOperator): + PRICE_INDEX: commons_enums.PriceIndexes = None # type: ignore + + def get_dependencies(self) -> typing.List[dsl_interpreter.InterpreterDependency]: + local_dependencies = _static_get_dependencies() + param_by_name = self.get_input_value_by_parameter() + if symbol := param_by_name.get("symbol"): + symbol_dep = ExchangeDataDependency( + symbol=symbol, + time_frame=param_by_name.get("time_frame"), + ) + if symbol_dep not in local_dependencies: + local_dependencies.append(symbol_dep) + return super().get_dependencies() + local_dependencies + + async def pre_compute(self) -> None: + await super().pre_compute() + self.value = _get_candles_values_with_latest_kline_if_available(*self.get_symbol_and_time_frame(), self.PRICE_INDEX, -1) + + class _OpenPriceOperator(_LocalOHLCVOperator): + DESCRIPTION = "Returns the candle's open price as array of floats" + EXAMPLE = "open('BTC/USDT', '1h')" + + PRICE_INDEX = commons_enums.PriceIndexes.IND_PRICE_OPEN + + @staticmethod + def get_name() -> str: + return "open" + + class _HighPriceOperator(_LocalOHLCVOperator): + DESCRIPTION = "Returns the candle's high price as array of floats" + EXAMPLE = "high('BTC/USDT', '1h')" + + PRICE_INDEX = commons_enums.PriceIndexes.IND_PRICE_HIGH + + @staticmethod + def get_name() -> str: + return "high" + + class _LowPriceOperator(_LocalOHLCVOperator): + DESCRIPTION = "Returns the candle's low price as array of floats" + EXAMPLE = "low('BTC/USDT', '1h')" + + PRICE_INDEX = commons_enums.PriceIndexes.IND_PRICE_LOW + + @staticmethod + def get_name() -> str: + return "low" + + class _ClosePriceOperator(_LocalOHLCVOperator): + DESCRIPTION = "Returns the candle's close price as array of floats" + EXAMPLE = "close('BTC/USDT', '1h')" + + PRICE_INDEX = commons_enums.PriceIndexes.IND_PRICE_CLOSE + + @staticmethod + def get_name() -> str: + return "close" + + class _VolumePriceOperator(_LocalOHLCVOperator): + DESCRIPTION = "Returns the candle's volume as array of floats" + EXAMPLE = "volume('BTC/USDT', '1h')" + + PRICE_INDEX = commons_enums.PriceIndexes.IND_PRICE_VOL + + @staticmethod + def get_name() -> str: + return "volume" + + class _TimePriceOperator(_LocalOHLCVOperator): + DESCRIPTION = "Returns the candle's time as array of floats" + EXAMPLE = "time('BTC/USDT', '1h')" + + PRICE_INDEX = commons_enums.PriceIndexes.IND_PRICE_TIME + + @staticmethod + def get_name() -> str: + return "time" + + return [_OpenPriceOperator, _HighPriceOperator, _LowPriceOperator, _ClosePriceOperator, _VolumePriceOperator, _TimePriceOperator] + +def _get_kline( + symbol_data: octobot_trading.exchange_data.ExchangeSymbolData, _time_frame: str +) -> typing.Optional[list]: + try: + return octobot_trading.api.get_symbol_klines(symbol_data, _time_frame) + except KeyError: + return None + + +def _get_candles_values( + candles_manager: octobot_trading.exchange_data.CandlesManager, + candle_value: commons_enums.PriceIndexes, limit: int = -1 +) -> np.ndarray: + match candle_value: + case commons_enums.PriceIndexes.IND_PRICE_CLOSE: + return candles_manager.get_symbol_close_candles(limit) + case commons_enums.PriceIndexes.IND_PRICE_OPEN: + return candles_manager.get_symbol_open_candles(limit) + case commons_enums.PriceIndexes.IND_PRICE_HIGH: + return candles_manager.get_symbol_high_candles(limit) + case commons_enums.PriceIndexes.IND_PRICE_LOW: + return candles_manager.get_symbol_low_candles(limit) + case commons_enums.PriceIndexes.IND_PRICE_VOL: + return candles_manager.get_symbol_volume_candles(limit) + case commons_enums.PriceIndexes.IND_PRICE_TIME: + return candles_manager.get_symbol_time_candles(limit) + case _: + raise octobot_commons.errors.InvalidParametersError(f"Invalid candle value: {candle_value}") + +def _adapt_last_candle_value( + candles_manager: octobot_trading.exchange_data.CandlesManager, + candle_value: commons_enums.PriceIndexes, + candles_values: np.ndarray, + kline: list +) -> np.ndarray: + match candle_value: + case commons_enums.PriceIndexes.IND_PRICE_CLOSE: + candles_values[candles_manager.close_candles_index - 1] = kline[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value] + case commons_enums.PriceIndexes.IND_PRICE_OPEN: + candles_values[candles_manager.open_candles_index - 1] = kline[commons_enums.PriceIndexes.IND_PRICE_OPEN.value] + case commons_enums.PriceIndexes.IND_PRICE_HIGH: + candles_values[candles_manager.high_candles_index - 1] = kline[commons_enums.PriceIndexes.IND_PRICE_HIGH.value] + case commons_enums.PriceIndexes.IND_PRICE_LOW: + candles_values[candles_manager.low_candles_index - 1] = kline[commons_enums.PriceIndexes.IND_PRICE_LOW.value] + case commons_enums.PriceIndexes.IND_PRICE_VOL: + candles_values[candles_manager.volume_candles_index - 1] = kline[commons_enums.PriceIndexes.IND_PRICE_VOL.value] + case commons_enums.PriceIndexes.IND_PRICE_TIME: + # nothing to do for time (this value is constant) + pass + case _: + raise octobot_commons.errors.InvalidParametersError(f"Invalid candle value: {candle_value}") + return candles_values diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/symbol_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/symbol_operators.py new file mode 100644 index 0000000000..e1a6ba6bb9 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/symbol_operators.py @@ -0,0 +1,89 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing + +import octobot_commons.constants as commons_constants +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.enums as trading_enums + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +class _SymbolOperatorHost(typing.Protocol): + triggered_symbol: str + exchange_manager: typing.Any + + +def create_symbol_operators( + host: _SymbolOperatorHost, +) -> list[type[exchange_operator.ExchangeOperator]]: + return [ + _triggered_symbol_operator(host), + _market_expiry_operator(host), + ] + + +def _triggered_symbol_operator( + host: _SymbolOperatorHost, +) -> type[exchange_operator.ExchangeOperator]: + class _TriggeredSymbolOperator(exchange_operator.ExchangeOperator): + DESCRIPTION = "Returns the symbol that triggered the current DSL execution" + EXAMPLE = "triggered_symbol()" + + @staticmethod + def get_library() -> str: + return commons_constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list: + return [] + + @staticmethod + def get_name() -> str: + return "triggered_symbol" + + async def pre_compute(self) -> None: + await super().pre_compute() + self.value = host.triggered_symbol + + return _TriggeredSymbolOperator + + +def _market_expiry_operator( + host: _SymbolOperatorHost, +) -> type[exchange_operator.ExchangeOperator]: + class _MarketExpiryOperator(exchange_operator.ExchangeOperator): + DESCRIPTION = "Returns the expiry timestamp in milliseconds for the given symbol's market, or None" + EXAMPLE = "market_expiry(triggered_symbol())" + + @classmethod + def get_parameters(cls) -> list: + return [dsl_interpreter.OperatorParameter("symbol", "The market symbol", True, str)] + + @staticmethod + def get_name() -> str: + return "market_expiry" + + async def pre_compute(self) -> None: + await super().pre_compute() + symbol = self.get_computed_parameters()[0] + markets = host.exchange_manager.exchange.connector.client.markets or {} + self.value = (markets.get(symbol) or {}).get( + trading_enums.ExchangeConstantsMarketStatusColumns.EXPIRY.value + ) + + return _MarketExpiryOperator diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/ticker_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/ticker_operators.py new file mode 100644 index 0000000000..8ec2dc6908 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/exchange_public_data_operators/ticker_operators.py @@ -0,0 +1,128 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_operator as exchange_operator + + +TICKER_CLOSE_KEY = "close" +TICKER_OPEN_KEY = "open" +TICKER_HIGH_KEY = "high" +TICKER_LOW_KEY = "low" +TICKER_BASE_VOLUME_KEY = "baseVolume" +TICKER_LAST_KEY = "last" + + +def create_ticker_operators( + tickers_by_symbol: dict[str, dict], +) -> list[type[exchange_operator.ExchangeOperator]]: + + class _TickerOperator(exchange_operator.ExchangeOperator): + TICKER_FIELD: str = "" + + @staticmethod + def get_library() -> str: + return octobot_commons.constants.CONTEXTUAL_OPERATORS_LIBRARY + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter( + name="symbol", description="The symbol to get the ticker value for", + required=True, type=str, + ), + ] + + async def pre_compute(self) -> None: + await super().pre_compute() + symbol = self.get_computed_parameters()[0] + ticker = tickers_by_symbol.get(str(symbol)) + if ticker is None: + raise octobot_commons.errors.DSLInterpreterError( + f"No ticker data available for symbol '{symbol}'" + ) + value = ticker.get(self.TICKER_FIELD) + if value is None: + raise octobot_commons.errors.DSLInterpreterError( + f"Ticker field '{self.TICKER_FIELD}' is None for symbol '{symbol}'" + ) + self.value = value + + class _TickerCloseOperator(_TickerOperator): + DESCRIPTION = "Returns the close price from the latest fetched ticker" + EXAMPLE = "ticker_close(triggered_symbol())" + TICKER_FIELD = TICKER_CLOSE_KEY + + @staticmethod + def get_name() -> str: + return "ticker_close" + + class _TickerOpenOperator(_TickerOperator): + DESCRIPTION = "Returns the open price from the latest fetched ticker" + EXAMPLE = "ticker_open(triggered_symbol())" + TICKER_FIELD = TICKER_OPEN_KEY + + @staticmethod + def get_name() -> str: + return "ticker_open" + + class _TickerHighOperator(_TickerOperator): + DESCRIPTION = "Returns the high price from the latest fetched ticker" + EXAMPLE = "ticker_high(triggered_symbol())" + TICKER_FIELD = TICKER_HIGH_KEY + + @staticmethod + def get_name() -> str: + return "ticker_high" + + class _TickerLowOperator(_TickerOperator): + DESCRIPTION = "Returns the low price from the latest fetched ticker" + EXAMPLE = "ticker_low(triggered_symbol())" + TICKER_FIELD = TICKER_LOW_KEY + + @staticmethod + def get_name() -> str: + return "ticker_low" + + class _TickerVolumeOperator(_TickerOperator): + DESCRIPTION = "Returns the base volume from the latest fetched ticker" + EXAMPLE = "ticker_volume(triggered_symbol())" + TICKER_FIELD = TICKER_BASE_VOLUME_KEY + + @staticmethod + def get_name() -> str: + return "ticker_volume" + + class _TickerLastOperator(_TickerOperator): + DESCRIPTION = "Returns the last price from the latest fetched ticker" + EXAMPLE = "ticker_last(triggered_symbol())" + TICKER_FIELD = TICKER_LAST_KEY + + @staticmethod + def get_name() -> str: + return "ticker_last" + + return [ + _TickerCloseOperator, + _TickerOpenOperator, + _TickerHighOperator, + _TickerLowOperator, + _TickerVolumeOperator, + _TickerLastOperator, + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/metadata.json b/packages/tentacles/Meta/DSL_operators/exchange_operators/metadata.json new file mode 100644 index 0000000000..319240c835 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/__init__.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/__init__.py new file mode 100644 index 0000000000..14a64e624d --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/__init__.py @@ -0,0 +1,76 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import pytest_asyncio + +import octobot_commons.constants as commons_constants +import octobot_commons.tests.test_config as test_config +import octobot_backtesting.backtesting as backtesting_module +import octobot_backtesting.constants as backtesting_constants +import octobot_backtesting.time as backtesting_time +import octobot_trading.exchanges as exchanges +import octobot_trading.exchanges.exchange_manager as exchange_manager_module + + +pytestmark = pytest.mark.asyncio + + +@pytest_asyncio.fixture +async def backtesting_config(): + config = dict(test_config.load_test_config()) + config[backtesting_constants.CONFIG_BACKTESTING] = {} + config[backtesting_constants.CONFIG_BACKTESTING][commons_constants.CONFIG_ENABLED_OPTION] = True + return config + + +@pytest_asyncio.fixture +async def fake_backtesting(backtesting_config): + return backtesting_module.Backtesting( + config=backtesting_config, + exchange_ids=[], + matrix_id="", + backtesting_files=[], + ) + + +@pytest_asyncio.fixture +async def backtesting_exchange_manager(backtesting_config, fake_backtesting): + exchange_manager_instance = exchange_manager_module.ExchangeManager( + backtesting_config, "binanceus" + ) + exchange_manager_instance.is_backtesting = True + exchange_manager_instance.use_cached_markets = False + exchange_manager_instance.is_spot_only = True + exchange_manager_instance.is_margin = False + exchange_manager_instance.is_future = False + exchange_manager_instance.backtesting = fake_backtesting + exchange_manager_instance.backtesting.time_manager = backtesting_time.TimeManager( + backtesting_config + ) + await exchange_manager_instance.initialize(exchange_config_by_exchange=None) + try: + yield exchange_manager_instance + finally: + await exchange_manager_instance.stop() + + +@pytest_asyncio.fixture +async def backtesting_trader(backtesting_config, backtesting_exchange_manager): + trader_instance = exchanges.TraderSimulator( + backtesting_config, backtesting_exchange_manager + ) + await trader_instance.initialize() + return backtesting_config, backtesting_exchange_manager, trader_instance diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_cancel_order_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_cancel_order_operators.py new file mode 100644 index 0000000000..1d84ca59f3 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_cancel_order_operators.py @@ -0,0 +1,415 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import mock +import pytest +import pytest_asyncio + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.enums +import octobot_trading.errors as trading_errors +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.dsl + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.cancel_order_operators as cancel_order_operators + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + +SYMBOL = "BTC/USDT" +EXCHANGE_ORDER_ID = "order-123" + + +def _create_mock_order(exchange_order_id: str, side: str = "buy", symbol: str = SYMBOL): + order = mock.Mock() + order.exchange_order_id = exchange_order_id + order.symbol = symbol + order.side = octobot_trading.enums.TradeOrderSide(side) + order.is_cancelled = mock.Mock(return_value=False) + order.is_closed = mock.Mock(return_value=False) + return order + + +@pytest_asyncio.fixture +async def cancel_order_operators_list(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return cancel_order_operators.create_cancel_order_operators(exchange_manager) + + +@pytest_asyncio.fixture +async def cancel_order_operators_with_trading_mode(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + mock_trading_mode = mock.Mock() + mock_trading_mode.cancel_order = mock.AsyncMock(return_value=(True, None)) + mock_dependencies = mock.Mock() + operators_list = cancel_order_operators.create_cancel_order_operators( + exchange_manager, + trading_mode=mock_trading_mode, + dependencies=mock_dependencies, + ) + return operators_list, mock_trading_mode, mock_dependencies + + +@pytest_asyncio.fixture +async def interpreter(cancel_order_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + cancel_order_operators_list + ) + + +@pytest_asyncio.fixture +async def no_exchange_manager_cancel_order_operators_list(): + return cancel_order_operators.create_cancel_order_operators(None) + + +@pytest_asyncio.fixture +async def no_exchange_manager_interpreter(no_exchange_manager_cancel_order_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + no_exchange_manager_cancel_order_operators_list + ) + + +@pytest_asyncio.fixture +async def maybe_exchange_manager_interpreter(request, interpreter, no_exchange_manager_interpreter): + """Parametrized fixture that yields either interpreter or no_exchange_manager_interpreter.""" + selected_value = request.param + if selected_value == "interpreter": + return interpreter + elif selected_value == "no_exchange_manager_interpreter": + return no_exchange_manager_interpreter + raise ValueError(f"Invalid selected_value: {selected_value}") + + +class TestCancelOrderOperator: + @pytest.mark.asyncio + async def test_pre_compute_cancels_matching_orders(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + order1 = _create_mock_order("order-1") + order2 = _create_mock_order("order-2") + mock_orders = [order1, order2] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(side_effect=[True, True]), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1", "order-2"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-1", "order-2"]} + assert cancel_order_mock.await_count == 2 + + @pytest.mark.asyncio + async def test_pre_compute_no_orders_to_cancel(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=[], + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1"], + ) + with pytest.raises( + trading_errors.OrderDescriptionNotFoundError, + match="No .* order found matching", + ): + await operator.pre_compute() + + cancel_order_mock.assert_not_awaited() + + @pytest.mark.asyncio + async def test_pre_compute_filters_by_exchange_order_ids(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + order1 = _create_mock_order("order-1") + order2 = _create_mock_order("order-2") + order3 = _create_mock_order("order-3") + mock_orders = [order1, order2, order3] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(side_effect=[True, True]), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1", "order-3"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-1", "order-3"]} + assert cancel_order_mock.await_count == 2 + + @pytest.mark.asyncio + async def test_pre_compute_filters_by_side(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + buy_order = _create_mock_order("order-1", side="buy") + sell_order = _create_mock_order("order-2", side="sell") + mock_orders = [buy_order, sell_order] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(return_value=True), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + side="buy", + exchange_order_ids=["order-1", "order-2"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-1"]} + cancel_order_mock.assert_awaited_once_with(buy_order, wait_for_cancelling=True) + + @pytest.mark.asyncio + async def test_pre_compute_skips_cancelled_orders(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + order1 = _create_mock_order("order-1") + order1.is_cancelled = mock.Mock(return_value=True) + order2 = _create_mock_order("order-2") + mock_orders = [order1, order2] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(return_value=True), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1", "order-2"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-2"]} + cancel_order_mock.assert_awaited_once_with(order2, wait_for_cancelling=True) + + @pytest.mark.asyncio + async def test_pre_compute_skips_closed_orders(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + order1 = _create_mock_order("order-1") + order1.is_closed = mock.Mock(return_value=True) + order2 = _create_mock_order("order-2") + mock_orders = [order1, order2] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(return_value=True), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1", "order-2"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-2"]} + cancel_order_mock.assert_awaited_once_with(order2, wait_for_cancelling=True) + + @pytest.mark.asyncio + async def test_pre_compute_does_not_append_when_cancel_fails(self, cancel_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + cancel_order_op_class, = cancel_order_operators_list + + order1 = _create_mock_order("order-1") + order2 = _create_mock_order("order-2") + mock_orders = [order1, order2] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(side_effect=[False, True]), + ) as cancel_order_mock: + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1", "order-2"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-2"]} + assert cancel_order_mock.await_count == 2 + + def test_compute_without_pre_compute(self, cancel_order_operators_list): + cancel_order_op_class, = cancel_order_operators_list + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1"], + ) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_pre_compute_uses_trading_mode_when_provided( + self, cancel_order_operators_with_trading_mode, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + operators_list, mock_trading_mode, mock_dependencies = cancel_order_operators_with_trading_mode + cancel_order_op_class, = operators_list + + order1 = _create_mock_order("order-1") + mock_orders = [order1] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ): + operator = cancel_order_op_class( + SYMBOL, + exchange_order_ids=["order-1"], + ) + await operator.pre_compute() + + assert operator.value == {"cancelled_orders": ["order-1"]} + mock_trading_mode.cancel_order.assert_awaited_once() + call_args = mock_trading_mode.cancel_order.call_args + assert call_args[0][0] == order1 + assert call_args[1]["wait_for_cancelling"] is True + assert call_args[1]["dependencies"] is mock_dependencies + + @pytest.mark.asyncio + async def test_cancel_order_call_as_dsl(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + + if SYMBOL not in exchange_manager.client_symbols: + exchange_manager.client_symbols.append(SYMBOL) + if SYMBOL not in exchange_manager.exchange_config.traded_symbol_pairs: + exchange_manager.exchange_config.traded_symbol_pairs.append(SYMBOL) + + limit_buy = trading_personal_data.BuyLimitOrder(exchange_manager.trader) + limit_buy.update( + order_type=octobot_trading.enums.TraderOrderType.BUY_LIMIT, + symbol=SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + current_price=decimal.Decimal("50000"), + quantity=decimal.Decimal("0.01"), + price=decimal.Decimal("50000"), + ) + await exchange_manager.exchange_personal_data.orders_manager.upsert_order_instance(limit_buy) + + open_orders = exchange_manager.exchange_personal_data.orders_manager.get_open_orders(symbol=SYMBOL) + assert len(open_orders) == 1 + assert open_orders[0].exchange_order_id == EXCHANGE_ORDER_ID + + result = await interpreter.interprete( + f"cancel_order('{SYMBOL}', exchange_order_ids=['{EXCHANGE_ORDER_ID}'])" + ) + assert result == {"cancelled_orders": [EXCHANGE_ORDER_ID]} + + open_orders_after = exchange_manager.exchange_personal_data.orders_manager.get_open_orders(symbol=SYMBOL) + assert len(open_orders_after) == 0 + assert limit_buy.is_cancelled() + + @pytest.mark.asyncio + async def test_cancel_order_call_as_dsl_with_side(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + + buy_order = _create_mock_order("order-buy", side="buy") + mock_orders = [buy_order] + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_open_orders", + return_value=mock_orders, + ), mock.patch.object( + exchange_manager.trader, + "cancel_order", + mock.AsyncMock(return_value=True), + ): + result = await interpreter.interprete( + f"cancel_order('{SYMBOL}', side='buy', exchange_order_ids=['order-buy'])" + ) + assert result == {"cancelled_orders": ["order-buy"]} + + +class TestGetDependencies: + """Tests for get_dependencies using DSL syntax and the interpreter.""" + + @pytest.mark.parametrize( + "maybe_exchange_manager_interpreter", + ["interpreter", "no_exchange_manager_interpreter"], + indirect=True, + ) + def test_cancel_order_get_dependencies_from_interpreter( + self, maybe_exchange_manager_interpreter + ): + maybe_exchange_manager_interpreter.prepare( + f"cancel_order('{SYMBOL}', exchange_order_ids=['{EXCHANGE_ORDER_ID}'])" + ) + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=SYMBOL), + ] + symbol = "ETH/USDT" + maybe_exchange_manager_interpreter.prepare( + f"cancel_order('{symbol}', tag='my_tag')" + ) + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=symbol), + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_copy_exchange_account_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_copy_exchange_account_operators.py new file mode 100644 index 0000000000..db597c5f8c --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_copy_exchange_account_operators.py @@ -0,0 +1,139 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json + +import mock +import pytest +import pytest_asyncio + +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.dsl_interpreter.operators.re_callable_operator_mixin as re_callable_operator_mixin +import octobot_trading.dsl + +import octobot_copy.copiers.account_copier as account_copier_module +import octobot_copy.entities as copy_entities + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.copy_exchange_account_operators as copy_exchange_account_operators + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + + +STRATEGY_ID = "test-copy-strategy-id" +REFERENCE_MARKET = "USDT" +REFERENCE_ACCOUNT_JSON = json.dumps( + {"content": {"BTC": {"available": "0.01", "total": "0.01"}}}, + separators=(",", ":"), +) + + +@pytest.fixture +def copy_exchange_interpreter(): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + copy_exchange_account_operators.create_copy_exchange_account_operators(None) + ) + + +@pytest_asyncio.fixture +async def copy_exchange_interpreter_with_exchange_manager(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + copy_exchange_account_operators.create_copy_exchange_account_operators(exchange_manager) + ) + + +class TestCreateCopyExchangeAccountOperators: + def test_returns_single_operator_class(self): + operator_classes = copy_exchange_account_operators.create_copy_exchange_account_operators(None) + assert len(operator_classes) == 1 + assert operator_classes[0].get_name() == "copy_exchange_account" + + +class TestGetName: + def test_operator_name(self): + operator_class = copy_exchange_account_operators.create_copy_exchange_account_operators(None)[0] + assert operator_class.get_name() == "copy_exchange_account" + + +class TestGetParameters: + def test_strategy_id_is_first_and_required(self): + operator_class = copy_exchange_account_operators.create_copy_exchange_account_operators(None)[0] + parameters = operator_class.get_parameters() + strategy_parameter = parameters[0] + assert strategy_parameter.name == "strategy_id" + assert strategy_parameter.required is True + + +class TestGetDependencies: + def test_includes_copy_trading_and_symbol_dependencies(self, copy_exchange_interpreter): + dsl_expression = ( + f"copy_exchange_account(strategy_id='{STRATEGY_ID}', reference_market='{REFERENCE_MARKET}', " + f"reference_account='{REFERENCE_ACCOUNT_JSON}', account_copy_settings='{{}}')" + ) + copy_exchange_interpreter.prepare(dsl_expression) + dependencies = copy_exchange_interpreter.get_dependencies() + # Parsed reference account is present: no refresh needed to fetch it + assert octobot_trading.dsl.CopyTradingDependency(strategy_id=STRATEGY_ID, refresh_required=False) in dependencies + assert octobot_trading.dsl.SymbolDependency(symbol="BTC/USDT") in dependencies + + def test_invalid_reference_account_skips_symbol_dependencies(self, copy_exchange_interpreter): + dsl_expression = ( + f"copy_exchange_account(strategy_id='{STRATEGY_ID}', reference_market='{REFERENCE_MARKET}', " + f"reference_account='not-json', account_copy_settings='{{}}')" + ) + copy_exchange_interpreter.prepare(dsl_expression) + dependencies = copy_exchange_interpreter.get_dependencies() + # Parse failed: refresh required to obtain reference account data + assert octobot_trading.dsl.CopyTradingDependency(strategy_id=STRATEGY_ID, refresh_required=True) in dependencies + assert not any( + isinstance(dependency, octobot_trading.dsl.SymbolDependency) + for dependency in dependencies + ) + + +class TestCopyExchangeAccountCallAsDsl: + @pytest.mark.asyncio + async def test_copy_exchange_account_call_as_dsl( + self, copy_exchange_interpreter_with_exchange_manager, backtesting_trader + ): + _config, _exchange_manager, _trader = backtesting_trader + account_copy_result = copy_entities.AccountCopyResult(created_orders=[]) + + dsl_expression = ( + f"copy_exchange_account(strategy_id='{STRATEGY_ID}', reference_market='{REFERENCE_MARKET}', " + f"reference_account='{REFERENCE_ACCOUNT_JSON}', account_copy_settings='{{}}')" + ) + + with mock.patch.object( + account_copier_module.AccountCopier, + "copy_account", + mock.AsyncMock(return_value=account_copy_result), + ) as copy_account_mock: + result = await copy_exchange_interpreter_with_exchange_manager.interprete(dsl_expression) + + copy_account_mock.assert_awaited_once() + + assert re_callable_operator_mixin.ReCallingOperatorResult.is_re_calling_operator_result(result) + re_calling_payload = result[re_callable_operator_mixin.ReCallingOperatorResult.__name__] + assert re_calling_payload["keyword"] == "copy_exchange_account" + last_execution_state = re_calling_payload["last_execution_result"]["state"] + assert last_execution_state["created_orders"] == [] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_create_order_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_create_order_operators.py new file mode 100644 index 0000000000..fd0b6a2079 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_create_order_operators.py @@ -0,0 +1,812 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import mock +import pytest +import pytest_asyncio + +import octobot_commons.constants as commons_constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.symbols as commons_symbols +import octobot_trading.enums +import octobot_trading.errors as trading_errors +import octobot_trading.personal_data as personal_data +import octobot_trading.dsl + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.create_order_operators as create_order_operators + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + +SYMBOL = "BTC/USDT" +AMOUNT = 0.01 +PRICE = "50000" +MARK_PRICE = decimal.Decimal("50000") + + +def _create_mock_order(symbol: str = SYMBOL, side: str = "buy", order_type=None): + order = mock.Mock() + order.symbol = symbol + order.side = octobot_trading.enums.TradeOrderSide(side) + order.order_type = order_type or octobot_trading.enums.TraderOrderType.BUY_MARKET + order.to_dict = mock.Mock(return_value={"symbol": symbol, "side": side}) + return order + + +@pytest_asyncio.fixture +async def create_order_operators_list(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return create_order_operators.create_create_order_operators(exchange_manager) + + +@pytest_asyncio.fixture +async def no_exchange_manager_create_order_operators_list(): + return create_order_operators.create_create_order_operators(None) + + +@pytest_asyncio.fixture +async def create_order_operators_with_trading_mode(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + mock_trading_mode = mock.Mock() + mock_trading_mode.create_order = mock.AsyncMock() + mock_dependencies = mock.Mock() + return create_order_operators.create_create_order_operators( + exchange_manager, + trading_mode=mock_trading_mode, + dependencies=mock_dependencies, + ) + + +@pytest_asyncio.fixture +async def interpreter(create_order_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + create_order_operators_list + ) + + +@pytest_asyncio.fixture +async def no_exchange_manager_interpreter(no_exchange_manager_create_order_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + no_exchange_manager_create_order_operators_list + ) + + +@pytest_asyncio.fixture +async def maybe_exchange_manager_interpreter(request, interpreter, no_exchange_manager_interpreter): + """Parametrized fixture that yields either interpreter or no_exchange_manager_interpreter.""" + selected_value = request.param + if selected_value == "interpreter": + return interpreter + elif selected_value == "no_exchange_manager_interpreter": + return no_exchange_manager_interpreter + raise ValueError(f"Invalid selected_value: {selected_value}") + + +def _ensure_portfolio_config(backtesting_trader, portfolio_content): + _config, exchange_manager, _trader = backtesting_trader + if commons_constants.CONFIG_SIMULATOR not in _config: + _config[commons_constants.CONFIG_SIMULATOR] = {} + if commons_constants.CONFIG_STARTING_PORTFOLIO not in _config[commons_constants.CONFIG_SIMULATOR]: + _config[commons_constants.CONFIG_SIMULATOR][commons_constants.CONFIG_STARTING_PORTFOLIO] = {} + _config[commons_constants.CONFIG_SIMULATOR][commons_constants.CONFIG_STARTING_PORTFOLIO].update( + portfolio_content + ) + exchange_manager.exchange_personal_data.portfolio_manager.apply_forced_portfolio( + _config[commons_constants.CONFIG_SIMULATOR][commons_constants.CONFIG_STARTING_PORTFOLIO] + ) + + +def _ensure_market_order_trading_context(backtesting_trader): + """Set up portfolio, symbol config, and mark price for real simulated order creation.""" + _config, exchange_manager, _trader = backtesting_trader + _ensure_portfolio_config(backtesting_trader, {"BTC": 0, "USDT": 100000}) + + if SYMBOL not in exchange_manager.client_symbols: + exchange_manager.client_symbols.append(SYMBOL) + if SYMBOL not in exchange_manager.exchange_config.traded_symbol_pairs: + exchange_manager.exchange_config.traded_symbol_pairs.append(SYMBOL) + exchange_manager.exchange_config.traded_symbols.append( + commons_symbols.parse_symbol(SYMBOL) + ) + + symbol_data = exchange_manager.exchange_symbols_data.get_exchange_symbol_data( + SYMBOL, allow_creation=True + ) + symbol_data.handle_mark_price_update( + MARK_PRICE, octobot_trading.enums.MarkPriceSources.EXCHANGE_MARK_PRICE.value + ) + + +def _ensure_sell_order_trading_context(backtesting_trader): + """Set up portfolio with BTC for sell orders, symbol config, and mark price.""" + _config, exchange_manager, _trader = backtesting_trader + _ensure_portfolio_config(backtesting_trader, {"BTC": 1.0, "USDT": 0}) + + if SYMBOL not in exchange_manager.client_symbols: + exchange_manager.client_symbols.append(SYMBOL) + if SYMBOL not in exchange_manager.exchange_config.traded_symbol_pairs: + exchange_manager.exchange_config.traded_symbol_pairs.append(SYMBOL) + exchange_manager.exchange_config.traded_symbols.append( + commons_symbols.parse_symbol(SYMBOL) + ) + + symbol_data = exchange_manager.exchange_symbols_data.get_exchange_symbol_data( + SYMBOL, allow_creation=True + ) + symbol_data.handle_mark_price_update( + MARK_PRICE, octobot_trading.enums.MarkPriceSources.EXCHANGE_MARK_PRICE.value + ) + + + +class TestCreateOrderOnExchange: + @pytest.mark.asyncio + async def test_create_order_on_exchange_returns_order_via_trading_mode( + self, create_order_operators_with_trading_mode, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_with_trading_mode + mock_order = _create_mock_order() + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + factory.trading_mode.create_order = mock.AsyncMock(return_value=mock_order) + + result = await factory.create_order_on_exchange(mock_order) + + assert result is mock_order + factory.trading_mode.create_order.assert_awaited_once_with( + mock_order, dependencies=factory.dependencies, wait_for_creation=True + ) + + @pytest.mark.asyncio + async def test_create_order_on_exchange_uses_trader_when_no_trading_mode( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order() + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + + with mock.patch.object( + exchange_manager.trader, + "create_order", + mock.AsyncMock(return_value=mock_order), + ) as mock_create_order: + result = await factory.create_order_on_exchange(mock_order) + + assert result is mock_order + mock_create_order.assert_awaited_once_with( + mock_order, wait_for_creation=True + ) + + @pytest.mark.asyncio + async def test_create_order_on_exchange_forwards_wait_for_creation( + self, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + operators = create_order_operators.create_create_order_operators( + exchange_manager, wait_for_creation=False + ) + market_op_class = operators[0] + mock_order = _create_mock_order() + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + + with mock.patch.object( + exchange_manager.trader, + "create_order", + mock.AsyncMock(return_value=mock_order), + ) as mock_create_order: + await factory.create_order_on_exchange(mock_order) + + mock_create_order.assert_awaited_once_with( + mock_order, wait_for_creation=False + ) + + +class TestCreateBaseOrderAndAssociatedElements: + @pytest.mark.asyncio + async def test_create_base_orders_and_associated_elements_raises_when_symbol_not_in_exchange( + self, create_order_operators_list, backtesting_trader + ): + UNKNOWN_SYMBOL = "NONEXISTENT/USDT" + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_list + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + + with pytest.raises( + trading_errors.UnSupportedSymbolError, + match=r"Symbol NONEXISTENT/USDT not found in exchange traded symbols", + ): + await factory.create_base_orders_and_associated_elements( + symbol=UNKNOWN_SYMBOL, + side="buy", + amount=AMOUNT, + order_type=octobot_trading.enums.TraderOrderType.BUY_MARKET, + ) + + +class TestMarketOrderOperator: + @pytest.mark.asyncio + async def test_pre_compute_creates_market_order( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order() + + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=mock_order), + ): + operator = market_op_class("buy", SYMBOL, AMOUNT) + await operator.pre_compute() + + assert operator.value == {"created_orders": [{"symbol": SYMBOL, "side": "buy"}]} + factory.create_base_orders_and_associated_elements.assert_awaited_once() + call_kwargs = factory.create_base_orders_and_associated_elements.call_args[1] + assert call_kwargs["symbol"] == SYMBOL + assert call_kwargs["side"] == octobot_trading.enums.TradeOrderSide.BUY + assert call_kwargs["amount"] == AMOUNT + assert call_kwargs["order_type"] == octobot_trading.enums.TraderOrderType.BUY_MARKET + + @pytest.mark.asyncio + async def test_pre_compute_sell_market_order( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order(side="sell") + + factory = market_op_class("sell", SYMBOL, AMOUNT).get_order_factory() + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=mock_order), + ): + operator = market_op_class("sell", SYMBOL, AMOUNT) + await operator.pre_compute() + + call_kwargs = factory.create_base_orders_and_associated_elements.call_args[1] + assert call_kwargs["order_type"] == octobot_trading.enums.TraderOrderType.SELL_MARKET + + @pytest.mark.asyncio + async def test_pre_compute_raises_when_create_order_fails( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order() + + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=None), + ): + operator = market_op_class("buy", SYMBOL, AMOUNT) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="Failed to create", + ): + await operator.pre_compute() + + def test_compute_without_pre_compute(self, create_order_operators_list): + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_list + operator = market_op_class("buy", SYMBOL, AMOUNT) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_pre_compute_uses_trading_mode_when_provided( + self, create_order_operators_with_trading_mode, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + market_op_class, _limit_op_class, _stop_loss_op_class = create_order_operators_with_trading_mode + mock_order = _create_mock_order() + + factory = market_op_class("buy", SYMBOL, AMOUNT).get_order_factory() + mock_trading_mode = factory.trading_mode + mock_trading_mode.create_order = mock.AsyncMock(return_value=mock_order) + + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + exchange_manager.trader, + "create_order", + mock.AsyncMock(return_value=mock_order), + ): + operator = market_op_class("buy", SYMBOL, AMOUNT) + await operator.pre_compute() + + assert operator.value == {"created_orders": [{"symbol": SYMBOL, "side": "buy"}]} + mock_trading_mode.create_order.assert_awaited_once() + call_args = mock_trading_mode.create_order.call_args + assert call_args[0][0] == mock_order + assert call_args[1]["dependencies"] is factory.dependencies + assert call_args[1]["wait_for_creation"] is True + + +class TestLimitOrderOperator: + @pytest.mark.asyncio + async def test_pre_compute_creates_limit_order( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _market_op_class, limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order(order_type=octobot_trading.enums.TraderOrderType.BUY_LIMIT) + + factory = limit_op_class("buy", SYMBOL, AMOUNT, PRICE).get_order_factory() + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=mock_order), + ): + operator = limit_op_class("buy", SYMBOL, AMOUNT, PRICE) + await operator.pre_compute() + + assert operator.value == {"created_orders": [{"symbol": SYMBOL, "side": "buy"}]} + call_kwargs = factory.create_base_orders_and_associated_elements.call_args[1] + assert call_kwargs["symbol"] == SYMBOL + assert call_kwargs["side"] == octobot_trading.enums.TradeOrderSide.BUY + assert call_kwargs["amount"] == AMOUNT + assert call_kwargs["price"] == PRICE + assert call_kwargs["order_type"] == octobot_trading.enums.TraderOrderType.BUY_LIMIT + + @pytest.mark.asyncio + async def test_pre_compute_forwards_allow_holdings_adaptation( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _market_op_class, limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order(order_type=octobot_trading.enums.TraderOrderType.BUY_LIMIT) + + factory = limit_op_class("buy", SYMBOL, AMOUNT, PRICE).get_order_factory() + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=mock_order), + ): + operator = limit_op_class( + "buy", SYMBOL, AMOUNT, PRICE, allow_holdings_adaptation=True + ) + await operator.pre_compute() + + call_kwargs = factory.create_base_orders_and_associated_elements.call_args[1] + assert call_kwargs["allow_holdings_adaptation"] is True + + @pytest.mark.asyncio + async def test_pre_compute_allow_holdings_adaptation_defaults_to_false( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _market_op_class, limit_op_class, _stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order(order_type=octobot_trading.enums.TraderOrderType.BUY_LIMIT) + + factory = limit_op_class("buy", SYMBOL, AMOUNT, PRICE).get_order_factory() + with mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=mock_order), + ): + operator = limit_op_class("buy", SYMBOL, AMOUNT, PRICE) + await operator.pre_compute() + + call_kwargs = factory.create_base_orders_and_associated_elements.call_args[1] + assert call_kwargs.get("allow_holdings_adaptation", False) is False + + +def _patch_stop_loss_supported(exchange_manager): + """Patch exchange to support STOP_LOSS orders (binanceus spot does not by default).""" + return mock.patch.object( + exchange_manager.exchange, + "is_supported_order_type", + mock.Mock(return_value=True), + ) + + +class TestStopLossOrderOperator: + @pytest.mark.asyncio + async def test_pre_compute_creates_stop_loss_order( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _market_op_class, _limit_op_class, stop_loss_op_class = create_order_operators_list + mock_order = _create_mock_order(order_type=octobot_trading.enums.TraderOrderType.STOP_LOSS) + + operator = stop_loss_op_class("buy", SYMBOL, AMOUNT, PRICE) + factory = operator.get_order_factory() + with _patch_stop_loss_supported(exchange_manager), mock.patch.object( + factory, + "create_base_orders_and_associated_elements", + mock.AsyncMock(return_value=[mock_order]), + ), mock.patch.object( + factory, + "create_order_on_exchange", + mock.AsyncMock(return_value=mock_order), + ): + await operator.pre_compute() + + call_kwargs = factory.create_base_orders_and_associated_elements.call_args[1] + assert call_kwargs["order_type"] == octobot_trading.enums.TraderOrderType.STOP_LOSS + + @pytest.mark.asyncio + async def test_pre_compute_raises_when_stop_loss_unsupported( + self, create_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _market_op_class, _limit_op_class, stop_loss_op_class = create_order_operators_list + + with mock.patch.object( + exchange_manager.exchange, + "is_supported_order_type", + mock.Mock(return_value=False), + ): + operator = stop_loss_op_class("buy", SYMBOL, AMOUNT, PRICE) + with pytest.raises( + trading_errors.NotSupportedOrderTypeError, + match="STOP_LOSS orders are not supported", + ): + await operator.pre_compute() + + +class TestCreateOrderCallAsDsl: + @pytest.mark.asyncio + async def test_market_call_as_dsl(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + _ensure_market_order_trading_context(backtesting_trader) + + result = await interpreter.interprete( + f"market('buy', '{SYMBOL}', {AMOUNT})" + ) + + assert isinstance(result, dict) + assert "created_orders" in result + assert len(result["created_orders"]) == 1 + created_order = result["created_orders"][0] + assert created_order["symbol"] == SYMBOL + assert created_order["side"] == octobot_trading.enums.TradeOrderSide.BUY.value + assert "id" in created_order or "exchange_id" in created_order + + all_orders = exchange_manager.exchange_personal_data.orders_manager.get_all_orders( + symbol=SYMBOL + ) + assert all_orders == [] + trades = exchange_manager.exchange_personal_data.trades_manager.get_trades() + assert len(trades) == 1 + created_trade = trades[0] + assert created_trade.trade_type == octobot_trading.enums.TraderOrderType.BUY_MARKET + assert created_trade.side == octobot_trading.enums.TradeOrderSide.BUY + assert created_trade.executed_price == MARK_PRICE + assert created_trade.executed_quantity == decimal.Decimal(str(AMOUNT)) + + @pytest.mark.asyncio + async def test_limit_call_as_dsl( + self, interpreter, backtesting_trader, create_order_operators_list + ): + _config, exchange_manager, _trader = backtesting_trader + _ensure_market_order_trading_context(backtesting_trader) + + order_price = 45000 + result = await interpreter.interprete( + f"limit('buy', '{SYMBOL}', {AMOUNT}, {order_price}, reduce_only=True)" + ) + + assert isinstance(result, dict) + assert "created_orders" in result + assert len(result["created_orders"]) == 1 + created_order = result["created_orders"][0] + assert created_order["symbol"] == SYMBOL + assert created_order["side"] == octobot_trading.enums.TradeOrderSide.BUY.value + assert "id" in created_order or "exchange_id" in created_order + + all_orders = exchange_manager.exchange_personal_data.orders_manager.get_all_orders( + symbol=SYMBOL + ) + assert len(all_orders) == 1 + trades = exchange_manager.exchange_personal_data.trades_manager.get_trades() + assert len(trades) == 0 + created_order = all_orders[0] + assert isinstance(created_order, personal_data.BuyLimitOrder) + assert created_order.origin_price == decimal.Decimal(str(order_price)) + assert created_order.origin_quantity == decimal.Decimal(str(AMOUNT)) + assert created_order.reduce_only == True + + @pytest.mark.asyncio + async def test_limit_sell_with_take_profit_call_as_dsl( + self, interpreter, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _ensure_sell_order_trading_context(backtesting_trader) + + limit_price = 55000 + take_profit_price = 52000 + result = await interpreter.interprete( + f"limit('sell', '{SYMBOL}', {AMOUNT}, price='{limit_price}', " + f"take_profit_prices=['{take_profit_price}'])" + ) + + assert isinstance(result, dict) + assert "created_orders" in result + assert len(result["created_orders"]) == 1 + created_order = result["created_orders"][0] + assert created_order["symbol"] == SYMBOL + assert created_order["side"] == octobot_trading.enums.TradeOrderSide.SELL.value + assert "id" in created_order or "exchange_id" in created_order + + all_orders = exchange_manager.exchange_personal_data.orders_manager.get_all_orders( + symbol=SYMBOL + ) + assert len(all_orders) == 1 + base_order = all_orders[0] + assert isinstance(base_order, personal_data.SellLimitOrder) + assert base_order.origin_price == decimal.Decimal(str(limit_price)) + assert base_order.origin_quantity == decimal.Decimal(str(AMOUNT)) + assert len(base_order.chained_orders) == 1 + tp_order = base_order.chained_orders[0] + assert isinstance(tp_order, personal_data.BuyLimitOrder) + assert tp_order.side == octobot_trading.enums.TradeOrderSide.BUY + assert tp_order.origin_price == decimal.Decimal(str(take_profit_price)) + assert tp_order.origin_quantity == decimal.Decimal(str(AMOUNT)) + + @pytest.mark.asyncio + async def test_stop_loss_sell_call_as_dsl(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + _ensure_sell_order_trading_context(backtesting_trader) + + stop_price = 48000 + with _patch_stop_loss_supported(exchange_manager): + result = await interpreter.interprete( + f"stop_loss('sell', '{SYMBOL}', {AMOUNT}, price='{stop_price}')" + ) + + assert isinstance(result, dict) + assert "created_orders" in result + assert len(result["created_orders"]) == 1 + created_order = result["created_orders"][0] + assert created_order["symbol"] == SYMBOL + assert created_order["side"] == octobot_trading.enums.TradeOrderSide.SELL.value + assert "id" in created_order or "exchange_id" in created_order + + all_orders = exchange_manager.exchange_personal_data.orders_manager.get_all_orders( + symbol=SYMBOL + ) + assert len(all_orders) == 1 + created_order = all_orders[0] + assert isinstance(created_order, personal_data.StopLossOrder) + assert created_order.side == octobot_trading.enums.TradeOrderSide.SELL + assert created_order.origin_price == decimal.Decimal(str(stop_price)) + assert created_order.origin_quantity == decimal.Decimal(str(AMOUNT)) + + @pytest.mark.asyncio + async def test_stop_loss_call_as_dsl_raises_when_unsupported( + self, interpreter, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _ensure_sell_order_trading_context(backtesting_trader) + + stop_price = 48000 + with mock.patch.object( + exchange_manager.exchange, + "is_supported_order_type", + mock.Mock(return_value=False), + ): + with pytest.raises( + trading_errors.NotSupportedOrderTypeError, + match="STOP_LOSS orders are not supported", + ): + await interpreter.interprete( + f"stop_loss('sell', '{SYMBOL}', {AMOUNT}, price='{stop_price}')" + ) + + @pytest.mark.asyncio + async def test_limit_with_chained_stop_loss_call_as_dsl_raises_when_unsupported( + self, interpreter, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _ensure_market_order_trading_context(backtesting_trader) + + limit_price = 50000 + stop_loss_price = 48000 + with mock.patch.object( + exchange_manager.exchange, + "is_supported_order_type", + mock.Mock(return_value=False), + ): + with pytest.raises( + trading_errors.NotSupportedOrderTypeError, + match="STOP_LOSS orders are not supported", + ): + await interpreter.interprete( + f"limit('buy', '{SYMBOL}', {AMOUNT}, price='{limit_price}', " + f"stop_loss_price='{stop_loss_price}')" + ) + + @pytest.mark.asyncio + async def test_limit_buy_with_take_profit_and_stop_loss_call_as_dsl( + self, interpreter, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _ensure_market_order_trading_context(backtesting_trader) + + limit_price = 50000 + take_profit_price_offset = "10%" + stop_loss_price = 48000 + tag = "test_tag" + cancel_policy = personal_data.ChainedOrderFillingPriceOrderCancelPolicy.__name__ + active_order_swap_strategy = personal_data.TakeProfitFirstActiveOrderSwapStrategy.__name__ + with _patch_stop_loss_supported(exchange_manager): + result = await interpreter.interprete( + f"limit('buy', '{SYMBOL}', {AMOUNT}, price='{limit_price}', " + f"take_profit_prices=['{take_profit_price_offset}'], stop_loss_price='{stop_loss_price}', tag='{tag}', cancel_policy='{cancel_policy}', active_order_swap_strategy='{active_order_swap_strategy}')" + ) + + assert isinstance(result, dict) + assert "created_orders" in result + assert len(result["created_orders"]) == 1 + created_order = result["created_orders"][0] + assert created_order["symbol"] == SYMBOL + assert created_order["side"] == octobot_trading.enums.TradeOrderSide.BUY.value + assert "id" in created_order or "exchange_id" in created_order + + all_orders = exchange_manager.exchange_personal_data.orders_manager.get_all_orders( + symbol=SYMBOL + ) + assert len(all_orders) == 1 + base_order = all_orders[0] + assert isinstance(base_order, personal_data.BuyLimitOrder) + assert base_order.origin_price == decimal.Decimal(str(limit_price)) + assert base_order.origin_quantity == decimal.Decimal(str(AMOUNT)) + assert base_order.tag == tag + assert isinstance(base_order.cancel_policy, personal_data.ChainedOrderFillingPriceOrderCancelPolicy) + assert len(base_order.chained_orders) == 2 + stop_orders = [o for o in base_order.chained_orders if personal_data.is_stop_order(o.order_type)] + tp_orders = [o for o in base_order.chained_orders if not personal_data.is_stop_order(o.order_type)] + assert len(stop_orders) == 1 + assert len(tp_orders) == 1 + assert isinstance(stop_orders[0], personal_data.StopLossOrder) + assert isinstance(tp_orders[0], personal_data.SellLimitOrder) + assert tp_orders[0].tag == tag + assert stop_orders[0].tag == tag + assert stop_orders[0].side == octobot_trading.enums.TradeOrderSide.SELL + assert stop_orders[0].origin_price == decimal.Decimal(str(stop_loss_price)) + assert stop_orders[0].origin_quantity == decimal.Decimal(str(AMOUNT)) + assert tp_orders[0].side == octobot_trading.enums.TradeOrderSide.SELL + assert tp_orders[0].origin_price == decimal.Decimal("55000") # 50k + 10% + assert tp_orders[0].origin_quantity == decimal.Decimal(str(AMOUNT)) + order_group = tp_orders[0].order_group + assert isinstance(order_group, personal_data.OneCancelsTheOtherOrderGroup) + assert isinstance(order_group.active_order_swap_strategy, personal_data.TakeProfitFirstActiveOrderSwapStrategy) # non default strategy + assert tp_orders[0].order_group is stop_orders[0].order_group + + @pytest.mark.asyncio + async def test_limit_buy_with_many_take_profits_and_a_stop_loss_call_as_dsl( + self, interpreter, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _ensure_market_order_trading_context(backtesting_trader) + + limit_price = 50000 + take_profit_price_offset_1 = "10%" + take_profit_price_offset_2 = "20%" + take_profit_price_offset_3 = "30%" + take_profit_volume_percents = [50, 20, 30] + stop_loss_price = 48000 + trailing_profile = personal_data.TrailingProfileTypes.FILLED_TAKE_PROFIT.value + with _patch_stop_loss_supported(exchange_manager): + result = await interpreter.interprete( + f"limit('buy', '{SYMBOL}', {AMOUNT}, price='{limit_price}', " + f"take_profit_prices=['{take_profit_price_offset_1}', '{take_profit_price_offset_2}', '{take_profit_price_offset_3}'], take_profit_volume_percents=['{take_profit_volume_percents[0]}', '{take_profit_volume_percents[1]}', '{take_profit_volume_percents[2]}'], stop_loss_price='{stop_loss_price}', trailing_profile='{trailing_profile}')" + ) + + assert isinstance(result, dict) + assert "created_orders" in result + assert len(result["created_orders"]) == 1 + created_order = result["created_orders"][0] + assert created_order["symbol"] == SYMBOL + assert created_order["side"] == octobot_trading.enums.TradeOrderSide.BUY.value + assert "id" in created_order or "exchange_id" in created_order + + all_orders = exchange_manager.exchange_personal_data.orders_manager.get_all_orders( + symbol=SYMBOL + ) + assert len(all_orders) == 1 + base_order = all_orders[0] + assert isinstance(base_order, personal_data.BuyLimitOrder) + assert base_order.origin_price == decimal.Decimal(str(limit_price)) + assert base_order.origin_quantity == decimal.Decimal(str(AMOUNT)) + assert len(base_order.chained_orders) == 4 + stop_orders = [o for o in base_order.chained_orders if personal_data.is_stop_order(o.order_type)] + tp_orders = [o for o in base_order.chained_orders if not personal_data.is_stop_order(o.order_type)] + assert len(stop_orders) == 1 + assert len(tp_orders) == 3 + assert isinstance(stop_orders[0], personal_data.StopLossOrder) + assert isinstance(tp_orders[0], personal_data.SellLimitOrder) + assert stop_orders[0].side == octobot_trading.enums.TradeOrderSide.SELL + assert stop_orders[0].origin_price == decimal.Decimal(str(stop_loss_price)) + assert stop_orders[0].origin_quantity == decimal.Decimal(str(AMOUNT)) + for i, tp_order in enumerate(tp_orders): + assert tp_order.side == octobot_trading.enums.TradeOrderSide.SELL + assert tp_order.origin_price == decimal.Decimal("50000") * decimal.Decimal(str(1 + (i + 1) * 0.1)) + assert tp_order.origin_quantity == decimal.Decimal(str(AMOUNT)) * decimal.Decimal(str(take_profit_volume_percents[i] / 100)) + order_group = tp_order.order_group + assert isinstance(order_group, personal_data.TrailingOnFilledTPBalancedOrderGroup) + assert isinstance(order_group.active_order_swap_strategy, personal_data.StopFirstActiveOrderSwapStrategy) # default strategy + assert tp_order.order_group is stop_orders[0].order_group + + +class TestGetDependencies: + """Tests for get_dependencies using DSL syntax and the interpreter.""" + + @pytest.mark.parametrize( + "maybe_exchange_manager_interpreter", + ["interpreter", "no_exchange_manager_interpreter"], + indirect=True, + ) + def test_market_order_get_dependencies_from_interpreter_with_exchange_manager(self, maybe_exchange_manager_interpreter): + # symbol 1 + maybe_exchange_manager_interpreter.prepare(f"market('buy', '{SYMBOL}', {AMOUNT})") + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=SYMBOL), + ] + # other symbol 2 + symbol = "ETH/USDT" + maybe_exchange_manager_interpreter.prepare(f"market('sell', '{symbol}', 0.5)") + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=symbol), + ] + symbol = "SOL/USDT" + # symbol 3 as keyword argument + maybe_exchange_manager_interpreter.prepare(f"market('sell', symbol='{symbol}', amount=0.5)") + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=symbol), + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_fetch_order_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_fetch_order_operators.py new file mode 100644 index 0000000000..dbe855db9b --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_fetch_order_operators.py @@ -0,0 +1,409 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import mock +import pytest +import pytest_asyncio + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.dsl +import octobot_trading.enums as trading_enums +import octobot_trading.personal_data as trading_personal_data + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.fetch_order_operators as fetch_order_operators + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + +SYMBOL = "BTC/USDT" +EXCHANGE_ORDER_ID = "order-123" +RAW_ORDER_SENTINEL = {"exchange_id": EXCHANGE_ORDER_ID, "symbol": SYMBOL} +FORMATTED_ORDER_SENTINEL = {"formatted": True, "exchange_order_id": EXCHANGE_ORDER_ID} + + +@pytest_asyncio.fixture +async def fetch_order_operators_list(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return fetch_order_operators.create_fetch_order_operators(exchange_manager) + + +@pytest_asyncio.fixture +async def interpreter(fetch_order_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + fetch_order_operators_list + ) + + +@pytest_asyncio.fixture +async def no_exchange_manager_fetch_order_operators_list(): + return fetch_order_operators.create_fetch_order_operators(None) + + +@pytest_asyncio.fixture +async def no_exchange_manager_interpreter(no_exchange_manager_fetch_order_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + no_exchange_manager_fetch_order_operators_list + ) + + +@pytest_asyncio.fixture +async def maybe_exchange_manager_interpreter(request, interpreter, no_exchange_manager_interpreter): + selected_value = request.param + if selected_value == "interpreter": + return interpreter + if selected_value == "no_exchange_manager_interpreter": + return no_exchange_manager_interpreter + raise ValueError(f"Invalid selected_value: {selected_value}") + + +class TestFetchOrderOperator: + @pytest.mark.asyncio + @pytest.mark.parametrize( + "trader_mode", + ("simulated", "real_trading"), + ) + async def test_pre_compute_returns_formatted_order_dict( + self, fetch_order_operators_list, backtesting_trader, trader_mode + ): + _config, exchange_manager, trader = backtesting_trader + fetch_order_op_class, = fetch_order_operators_list + mock_order = mock.Mock() + mock_order.to_dict = mock.Mock(return_value=FORMATTED_ORDER_SENTINEL) + + with contextlib.ExitStack() as stack: + if trader_mode == "simulated": + mock_order.symbol = SYMBOL + orders_get_mock = stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(return_value=mock_order), + )) + else: + stack.enter_context(mock.patch.object( + exchange_manager, "is_trader_simulated", False, + )) + exchange_get_mock = stack.enter_context(mock.patch.object( + exchange_manager.exchange, + "get_order", + mock.AsyncMock(return_value=RAW_ORDER_SENTINEL), + )) + create_from_raw_mock = stack.enter_context(mock.patch.object( + trading_personal_data, + "create_order_instance_from_raw", + mock.Mock(return_value=mock_order), + )) + + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + ) + await operator.pre_compute() + + assert operator.value == FORMATTED_ORDER_SENTINEL + mock_order.to_dict.assert_called_once_with() + if trader_mode == "simulated": + orders_get_mock.assert_called_once_with(None, exchange_order_id=EXCHANGE_ORDER_ID) + else: + exchange_get_mock.assert_awaited_once_with(EXCHANGE_ORDER_ID, symbol=SYMBOL) + create_from_raw_mock.assert_called_once_with(trader, RAW_ORDER_SENTINEL) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "trader_mode", + ("simulated", "real_trading"), + ) + async def test_pre_compute_order_not_found( + self, fetch_order_operators_list, backtesting_trader, trader_mode + ): + _config, exchange_manager, _trader = backtesting_trader + fetch_order_op_class, = fetch_order_operators_list + + with contextlib.ExitStack() as stack: + if trader_mode == "simulated": + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(side_effect=KeyError(EXCHANGE_ORDER_ID)), + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.trades_manager, + "get_trades", + mock.Mock(return_value=[]), + )) + else: + stack.enter_context(mock.patch.object( + exchange_manager, "is_trader_simulated", False, + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange, + "get_order", + mock.AsyncMock(return_value=None), + )) + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + ) + await operator.pre_compute() + assert operator.value is None + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "trader_mode", + ("simulated", "real_trading"), + ) + async def test_pre_compute_order_not_found_raises_when_raise_if_not_found( + self, fetch_order_operators_list, backtesting_trader, trader_mode + ): + _config, exchange_manager, _trader = backtesting_trader + fetch_order_op_class, = fetch_order_operators_list + with contextlib.ExitStack() as stack: + if trader_mode == "simulated": + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(side_effect=KeyError(EXCHANGE_ORDER_ID)), + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.trades_manager, + "get_trades", + mock.Mock(return_value=[]), + )) + else: + stack.enter_context(mock.patch.object( + exchange_manager, "is_trader_simulated", False, + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange, + "get_order", + mock.AsyncMock(return_value=None), + )) + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + raise_if_not_found=True, + ) + with pytest.raises( + octobot_commons.errors.InvalidParametersError, + match="No .* order found for symbol=.*exchange_order_id=", + ): + await operator.pre_compute() + + @pytest.mark.asyncio + async def test_pre_compute_symbol_mismatch_simulated( + self, fetch_order_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + fetch_order_op_class, = fetch_order_operators_list + mock_order = mock.Mock() + mock_order.symbol = "ETH/USDT" + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(return_value=mock_order), + ): + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + ) + with pytest.raises( + octobot_commons.errors.InvalidParametersError, + match="is for symbol", + ): + await operator.pre_compute() + + @pytest.mark.asyncio + async def test_pre_compute_simulated_resolves_from_trades_when_order_not_in_manager( + self, fetch_order_operators_list, backtesting_trader + ): + _config, exchange_manager, trader = backtesting_trader + fetch_order_op_class, = fetch_order_operators_list + mock_order_instance = mock.Mock() + mock_order_instance.to_dict = mock.Mock(return_value=FORMATTED_ORDER_SENTINEL) + + mock_trade = mock.Mock() + mock_trade.symbol = SYMBOL + mock_trade.to_dict = mock.Mock(return_value={trading_enums.ExchangeConstantsOrderColumns.ID.value: "t1"}) + mock_trade.has_been_executed = mock.Mock(return_value=True) + mock_trade.executed_quantity = mock.Mock() + mock_trade.get_time = mock.Mock(return_value=0.0) + + with mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(side_effect=KeyError(EXCHANGE_ORDER_ID)), + ), mock.patch.object( + exchange_manager.exchange_personal_data.trades_manager, + "get_trades", + mock.Mock(return_value=[mock_trade]), + ) as get_trades_mock, mock.patch.object( + trading_personal_data, + "create_order_from_dict", + mock.Mock(return_value=mock_order_instance), + ) as create_from_dict_mock: + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + ) + await operator.pre_compute() + + assert operator.value == FORMATTED_ORDER_SENTINEL + get_trades_mock.assert_called_once_with(exchange_order_id=EXCHANGE_ORDER_ID) + create_from_dict_mock.assert_called_once() + assert trader is create_from_dict_mock.call_args[0][0] + order_dict_passed = create_from_dict_mock.call_args[0][1] + assert trading_enums.ExchangeConstantsOrderColumns.FILLED.value in order_dict_passed + + @pytest.mark.asyncio + async def test_pre_compute_requires_exchange_manager(self, no_exchange_manager_fetch_order_operators_list): + fetch_order_op_class, = no_exchange_manager_fetch_order_operators_list + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + ) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="exchange_manager and exchange_manager.trader are required for fetch_order operator", + ): + await operator.pre_compute() + + @pytest.mark.asyncio + async def test_pre_compute_requires_trader(self, fetch_order_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + fetch_order_op_class, = fetch_order_operators_list + previous_trader = exchange_manager.trader + exchange_manager.trader = None + try: + operator = fetch_order_op_class( + SYMBOL, + exchange_order_id=EXCHANGE_ORDER_ID, + ) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="exchange_manager and exchange_manager.trader are required for fetch_order operator", + ): + await operator.pre_compute() + finally: + exchange_manager.trader = previous_trader + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "trader_mode", + ("simulated", "real_trading"), + ) + async def test_fetch_order_call_as_dsl( + self, interpreter, backtesting_trader, trader_mode + ): + _config, exchange_manager, _trader = backtesting_trader + mock_order = mock.Mock() + mock_order.to_dict = mock.Mock(return_value=FORMATTED_ORDER_SENTINEL) + + with contextlib.ExitStack() as stack: + if trader_mode == "simulated": + mock_order.symbol = SYMBOL + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(return_value=mock_order), + )) + else: + stack.enter_context(mock.patch.object( + exchange_manager, "is_trader_simulated", False, + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange, + "get_order", + mock.AsyncMock(return_value=RAW_ORDER_SENTINEL), + )) + stack.enter_context(mock.patch.object( + trading_personal_data, + "create_order_instance_from_raw", + mock.Mock(return_value=mock_order), + )) + resolved = await interpreter.interprete( + f"fetch_order('{SYMBOL}', exchange_order_id='{EXCHANGE_ORDER_ID}')" + ) + assert resolved == FORMATTED_ORDER_SENTINEL + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "trader_mode", + ("simulated", "real_trading"), + ) + async def test_fetch_order_call_as_dsl_raise_if_not_found_true( + self, interpreter, backtesting_trader, trader_mode + ): + _config, exchange_manager, _trader = backtesting_trader + with contextlib.ExitStack() as stack: + if trader_mode == "simulated": + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.orders_manager, + "get_order", + mock.Mock(side_effect=KeyError(EXCHANGE_ORDER_ID)), + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange_personal_data.trades_manager, + "get_trades", + mock.Mock(return_value=[]), + )) + else: + stack.enter_context(mock.patch.object( + exchange_manager, "is_trader_simulated", False, + )) + stack.enter_context(mock.patch.object( + exchange_manager.exchange, + "get_order", + mock.AsyncMock(return_value=None), + )) + with pytest.raises( + octobot_commons.errors.InvalidParametersError, + match="No .* order found for symbol=.*exchange_order_id=", + ): + await interpreter.interprete( + f"fetch_order('{SYMBOL}', exchange_order_id='{EXCHANGE_ORDER_ID}', " + f"raise_if_not_found=True)" + ) + + +class TestGetDependencies: + @pytest.mark.parametrize( + "maybe_exchange_manager_interpreter", + ["interpreter", "no_exchange_manager_interpreter"], + indirect=True, + ) + def test_fetch_order_get_dependencies_from_interpreter( + self, maybe_exchange_manager_interpreter + ): + maybe_exchange_manager_interpreter.prepare( + f"fetch_order('{SYMBOL}', exchange_order_id='{EXCHANGE_ORDER_ID}')" + ) + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=SYMBOL), + ] + other_symbol = "ETH/USDT" + maybe_exchange_manager_interpreter.prepare( + f"fetch_order('{other_symbol}', exchange_order_id='{EXCHANGE_ORDER_ID}')" + ) + assert maybe_exchange_manager_interpreter.get_dependencies() == [ + octobot_trading.dsl.SymbolDependency(symbol=other_symbol), + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_futures_contracts_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_futures_contracts_operators.py new file mode 100644 index 0000000000..4573381d56 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_futures_contracts_operators.py @@ -0,0 +1,131 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest +import pytest_asyncio + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.futures_contracts_operators as futures_contracts_operators + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + +FUTURES_SYMBOL = "BTC/USDT:USDT" +LEVERAGE = 10 + + +@pytest_asyncio.fixture +async def futures_contracts_operators_list(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return futures_contracts_operators.create_futures_contracts_operators(exchange_manager) + + +@pytest_asyncio.fixture +async def interpreter(futures_contracts_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + futures_contracts_operators_list + ) + + +class TestSetLeverageOperator: + @pytest.mark.asyncio + async def test_pre_compute_sets_leverage(self, futures_contracts_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + set_leverage_op_class, = futures_contracts_operators_list + + with mock.patch.object( + _trader, + "set_leverage", + mock.AsyncMock(return_value=True), + ) as set_leverage_mock: + operator = set_leverage_op_class(FUTURES_SYMBOL, LEVERAGE) + await operator.pre_compute() + + assert operator.value == float(LEVERAGE) + set_leverage_mock.assert_awaited_once_with( + FUTURES_SYMBOL, + None, + mock.ANY, + ) + call_args = set_leverage_mock.call_args + assert float(call_args[0][2]) == LEVERAGE + + @pytest.mark.asyncio + async def test_pre_compute_with_float_leverage(self, futures_contracts_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + set_leverage_op_class, = futures_contracts_operators_list + + leverage_value = 5.5 + with mock.patch.object( + _trader, + "set_leverage", + mock.AsyncMock(return_value=True), + ) as set_leverage_mock: + operator = set_leverage_op_class(FUTURES_SYMBOL, leverage_value) + await operator.pre_compute() + + assert operator.value == leverage_value + set_leverage_mock.assert_awaited_once() + + def test_compute_without_pre_compute(self, futures_contracts_operators_list): + set_leverage_op_class, = futures_contracts_operators_list + operator = set_leverage_op_class(FUTURES_SYMBOL, LEVERAGE) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_set_leverage_call_as_dsl(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + + with mock.patch.object( + _trader, + "set_leverage", + mock.AsyncMock(return_value=True), + ) as set_leverage_mock: + result = await interpreter.interprete( + f"set_leverage('{FUTURES_SYMBOL}', {LEVERAGE})" + ) + assert result == float(LEVERAGE) + set_leverage_mock.assert_awaited_once_with( + FUTURES_SYMBOL, + None, + mock.ANY, + ) + + @pytest.mark.asyncio + async def test_set_leverage_call_as_dsl_with_leverage_param(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + + with mock.patch.object( + _trader, + "set_leverage", + mock.AsyncMock(return_value=True), + ) as set_leverage_mock: + result = await interpreter.interprete( + f"set_leverage('{FUTURES_SYMBOL}', leverage={LEVERAGE})" + ) + assert result == float(LEVERAGE) + set_leverage_mock.assert_awaited_once() diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_portfolio_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_portfolio_operators.py new file mode 100644 index 0000000000..cbeed8fe99 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_personal_data_operators/test_portfolio_operators.py @@ -0,0 +1,265 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import mock +import octobot_commons.constants as commons_constants +import pytest +import pytest_asyncio + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.constants + +import tentacles.Meta.DSL_operators.exchange_operators.exchange_personal_data_operators.portfolio_operators as portfolio_operators + +from tentacles.Meta.DSL_operators.exchange_operators.tests import ( + backtesting_config, + fake_backtesting, + backtesting_exchange_manager, + backtesting_trader, +) + +ASSET_BTC = "BTC" +ASSET_USDT = "USDT" +ASSET_ETH = "ETH" + + +def _ensure_portfolio_config(backtesting_trader, portfolio_content): + _config, exchange_manager, _trader = backtesting_trader + if commons_constants.CONFIG_SIMULATOR not in _config: + _config[commons_constants.CONFIG_SIMULATOR] = {} + if commons_constants.CONFIG_STARTING_PORTFOLIO not in _config[commons_constants.CONFIG_SIMULATOR]: + _config[commons_constants.CONFIG_SIMULATOR][commons_constants.CONFIG_STARTING_PORTFOLIO] = {} + _config[commons_constants.CONFIG_SIMULATOR][commons_constants.CONFIG_STARTING_PORTFOLIO].update( + portfolio_content + ) + exchange_manager.exchange_personal_data.portfolio_manager.apply_forced_portfolio( + _config[commons_constants.CONFIG_SIMULATOR][commons_constants.CONFIG_STARTING_PORTFOLIO] + ) + + +@pytest_asyncio.fixture +async def portfolio_operators_list(backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + return portfolio_operators.create_portfolio_operators(exchange_manager) + + +@pytest_asyncio.fixture +async def interpreter(portfolio_operators_list): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + portfolio_operators_list + ) + + +class TestTotalOperator: + @pytest.mark.asyncio + async def test_pre_compute(self, portfolio_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + total_op_class, _, _ = portfolio_operators_list + + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 1.5, ASSET_USDT: 1000}) + + operator = total_op_class(ASSET_BTC) + await operator.pre_compute() + assert operator.value == 1.5 + + @pytest.mark.asyncio + async def test_pre_compute_asset_not_in_portfolio(self, portfolio_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + total_op_class, _, _ = portfolio_operators_list + + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 1.5, ASSET_USDT: 1000}) + + operator = total_op_class(ASSET_ETH) + await operator.pre_compute() + assert operator.value == float(octobot_trading.constants.ZERO) + + def test_compute_without_pre_compute(self, portfolio_operators_list): + total_op_class, _, _ = portfolio_operators_list + operator = total_op_class(ASSET_BTC) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_total_call_as_dsl(self, interpreter, backtesting_trader): + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 2.0, ASSET_USDT: 500}) + + assert await interpreter.interprete(f"total('{ASSET_BTC}')") == 2.0 + assert await interpreter.interprete(f"total('{ASSET_USDT}')") == 500.0 + assert await interpreter.interprete(f"total('{ASSET_ETH}')") == 0.0 + + +class TestAvailableOperator: + @pytest.mark.asyncio + async def test_pre_compute(self, portfolio_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + _, available_op_class, _ = portfolio_operators_list + + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 1.5, ASSET_USDT: 1000}) + + operator = available_op_class(ASSET_BTC) + await operator.pre_compute() + assert operator.value == 1.5 + + @pytest.mark.asyncio + async def test_pre_compute_asset_not_in_portfolio(self, portfolio_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + _, available_op_class, _ = portfolio_operators_list + + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 1.5, ASSET_USDT: 1000}) + + operator = available_op_class(ASSET_ETH) + await operator.pre_compute() + assert operator.value == float(octobot_trading.constants.ZERO) + + def test_compute_without_pre_compute(self, portfolio_operators_list): + _, available_op_class, _ = portfolio_operators_list + operator = available_op_class(ASSET_BTC) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_available_call_as_dsl(self, interpreter, backtesting_trader): + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 3.0, ASSET_USDT: 2000}) + + assert await interpreter.interprete(f"available('{ASSET_BTC}')") == 3.0 + assert await interpreter.interprete(f"available('{ASSET_USDT}')") == 2000.0 + assert await interpreter.interprete(f"available('{ASSET_ETH}')") == 0.0 + + +class TestWithdrawOperator: + NETWORK = "ethereum" + ADDRESS = "0x1234567890abcdef1234567890abcdef12345678" + WITHDRAW_RESULT = {"id": "withdrawal-123", "status": "ok"} + + @pytest.mark.asyncio + async def test_pre_compute(self, portfolio_operators_list, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + _, _, withdraw_op_class = portfolio_operators_list + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 1.0, ASSET_USDT: 1000}) + + with mock.patch.object( + exchange_manager.trader, + "withdraw", + mock.AsyncMock(return_value=self.WITHDRAW_RESULT), + ) as withdraw_mock: + operator = withdraw_op_class(ASSET_BTC, self.NETWORK, self.ADDRESS, 0.1) + await operator.pre_compute() + + assert operator.value == {"created_withdrawals": [self.WITHDRAW_RESULT]} + withdraw_mock.assert_awaited_once_with( + ASSET_BTC, + decimal.Decimal("0.1"), + self.NETWORK, + self.ADDRESS, + tag=None, + params={}, + ) + + @pytest.mark.asyncio + async def test_pre_compute_uses_available_balance_when_amount_omitted( + self, portfolio_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _, _, withdraw_op_class = portfolio_operators_list + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 0.5, ASSET_USDT: 1000}) + + with mock.patch.object( + exchange_manager.trader, + "withdraw", + mock.AsyncMock(return_value=self.WITHDRAW_RESULT), + ) as withdraw_mock: + operator = withdraw_op_class(ASSET_BTC, self.NETWORK, self.ADDRESS) + await operator.pre_compute() + + assert operator.value == {"created_withdrawals": [self.WITHDRAW_RESULT]} + withdraw_mock.assert_awaited_once_with( + ASSET_BTC, + decimal.Decimal("0.5"), + self.NETWORK, + self.ADDRESS, + tag=None, + params={}, + ) + + @pytest.mark.asyncio + async def test_pre_compute_with_tag_and_params( + self, portfolio_operators_list, backtesting_trader + ): + _config, exchange_manager, _trader = backtesting_trader + _, _, withdraw_op_class = portfolio_operators_list + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 1.0}) + tag = "memo-123" + params = {"fee": "low"} + + with mock.patch.object( + exchange_manager.trader, + "withdraw", + mock.AsyncMock(return_value=self.WITHDRAW_RESULT), + ) as withdraw_mock: + operator = withdraw_op_class( + ASSET_BTC, self.NETWORK, self.ADDRESS, 0.1, tag=tag, params=params + ) + await operator.pre_compute() + + withdraw_mock.assert_awaited_once_with( + ASSET_BTC, + decimal.Decimal("0.1"), + self.NETWORK, + self.ADDRESS, + tag=tag, + params=params, + ) + + def test_compute_without_pre_compute(self, portfolio_operators_list): + _, _, withdraw_op_class = portfolio_operators_list + operator = withdraw_op_class(ASSET_BTC, self.NETWORK, self.ADDRESS, 0.1) + with pytest.raises( + octobot_commons.errors.DSLInterpreterError, + match="has not been pre_computed", + ): + operator.compute() + + @pytest.mark.asyncio + async def test_withdraw_call_as_dsl(self, interpreter, backtesting_trader): + _config, exchange_manager, _trader = backtesting_trader + _ensure_portfolio_config(backtesting_trader, {ASSET_BTC: 2.0, ASSET_USDT: 1000}) + + with mock.patch.object( + exchange_manager.trader, + "withdraw", + mock.AsyncMock(return_value={"id": "wd-456", "status": "ok"}), + ) as withdraw_mock: + result = await interpreter.interprete( + f"withdraw('{ASSET_BTC}', '{self.NETWORK}', '{self.ADDRESS}', 1.5)" + ) + + assert result == {"created_withdrawals": [{"id": "wd-456", "status": "ok"}]} + withdraw_mock.assert_awaited_once_with( + ASSET_BTC, + decimal.Decimal("1.5"), + self.NETWORK, + self.ADDRESS, + tag=None, + params={}, + ) diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/__init__.py new file mode 100644 index 0000000000..4aa037e5d7 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/__init__.py @@ -0,0 +1,270 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import pytest +import typing + +import numpy as np + +import octobot_commons.enums +import octobot_commons.errors +import octobot_commons.constants +import octobot_commons.dsl_interpreter as dsl_interpreter +import tentacles.Meta.DSL_operators.exchange_operators as exchange_operators + + +SYMBOL = "BTC/USDT" +SYMBOL2 = "ETH/USDT" +TIME_FRAME = "1h" +TIME_FRAME2 = "4h" +KLINE_SIGNATURE = 0.00666 + + +@pytest.fixture +def historical_prices(): + return np.array([ + 81.59, 81.06, 82.87, 83, 83.61, 83.15, 82.84, 83.99, 84.55, 84.36, 85.53, 86.54, 86.89, + 87.77, 87.29, 87.18, 87.01, 89.02, 89.68, 90.36, 92.83, 93.37, 93.02, 93.45, 94.13, + 93.12, 93.18, 92.08, 92.82, 92.92, 92.25, 92.22 + ]) + +@pytest.fixture +def historical_times(historical_prices): + return np.array([ + i + 10 for i in range(len(historical_prices)) + ], dtype=np.float64) + + +@pytest.fixture +def historical_volume(historical_prices): + base_volume_pattern = [ + # will create an int np.array, which will updated to float64 to comply with tulipy requirements + 903, 1000, 2342, 992, 900, 1231, 1211, 1113 + ] + return np.array(base_volume_pattern*(len(historical_prices) // len(base_volume_pattern) + 1), dtype=np.float64)[:len(historical_prices)] + + +def _get_candle_managers(historical_prices, historical_volume, historical_times): + btc_1h_candles_manager = mock.Mock( + get_symbol_open_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy()), + get_symbol_high_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy()), + get_symbol_low_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy()), + get_symbol_close_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy()), + get_symbol_volume_candles=mock.Mock(side_effect=lambda _ : historical_volume.copy()), + get_symbol_time_candles=mock.Mock(side_effect=lambda _ : historical_times.copy()), + time_candles_index=len(historical_times), + open_candles_index=len(historical_prices), + high_candles_index=len(historical_prices), + low_candles_index=len(historical_prices), + close_candles_index=len(historical_prices), + volume_candles_index=len(historical_volume), + time_candles=historical_times, + ) + eth_1h_candles_manager = mock.Mock( + get_symbol_open_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() / 2), + get_symbol_high_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() / 2), + get_symbol_low_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() / 2), + get_symbol_close_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() / 2), + get_symbol_volume_candles=mock.Mock(side_effect=lambda _ : historical_volume.copy() / 2), + get_symbol_time_candles=mock.Mock(side_effect=lambda _ : historical_times.copy() / 2), + time_candles_index=len(historical_times), + open_candles_index=len(historical_prices), + high_candles_index=len(historical_prices), + low_candles_index=len(historical_prices), + close_candles_index=len(historical_prices), + volume_candles_index=len(historical_volume), + time_candles=historical_times / 2, + ) + btc_4h_candles_manager = mock.Mock( + get_symbol_open_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() * 2), + get_symbol_high_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() * 2), + get_symbol_low_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() * 2), + get_symbol_close_candles=mock.Mock(side_effect=lambda _ : historical_prices.copy() * 2), + get_symbol_volume_candles=mock.Mock(side_effect=lambda _ : historical_volume.copy() * 2), + get_symbol_time_candles=mock.Mock(side_effect=lambda _ : historical_times.copy() * 2), + time_candles_index=len(historical_times), + open_candles_index=len(historical_prices), + high_candles_index=len(historical_prices), + low_candles_index=len(historical_prices), + close_candles_index=len(historical_prices), + volume_candles_index=len(historical_volume), + time_candles=historical_times * 2, + ) + return ( + btc_1h_candles_manager, + eth_1h_candles_manager, + btc_4h_candles_manager, + ) + + +def _get_kline(candles_manager: mock.Mock, signature: float, kline_time_delta: typing.Optional[float]) -> list: + kline = [0] * len(octobot_commons.enums.PriceIndexes) + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_TIME.value] = ( + candles_manager.get_symbol_time_candles(-1)[-1] + kline_time_delta + if kline_time_delta is not None + else candles_manager.get_symbol_time_candles(-1)[-1] + ) + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_OPEN.value] = candles_manager.get_symbol_open_candles(-1)[-1] + signature + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_HIGH.value] = candles_manager.get_symbol_high_candles(-1)[-1] + signature + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_LOW.value] = candles_manager.get_symbol_low_candles(-1)[-1] + signature + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_CLOSE.value] = candles_manager.get_symbol_close_candles(-1)[-1] + signature + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_VOL.value] = candles_manager.get_symbol_volume_candles(-1)[-1] + signature + return kline + + +def _get_symbol_data_factory( + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager, kline_type: str +): + def _get_symbol_data(symbol: str, **kwargs): + symbol_candles = {} + one_h_candles_manager = btc_1h_candles_manager if symbol == SYMBOL else eth_1h_candles_manager if symbol == SYMBOL2 else None + four_h_candles_manager = btc_4h_candles_manager if symbol == SYMBOL else None # no 4h eth candles + if one_h_candles_manager is None and four_h_candles_manager is None: + raise octobot_commons.errors.InvalidParametersError(f"Symbol {symbol} not found") + symbol_candles[octobot_commons.enums.TimeFrames(TIME_FRAME)] = one_h_candles_manager + if four_h_candles_manager: + symbol_candles[octobot_commons.enums.TimeFrames(TIME_FRAME2)] = four_h_candles_manager + if kline_type == "no_kline": + symbol_klines = {} + elif kline_type == "same_time_kline": + symbol_klines = { + octobot_commons.enums.TimeFrames(TIME_FRAME): mock.Mock(kline=_get_kline(one_h_candles_manager, KLINE_SIGNATURE, None)), + } + if four_h_candles_manager: + symbol_klines[octobot_commons.enums.TimeFrames(TIME_FRAME2)] = mock.Mock(kline=_get_kline(four_h_candles_manager, KLINE_SIGNATURE, None)) + elif kline_type == "new_time_kline": + symbol_klines = { + octobot_commons.enums.TimeFrames(TIME_FRAME): mock.Mock(kline=_get_kline( + one_h_candles_manager, KLINE_SIGNATURE, + octobot_commons.enums.TimeFramesMinutes[octobot_commons.enums.TimeFrames(TIME_FRAME)] * octobot_commons.constants.MINUTE_TO_SECONDS + )), + } + if four_h_candles_manager: + symbol_klines[octobot_commons.enums.TimeFrames(TIME_FRAME2)] = mock.Mock(kline=_get_kline( + four_h_candles_manager, KLINE_SIGNATURE, + octobot_commons.enums.TimeFramesMinutes[octobot_commons.enums.TimeFrames(TIME_FRAME2)] * octobot_commons.constants.MINUTE_TO_SECONDS + )) + else: + raise NotImplementedError(f"Kline type {kline_type} not implemented") + return mock.Mock( + symbol_candles=symbol_candles, + symbol_klines=symbol_klines + ) + return _get_symbol_data + + +@pytest.fixture +def exchange_manager_with_candles(historical_prices, historical_volume, historical_times): + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager = _get_candle_managers( + historical_prices, historical_volume, historical_times + ) + return mock.Mock( + id="exchange_manager_id", + exchange_name="binance", + exchange_symbols_data=mock.Mock( + get_exchange_symbol_data=_get_symbol_data_factory( + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager, "no_kline" + ) + ) + ) + + +@pytest.fixture +def exchange_manager_with_candles_and_klines(historical_prices, historical_volume, historical_times): + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager = _get_candle_managers( + historical_prices, historical_volume, historical_times + ) + return mock.Mock( + id="exchange_manager_id", + exchange_name="binance", + exchange_symbols_data=mock.Mock( + get_exchange_symbol_data=_get_symbol_data_factory( + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager, "same_time_kline" + ) + ) + ) + + +@pytest.fixture +def exchange_manager_with_candles_and_new_candle_klines(historical_prices, historical_volume, historical_times): + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager = _get_candle_managers( + historical_prices, historical_volume, historical_times + ) + return mock.Mock( + id="exchange_manager_id", + exchange_name="binance", + exchange_symbols_data=mock.Mock( + get_exchange_symbol_data=_get_symbol_data_factory( + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager, "new_time_kline" + ) + ) + ) + + +@pytest.fixture +def candle_manager_by_time_frame_by_symbol(historical_prices, historical_volume, historical_times): + btc_1h_candles_manager, eth_1h_candles_manager, btc_4h_candles_manager = _get_candle_managers( + historical_prices, historical_volume, historical_times + ) + return { + TIME_FRAME: { + SYMBOL: btc_1h_candles_manager, + SYMBOL2: eth_1h_candles_manager, + }, + TIME_FRAME2: { + SYMBOL: btc_4h_candles_manager, + }, + } + + +@pytest.fixture +def interpreter(exchange_manager_with_candles): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ohlcv_operators(exchange_manager_with_candles, SYMBOL, TIME_FRAME) + ) + + +@pytest.fixture +def interpreter_without_exchange_data(): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ohlcv_operators(None, None, None, None) + ) + + +@pytest.fixture +def interpreter_with_exchange_manager_and_klines(exchange_manager_with_candles_and_klines): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ohlcv_operators(exchange_manager_with_candles_and_klines, SYMBOL, TIME_FRAME) + ) + + +@pytest.fixture +def interpreter_with_exchange_manager_and_new_candle_klines(exchange_manager_with_candles_and_new_candle_klines): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ohlcv_operators(exchange_manager_with_candles_and_new_candle_klines, SYMBOL, TIME_FRAME) + ) + + +@pytest.fixture +def interpreter_with_candle_manager_by_time_frame_by_symbol(candle_manager_by_time_frame_by_symbol): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ohlcv_operators(None, SYMBOL, TIME_FRAME, candle_manager_by_time_frame_by_symbol) + ) diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_mocks.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_mocks.py new file mode 100644 index 0000000000..915f06a527 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_mocks.py @@ -0,0 +1,118 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import numpy as np + +import octobot_commons.enums +import octobot_commons.constants + +from tentacles.Meta.DSL_operators.exchange_operators.tests.exchange_public_data_operators import ( + historical_prices, + historical_volume, + historical_times, + KLINE_SIGNATURE, + TIME_FRAME, + exchange_manager_with_candles, + exchange_manager_with_candles_and_klines, + exchange_manager_with_candles_and_new_candle_klines, + candle_manager_by_time_frame_by_symbol, + interpreter, + interpreter_with_candle_manager_by_time_frame_by_symbol, + interpreter_with_exchange_manager_and_new_candle_klines, + interpreter_with_exchange_manager_and_klines +) + + +@pytest.mark.asyncio +async def test_interpreter_mock(interpreter, historical_prices, historical_volume, historical_times): + assert np.array_equal(await interpreter.interprete("open"), historical_prices) + assert await interpreter.interprete("open[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter.interprete("high"), historical_prices) + assert await interpreter.interprete("high[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter.interprete("low"), historical_prices) + assert await interpreter.interprete("low[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter.interprete("close"), historical_prices) + assert await interpreter.interprete("close[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter.interprete("volume"), historical_volume) + assert await interpreter.interprete("volume[-1]") == historical_volume[-1] == 1113 + assert np.array_equal(await interpreter.interprete("time"), historical_times) + assert await interpreter.interprete("time[-1]") == historical_times[-1] == 41 + + +@pytest.mark.asyncio +async def test_interpreter_with_exchange_manager_and_klines_mock( + interpreter_with_exchange_manager_and_klines, historical_prices, historical_volume, historical_times +): + kline_adapted_historical_prices = historical_prices.copy() + kline_adapted_historical_prices[-1] += KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_klines.interprete("open"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_klines.interprete("open[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_klines.interprete("high"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_klines.interprete("high[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_klines.interprete("low"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_klines.interprete("low[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_klines.interprete("close"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_klines.interprete("close[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + kline_adapted_historical_volume = historical_volume.copy() + kline_adapted_historical_volume[-1] += KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_klines.interprete("volume"), + kline_adapted_historical_volume) + assert await interpreter_with_exchange_manager_and_klines.interprete("volume[-1]") == historical_volume[-1] + KLINE_SIGNATURE == 1113 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_klines.interprete("time"), historical_times) + assert await interpreter_with_exchange_manager_and_klines.interprete("time[-1]") == historical_times[-1] == 41 + + +@pytest.mark.asyncio +async def test_interpreter_with_exchange_manager_and_new_candle_klines_mock( + interpreter_with_exchange_manager_and_new_candle_klines, historical_prices, historical_volume, historical_times +): + kline_adapted_historical_prices = np.append(historical_prices[1:], historical_prices[-1] + KLINE_SIGNATURE) + assert len(historical_prices) == len(kline_adapted_historical_prices) + assert np.array_equal(await interpreter_with_exchange_manager_and_new_candle_klines.interprete("open"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_new_candle_klines.interprete("open[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_new_candle_klines.interprete("high"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_new_candle_klines.interprete("high[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_new_candle_klines.interprete("low"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_new_candle_klines.interprete("low[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + assert np.array_equal(await interpreter_with_exchange_manager_and_new_candle_klines.interprete("close"), kline_adapted_historical_prices) + assert await interpreter_with_exchange_manager_and_new_candle_klines.interprete("close[-1]") == kline_adapted_historical_prices[-1] == 92.22 + KLINE_SIGNATURE + kline_adapted_historical_volume = np.append(historical_volume[1:], historical_volume[-1] + KLINE_SIGNATURE) + assert np.array_equal(await interpreter_with_exchange_manager_and_new_candle_klines.interprete("volume"), + kline_adapted_historical_volume) + assert await interpreter_with_exchange_manager_and_new_candle_klines.interprete("volume[-1]") == historical_volume[-1] + KLINE_SIGNATURE == 1113 + KLINE_SIGNATURE + new_kline_time = historical_times[-1] + octobot_commons.enums.TimeFramesMinutes[octobot_commons.enums.TimeFrames(TIME_FRAME)] * octobot_commons.constants.MINUTE_TO_SECONDS + kline_adapted_historical_times = np.append(historical_times[1:], new_kline_time) + assert np.array_equal(await interpreter_with_exchange_manager_and_new_candle_klines.interprete("time"), kline_adapted_historical_times) + assert await interpreter_with_exchange_manager_and_new_candle_klines.interprete("time[-1]") == kline_adapted_historical_times[-1] == new_kline_time + + +@pytest.mark.asyncio +async def test_interpreter_with_candle_manager_by_time_frame_by_symbol_mock( + interpreter_with_candle_manager_by_time_frame_by_symbol, historical_prices, historical_volume, historical_times +): + assert np.array_equal(await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("open"), historical_prices) + assert await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("open[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("high"), historical_prices) + assert await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("high[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("low"), historical_prices) + assert await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("low[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("close"), historical_prices) + assert await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("close[-1]") == historical_prices[-1] == 92.22 + assert np.array_equal(await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("volume"), historical_volume) + assert await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("volume[-1]") == historical_volume[-1] == 1113 + assert np.array_equal(await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("time"), historical_times) + assert await interpreter_with_candle_manager_by_time_frame_by_symbol.interprete("time[-1]") == historical_times[-1] == 41 diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_ohlcv_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_ohlcv_operators.py new file mode 100644 index 0000000000..ae121cec95 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_ohlcv_operators.py @@ -0,0 +1,262 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import mock + +import numpy as np + +import octobot_commons.errors +import octobot_commons.enums +import octobot_commons.constants +import octobot_commons.logging +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.api +import octobot_trading.constants +import tentacles.Meta.DSL_operators.exchange_operators as exchange_operators +import tentacles.Meta.DSL_operators.exchange_operators.exchange_public_data_operators.ohlcv_operators as ohlcv_operators + + +from tentacles.Meta.DSL_operators.exchange_operators.tests.exchange_public_data_operators import ( + SYMBOL, + TIME_FRAME, + KLINE_SIGNATURE, + historical_prices, + historical_volume, + historical_times, + exchange_manager_with_candles, + exchange_manager_with_candles_and_klines, + exchange_manager_with_candles_and_new_candle_klines, + candle_manager_by_time_frame_by_symbol, + interpreter_with_candle_manager_by_time_frame_by_symbol, + interpreter_with_exchange_manager_and_klines, + interpreter_with_exchange_manager_and_new_candle_klines, + interpreter, + interpreter_without_exchange_data, +) + + +@pytest.fixture +def expected_values(request, historical_prices, historical_volume, historical_times): + select_value = request.param + if select_value == "price": + return historical_prices + elif select_value == "volume": + return historical_volume + elif select_value == "time": + return historical_times + raise octobot_commons.errors.InvalidParametersError(f"Invalid select_value: {select_value}") + + +@pytest.fixture +def operator(request): + return request.param + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, expected_values", [ + ("open", "price"), + ("high", "price"), + ("low", "price"), + ("close", "price"), + ("volume", "volume"), + ("time", "time") +], indirect=True) # use indirect=True to pass fixtures as a parameter +async def test_ohlcv_operators_basic_calls_without_klines( + interpreter, interpreter_with_candle_manager_by_time_frame_by_symbol, + operator, expected_values +): + # test with both interpreter data sources + for _interpreter in [interpreter, interpreter_with_candle_manager_by_time_frame_by_symbol]: + # no param, use context values: SYMBOL, TIME_FRAME: BTC/USDT, 1h + operator_value = await _interpreter.interprete(operator) + assert np.array_equal(operator_value, expected_values) + # ensure symbol parameters are used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}('ETH/USDT')"), expected_values / 2) # 1h ETH + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT')"), expected_values) # 1h BTC + + # ensure time frame is used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}(None, '4h')"), expected_values * 2) # 4h BTC + assert np.array_equal(await _interpreter.interprete(f"{operator}(None, '1h')"), expected_values) # 1h BTC + + # ensure symbol and time frame are used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT', '1h')"), expected_values) # 4h BTC rsi value + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT', '4h')"), expected_values * 2) # 4h BTC rsi value + assert np.array_equal(await _interpreter.interprete(f"{operator}('ETH/USDT', '1h')"), expected_values / 2) # 1h ETH rsi value + with pytest.raises(KeyError): # no 4h ETH candles + await _interpreter.interprete(f"{operator}('ETH/USDT', '4h')") + + +def _adapted_for_kline(values: np.ndarray, operator: str, time_delay: float) -> np.ndarray: + adapted = values.copy() + if time_delay > 0: + adapted = np.append(adapted[1:], adapted[-1] + (time_delay if operator == "time" else KLINE_SIGNATURE)) + else: + adapted[-1] += (0 if operator == "time" else KLINE_SIGNATURE) + return adapted + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, expected_values", [ + ("open", "price"), + ("high", "price"), + ("low", "price"), + ("close", "price"), + ("volume", "volume"), + ("time", "time") +], indirect=True) # use indirect=True to pass fixtures as a parameter +async def test_ohlcv_operators_basic_calls_with_klines( + interpreter_with_exchange_manager_and_klines, operator, expected_values +): + # test with both interpreter data sources + _interpreter = interpreter_with_exchange_manager_and_klines + # no param, use context values: SYMBOL, TIME_FRAME: BTC/USDT, 1h + operator_value = await _interpreter.interprete(operator) + kline_adapted_value = _adapted_for_kline(expected_values, operator, 0) + assert np.array_equal(operator_value, kline_adapted_value) + # ensure symbol parameters are used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}('ETH/USDT')"), _adapted_for_kline(expected_values / 2, operator, 0)) # 1h ETH + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT')"), kline_adapted_value) # 1h BTC + + # ensure time frame is used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}(None, '4h')"), _adapted_for_kline(expected_values * 2, operator, 0)) # 4h BTC + assert np.array_equal(await _interpreter.interprete(f"{operator}(None, '1h')"), kline_adapted_value) # 1h BTC + + # ensure symbol and time frame are used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT', '1h')"), kline_adapted_value) # 4h BTC rsi value + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT', '4h')"), _adapted_for_kline(expected_values * 2, operator, 0)) # 4h BTC rsi value + assert np.array_equal(await _interpreter.interprete(f"{operator}('ETH/USDT', '1h')"), _adapted_for_kline(expected_values / 2, operator, 0)) # 1h ETH rsi value + with pytest.raises(KeyError): # no 4h ETH candles + await _interpreter.interprete(f"{operator}('ETH/USDT', '4h')") + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, expected_values", [ + ("open", "price"), + ("high", "price"), + ("low", "price"), + ("close", "price"), + ("volume", "volume"), + ("time", "time") +], indirect=True) # use indirect=True to pass fixtures as a parameter +async def test_ohlcv_operators_basic_calls_with_new_candle_klines( + interpreter_with_exchange_manager_and_new_candle_klines, operator, expected_values +): + # test with both interpreter data sources + _interpreter = interpreter_with_exchange_manager_and_new_candle_klines + # no param, use context values: SYMBOL, TIME_FRAME: BTC/USDT, 1h + operator_value = await _interpreter.interprete(operator) + one_hour_time_delay = octobot_commons.enums.TimeFramesMinutes[octobot_commons.enums.TimeFrames("1h")] * octobot_commons.constants.MINUTE_TO_SECONDS + four_hours_time_delay = octobot_commons.enums.TimeFramesMinutes[octobot_commons.enums.TimeFrames("4h")] * octobot_commons.constants.MINUTE_TO_SECONDS + kline_adapted_value = _adapted_for_kline(expected_values, operator, one_hour_time_delay) + assert np.array_equal(operator_value, kline_adapted_value) + # ensure symbol parameters are used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}('ETH/USDT')"), _adapted_for_kline(expected_values / 2, operator, one_hour_time_delay)) # 1h ETH + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT')"), kline_adapted_value) # 1h BTC + + # ensure time frame is used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}(None, '4h')"), _adapted_for_kline(expected_values * 2, operator, four_hours_time_delay)) # 4h BTC + assert np.array_equal(await _interpreter.interprete(f"{operator}(None, '1h')"), kline_adapted_value) # 1h BTC + + # ensure symbol and time frame are used when provided + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT', '1h')"), kline_adapted_value) # 4h BTC rsi value + assert np.array_equal(await _interpreter.interprete(f"{operator}('BTC/USDT', '4h')"), _adapted_for_kline(expected_values * 2, operator, four_hours_time_delay)) # 4h BTC rsi value + assert np.array_equal(await _interpreter.interprete(f"{operator}('ETH/USDT', '1h')"), _adapted_for_kline(expected_values / 2, operator, one_hour_time_delay)) # 1h ETH rsi value + with pytest.raises(KeyError): # no 4h ETH candles + await _interpreter.interprete(f"{operator}('ETH/USDT', '4h')") + + # with unknown kline time: unknown kline is ignored + def _get_kline(symbol_data, time_frame): + kline = octobot_trading.api.get_symbol_klines(symbol_data, time_frame) + kline[octobot_commons.enums.PriceIndexes.IND_PRICE_TIME.value] = 1000 + return kline + + bot_log_mock = mock.Mock( + error=mock.Mock() + ) + with mock.patch.object( + ohlcv_operators, "_get_kline", side_effect=_get_kline + ) as _get_kline_mock, mock.patch.object( + octobot_commons.logging, "get_logger", mock.Mock(return_value=bot_log_mock) + ): + operator_value = await _interpreter.interprete(operator) + _get_kline_mock.assert_called_once() + # not == kline adapted value because unknown kline is ignored + assert np.array_equal(operator_value, kline_adapted_value) is False + assert np.array_equal(operator_value, expected_values) + bot_log_mock.error.assert_called_once() + assert "kline time (1000) is not equal to last candle time not the last time" in bot_log_mock.error.call_args[0][0] + + +class TestGetDependencies: + """Tests for get_dependencies using DSL syntax and the interpreter.""" + + @pytest.mark.asyncio + @pytest.mark.parametrize("operator", [ + "open", + "high", + "low", + "close", + "volume", + "time" + ]) + async def test_ohlcv_operators_dependencies( + self, interpreter, interpreter_without_exchange_data, operator + ): + interpreter.prepare(f"{operator}") + assert interpreter.get_dependencies() == [ + exchange_operators.ExchangeDataDependency( + symbol=SYMBOL, + time_frame=TIME_FRAME, + data_source=octobot_trading.constants.OHLCV_CHANNEL + ) + ] + + # same dependency for all operators + interpreter.prepare(f"{operator} + close + volume") + assert interpreter.get_dependencies() == [ + exchange_operators.ExchangeDataDependency( + symbol=SYMBOL, + time_frame=TIME_FRAME, + data_source=octobot_trading.constants.OHLCV_CHANNEL + ) + ] + + # SYMBOL + ETH/USDT dependency + interpreter.prepare(f"{operator} + close('ETH/USDT') + volume") + assert interpreter.get_dependencies() == [ + exchange_operators.ExchangeDataDependency( + symbol=SYMBOL, + time_frame=TIME_FRAME, + data_source=octobot_trading.constants.OHLCV_CHANNEL + ), + exchange_operators.ExchangeDataDependency( + symbol="ETH/USDT", + time_frame=None, + data_source=octobot_trading.constants.OHLCV_CHANNEL + ), + ] + + # now without exchange manager: SYMBOL is not returned as a dependency: only dynamic dependencies are returned + interpreter_without_exchange_data.prepare(f"{operator}") + assert interpreter_without_exchange_data.get_dependencies() == [] + interpreter_without_exchange_data.prepare(f"{operator} + close('ETH/USDT') + volume") + assert interpreter_without_exchange_data.get_dependencies() == [ + exchange_operators.ExchangeDataDependency( + symbol="ETH/USDT", + time_frame=None, + data_source=octobot_trading.constants.OHLCV_CHANNEL + ), + ] diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_symbol_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_symbol_operators.py new file mode 100644 index 0000000000..ca2a0d18aa --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_symbol_operators.py @@ -0,0 +1,79 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import mock + +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_trading.enums as trading_enums +import tentacles.Meta.DSL_operators.exchange_operators as exchange_operators + + +SYMBOL = "BTC/USDT" +EXPIRY_TIMESTAMP = 1700000000000 + + +@pytest.fixture +def host(): + return mock.Mock( + triggered_symbol=SYMBOL, + exchange_manager=mock.Mock( + exchange=mock.Mock( + connector=mock.Mock( + client=mock.Mock( + markets={ + SYMBOL: { + trading_enums.ExchangeConstantsMarketStatusColumns.EXPIRY.value: EXPIRY_TIMESTAMP, + }, + "ETH/USDT": {}, + } + ) + ) + ) + ), + ) + + +@pytest.fixture +def interpreter(host): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_symbol_operators(host) + ) + + +@pytest.mark.asyncio +async def test_triggered_symbol(interpreter): + assert await interpreter.interprete("triggered_symbol()") == SYMBOL + + +@pytest.mark.asyncio +async def test_market_expiry_with_expiry(interpreter): + assert await interpreter.interprete(f"market_expiry('{SYMBOL}')") == EXPIRY_TIMESTAMP + + +@pytest.mark.asyncio +async def test_market_expiry_without_expiry(interpreter): + assert await interpreter.interprete("market_expiry('ETH/USDT')") is None + + +@pytest.mark.asyncio +async def test_market_expiry_unknown_symbol(interpreter): + assert await interpreter.interprete("market_expiry('UNKNOWN/PAIR')") is None + + +@pytest.mark.asyncio +async def test_market_expiry_with_triggered_symbol(interpreter): + assert await interpreter.interprete("market_expiry(triggered_symbol())") == EXPIRY_TIMESTAMP diff --git a/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_ticker_operators.py b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_ticker_operators.py new file mode 100644 index 0000000000..804bf2b486 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/exchange_operators/tests/exchange_public_data_operators/test_ticker_operators.py @@ -0,0 +1,80 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import tentacles.Meta.DSL_operators.exchange_operators as exchange_operators + + +SYMBOL = "BTC/USDT" +TICKERS = { + SYMBOL: { + "close": 50000.0, + "open": 49000.0, + "high": 51000.0, + "low": 48000.0, + "baseVolume": 1234.5, + "last": 50100.0, + }, + "ETH/USDT": { + "close": 3000.0, + "open": 2900.0, + "high": 3100.0, + "low": 2800.0, + "baseVolume": 5678.9, + "last": 3050.0, + }, +} + + +@pytest.fixture +def interpreter(): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ticker_operators(TICKERS) + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, field", [ + ("ticker_close", "close"), + ("ticker_open", "open"), + ("ticker_high", "high"), + ("ticker_low", "low"), + ("ticker_volume", "baseVolume"), + ("ticker_last", "last"), +]) +async def test_ticker_operators(interpreter, operator, field): + assert await interpreter.interprete(f"{operator}('{SYMBOL}')") == TICKERS[SYMBOL][field] + assert await interpreter.interprete(f"{operator}('ETH/USDT')") == TICKERS["ETH/USDT"][field] + + +@pytest.mark.asyncio +async def test_ticker_unknown_symbol(interpreter): + with pytest.raises(octobot_commons.errors.DSLInterpreterError, match="No ticker data"): + await interpreter.interprete("ticker_close('UNKNOWN/PAIR')") + + +@pytest.mark.asyncio +async def test_ticker_none_field(): + tickers_with_none = {SYMBOL: {"close": None}} + interp = dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + + exchange_operators.create_ticker_operators(tickers_with_none) + ) + with pytest.raises(octobot_commons.errors.DSLInterpreterError, match="is None"): + await interp.interprete(f"ticker_close('{SYMBOL}')") diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/__init__.py new file mode 100644 index 0000000000..d042b56fe3 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/__init__.py @@ -0,0 +1,143 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.python_std_operators.base_binary_operators as dsl_interpreter_base_binary_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_binary_operators import ( + AddOperator, + SubOperator, + MultOperator, + DivOperator, + FloorDivOperator, + ModOperator, + PowOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_compare_operators as dsl_interpreter_base_compare_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_compare_operators import ( + EqOperator, + NotEqOperator, + LtOperator, + LtEOperator, + GtOperator, + GtEOperator, + IsOperator, + IsNotOperator, + InOperator, + NotInOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_unary_operators as dsl_interpreter_base_unary_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_unary_operators import ( + UAddOperator, + USubOperator, + NotOperator, + InvertOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_nary_operators as dsl_interpreter_base_nary_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_nary_operators import ( + AndOperator, + OrOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_call_operators as dsl_interpreter_base_call_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_call_operators import ( + MinOperator, + MaxOperator, + MeanOperator, + SqrtOperator, + AbsOperator, + RoundOperator, + FloorOperator, + GetOperator, + CeilOperator, + SinOperator, + CosOperator, + OscillatorOperator, + ValueIfOperator, + ErrorOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_resetting_operators as dsl_interpreter_base_resetting_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_resetting_operators import ( + LoopUntilOperator, + WaitOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_name_operators as dsl_interpreter_base_name_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_name_operators import ( + PiOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_expression_operators as dsl_interpreter_base_expression_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_expression_operators import ( + IfExpOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_subscripting_operators as dsl_interpreter_base_subscripting_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_subscripting_operators import ( + SubscriptOperator, + SliceOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_iterable_operators as dsl_interpreter_base_iterable_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_iterable_operators import ( + ListOperator, +) +import tentacles.Meta.DSL_operators.python_std_operators.base_time_operators as dsl_interpreter_base_time_operators +from tentacles.Meta.DSL_operators.python_std_operators.base_time_operators import ( + NowMsOperator, +) + +__all__ = [ + "AddOperator", + "SubOperator", + "MultOperator", + "DivOperator", + "FloorDivOperator", + "ModOperator", + "PowOperator", + "EqOperator", + "NotEqOperator", + "LtOperator", + "LtEOperator", + "GtOperator", + "GtEOperator", + "IsOperator", + "IsNotOperator", + "InOperator", + "NotInOperator", + "UAddOperator", + "USubOperator", + "NotOperator", + "InvertOperator", + "AndOperator", + "OrOperator", + "MinOperator", + "MaxOperator", + "MeanOperator", + "SqrtOperator", + "AbsOperator", + "RoundOperator", + "FloorOperator", + "GetOperator", + "CeilOperator", + "SinOperator", + "CosOperator", + "OscillatorOperator", + "ValueIfOperator", + "LoopUntilOperator", + "WaitOperator", + "PiOperator", + "IfExpOperator", + "SubscriptOperator", + "SliceOperator", + "ListOperator", + "ErrorOperator", + "NowMsOperator", +] diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_binary_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_binary_operators.py new file mode 100644 index 0000000000..dc91ac4eba --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_binary_operators.py @@ -0,0 +1,118 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast + +import octobot_commons.dsl_interpreter.operators.binary_operator as dsl_interpreter_binary_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class AddOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "+" + DESCRIPTION = "Addition operator. Adds two operands together." + EXAMPLE = "5 + 3" + + @staticmethod + def get_name() -> str: + return ast.Add.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left + right + + +class SubOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "-" + DESCRIPTION = "Subtraction operator. Subtracts the right operand from the left operand." + EXAMPLE = "5 - 3" + + @staticmethod + def get_name() -> str: + return ast.Sub.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left - right + + +class MultOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "*" + DESCRIPTION = "Multiplication operator. Multiplies two operands." + EXAMPLE = "5 * 3" + + @staticmethod + def get_name() -> str: + return ast.Mult.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left * right + + +class DivOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "/" + DESCRIPTION = "Division operator. Divides the left operand by the right operand." + EXAMPLE = "10 / 2" + + @staticmethod + def get_name() -> str: + return ast.Div.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left / right + + +class FloorDivOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "//" + DESCRIPTION = "Floor division operator. Divides the left operand by the right operand and returns the floor of the result." + EXAMPLE = "10 // 3" + + @staticmethod + def get_name() -> str: + return ast.FloorDiv.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left // right + + +class ModOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "%" + DESCRIPTION = "Modulo operator. Returns the remainder after dividing the left operand by the right operand." + EXAMPLE = "10 % 3" + + @staticmethod + def get_name() -> str: + return ast.Mod.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left % right + + +class PowOperator(dsl_interpreter_binary_operator.BinaryOperator): + NAME = "**" + DESCRIPTION = "Exponentiation operator. Raises the left operand to the power of the right operand." + EXAMPLE = "2 ** 3" + + @staticmethod + def get_name() -> str: + return ast.Pow.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left**right diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_call_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_call_operators.py new file mode 100644 index 0000000000..ab24a4b03c --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_call_operators.py @@ -0,0 +1,364 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import math +import time + +import octobot_commons.constants as octobot_commons_constants +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.json_util as json_util + + + +class MinOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + NAME = "min" + DESCRIPTION = "Returns the minimum value from the given operands." + EXAMPLE = "min(1, 2, 3)" + + @staticmethod + def get_name() -> str: + return "min" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + return min(operands) + + +class MaxOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + NAME = "max" + DESCRIPTION = "Returns the maximum value from the given operands." + EXAMPLE = "max(1, 2, 3)" + + @staticmethod + def get_name() -> str: + return "max" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + return max(operands) + + +class MeanOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + NAME = "mean" + DESCRIPTION = "Returns the arithmetic mean (average) of the given numeric operands." + EXAMPLE = "mean(1, 2, 3, 4)" + + @staticmethod + def get_name() -> str: + return "mean" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + # Ensure all operands are numeric + numeric_operands = [] + for operand in operands: + if isinstance(operand, (int, float)): + numeric_operands.append(operand) + else: + raise octobot_commons.errors.InvalidParametersError( + f"mean() requires numeric arguments, got {type(operand).__name__}" + ) + return sum(numeric_operands) / len(numeric_operands) + + +class SqrtOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + NAME = "sqrt" + DESCRIPTION = "Returns the square root of the given numeric operand." + EXAMPLE = "sqrt(16)" + + @staticmethod + def get_name() -> str: + return "sqrt" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + if isinstance(operand, (int, float)): + return math.sqrt(operand) + raise octobot_commons.errors.InvalidParametersError( + f"sqrt() requires a numeric argument, got {type(operand).__name__}" + ) + + +class AbsOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + NAME = "abs" + DESCRIPTION = "Returns the absolute value of the given operand." + EXAMPLE = "abs(-5)" + + @staticmethod + def get_name() -> str: + return "abs" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + return abs(operand) + + +class RoundOperator(dsl_interpreter.CallOperator): + NAME = "round" + DESCRIPTION = "Rounds the given numeric value to the specified number of decimal digits. If digits is not provided, rounds to the nearest integer." + EXAMPLE = "round(3.14159, 2)" + + @staticmethod + def get_name() -> str: + return "round" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="value", description="the value to round", required=True, type=list), + dsl_interpreter.OperatorParameter(name="digits", description="the number of digits to round to", required=False, type=int), + ] + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + digits = int(computed_parameters[1]) if len(computed_parameters) == 2 else 0 + if isinstance(operand, (int, float)): + return round(operand, digits) + raise octobot_commons.errors.InvalidParametersError( + f"round() requires a numeric argument, got {type(operand).__name__}" + ) + + +class FloorOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + NAME = "floor" + DESCRIPTION = "Returns the floor of the given numeric operand (largest integer less than or equal to the value)." + EXAMPLE = "floor(3.7)" + + @staticmethod + def get_name() -> str: + return "floor" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + if isinstance(operand, (int, float)): + return math.floor(operand) + raise octobot_commons.errors.InvalidParametersError( + f"floor() requires a numeric argument, got {type(operand).__name__}" + ) + + +class CeilOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + NAME = "ceil" + DESCRIPTION = "Returns the ceiling of the given numeric operand (smallest integer greater than or equal to the value)." + EXAMPLE = "ceil(3.2)" + + @staticmethod + def get_name() -> str: + return "ceil" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + if isinstance(operand, (int, float)): + return math.ceil(operand) + raise octobot_commons.errors.InvalidParametersError( + f"ceil() requires a numeric argument, got {type(operand).__name__}" + ) + + +class SinOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + NAME = "sin" + DESCRIPTION = "Returns the sine of the given numeric operand (in radians)." + EXAMPLE = "sin(1.23)" + + @staticmethod + def get_name() -> str: + return "sin" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + if isinstance(operand, (int, float)): + return math.sin(operand) + raise octobot_commons.errors.InvalidParametersError( + f"sin() requires a numeric argument, got {type(operand).__name__}" + ) + + +class CosOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 1 + MAX_PARAMS = 1 + NAME = "cos" + DESCRIPTION = "Returns the cosine of the given numeric operand (in radians)." + EXAMPLE = "cos(1.23)" + + @staticmethod + def get_name() -> str: + return "cos" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + operand = computed_parameters[0] + if isinstance(operand, (int, float)): + return math.cos(operand) + raise octobot_commons.errors.InvalidParametersError( + f"cos() requires a numeric argument, got {type(operand).__name__}" + ) + + +class OscillatorOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 3 + MAX_PARAMS = 3 + NAME = "oscillate" + DESCRIPTION = "Returns the base value with a time-based oscillating component added. The oscillation uses a sine wave with the specified maximum percentage of the base value and period in minutes." + EXAMPLE = "oscillate(100, 10, 60)" + + @staticmethod + def get_name() -> str: + return "oscillate" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + base_value = computed_parameters[0] + max_oscillating_percent = computed_parameters[1] + period_minutes = computed_parameters[2] + + # Validate all parameters are numeric + if not isinstance(base_value, (int, float)): + raise octobot_commons.errors.InvalidParametersError( + f"oscillate() requires a numeric base value, got {type(base_value).__name__}" + ) + if not isinstance(max_oscillating_percent, (int, float)): + raise octobot_commons.errors.InvalidParametersError( + f"oscillate() requires a numeric max oscillating percent, got {type(max_oscillating_percent).__name__}" + ) + if not isinstance(period_minutes, (int, float)) or period_minutes <= 0: + raise octobot_commons.errors.InvalidParametersError( + f"oscillate() requires a positive numeric period in minutes, got {type(period_minutes).__name__}" + ) + + oscillation_range = base_value * (max_oscillating_percent / 100) + period_seconds = period_minutes * 60 + phase = 2 * math.pi * (time.time() / period_seconds) + oscillation = math.sin(phase) + oscillation_value = oscillation_range * oscillation + + return base_value + oscillation_value + + +class GetOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 3 + MAX_PARAMS = 3 + NAME = "get" + DESCRIPTION = ( + "Returns element.get(key, default) for mapping-like objects. " + "On TypeError or AttributeError, returns default." + ) + EXAMPLE = 'get(dict_var, "color", "blue")' + + @staticmethod + def get_name() -> str: + return "get" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + computed_parameters = self.get_computed_parameters() + element = computed_parameters[0] + key = computed_parameters[1] + default = computed_parameters[2] + try: + return element.get(key, default) # type: ignore + except (TypeError, AttributeError): + return default + + +class ValueIfOperator(dsl_interpreter.PreComputingCallOperator): + NAME = "value_if" + DESCRIPTION = ( + "Returns the computed value if the inner DSL expression evaluates to a truthy result; " + "otherwise False. If the condition string contains the LOCAL_VALUE_PLACEHOLDER placeholder, " + "that placeholder is replaced by repr(sanitize(value)) and the result is the full inner expression. " + "Otherwise the inner expression is repr(value) followed by the condition string (suffix mode)." + ) + EXAMPLE = "value_if(15, ' > 12') or value_if(order, \"get(LOCAL_VALUE_PLACEHOLDER, 'status', 'x') == 'open'\")" + + @staticmethod + def get_name() -> str: + return "value_if" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter( + name="value", + description=( + "the value to compare; used as repr(value) in suffix mode or to replace " + "LOCAL_VALUE_PLACEHOLDER with repr(sanitize(value)) when present" + ), + required=True, + type=object, + ), + dsl_interpreter.OperatorParameter( + name="condition", + description=( + "DSL string: either a suffix appended after repr(value), or a full expression " + "where LOCAL_VALUE_PLACEHOLDER is replaced by repr(sanitize(value))" + ), + required=True, + type=str, + ), + ] + + async def pre_compute(self) -> None: + await super().pre_compute() + param_by_name = self.get_computed_value_by_parameter() + computed_value = param_by_name["value"] + condition_script = param_by_name["condition"] + if not isinstance(condition_script, str): + raise octobot_commons.errors.InvalidParametersError( + f"value_if() requires condition to be a str, got {type(condition_script).__name__}" + ) + if octobot_commons_constants.LOCAL_VALUE_PLACEHOLDER not in condition_script: + inner_expression = repr(computed_value) + condition_script + else: + inner_expression = condition_script.replace( + octobot_commons_constants.LOCAL_VALUE_PLACEHOLDER, + repr(json_util.sanitize(computed_value)), + ) + nested_interpreter = dsl_interpreter.Interpreter(dsl_interpreter.get_all_operators()) + condition_result = await nested_interpreter.interprete(inner_expression) + self.value = computed_value if bool(condition_result) else False + + +class ErrorOperator(dsl_interpreter.CallOperator): + NAME = "error" + DESCRIPTION = "Raises a ErrorStatementEncountered exception with the given parameters." + EXAMPLE = "error('123-error')" + + @staticmethod + def get_name() -> str: + return "error" + + def compute(self): + params = self.get_computed_parameters() + raise octobot_commons.errors.ErrorStatementEncountered(*params) diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_compare_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_compare_operators.py new file mode 100644 index 0000000000..75c1b415d8 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_compare_operators.py @@ -0,0 +1,160 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast + +import octobot_commons.dsl_interpreter.operators.compare_operator as dsl_interpreter_compare_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class EqOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "==" + DESCRIPTION = "Equality operator. Returns True if the left operand equals the right operand." + EXAMPLE = "5 == 5" + + @staticmethod + def get_name() -> str: + return ast.Eq.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left == right + + +class NotEqOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "!=" + DESCRIPTION = "Inequality operator. Returns True if the left operand does not equal the right operand." + EXAMPLE = "5 != 3" + + @staticmethod + def get_name() -> str: + return ast.NotEq.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left != right + + +class LtOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "<" + DESCRIPTION = "Less than operator. Returns True if the left operand is less than the right operand." + EXAMPLE = "3 < 5" + + @staticmethod + def get_name() -> str: + return ast.Lt.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left < right + + +class LtEOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "<=" + DESCRIPTION = "Less than or equal operator. Returns True if the left operand is less than or equal to the right operand." + EXAMPLE = "5 <= 5" + + @staticmethod + def get_name() -> str: + return ast.LtE.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left <= right + + +class GtOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = ">" + DESCRIPTION = "Greater than operator. Returns True if the left operand is greater than the right operand." + EXAMPLE = "5 > 3" + + @staticmethod + def get_name() -> str: + return ast.Gt.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left > right + + +class GtEOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = ">=" + DESCRIPTION = "Greater than or equal operator. Returns True if the left operand is greater than or equal to the right operand." + EXAMPLE = "5 >= 5" + + @staticmethod + def get_name() -> str: + return ast.GtE.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left >= right + + +class IsOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "is" + DESCRIPTION = "Identity operator. Returns True if the left operand is the same object as the right operand." + EXAMPLE = "x is None" + + @staticmethod + def get_name() -> str: + return ast.Is.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left is right + + +class IsNotOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "is not" + DESCRIPTION = "Negated identity operator. Returns True if the left operand is not the same object as the right operand." + EXAMPLE = "x is not None" + + @staticmethod + def get_name() -> str: + return ast.IsNot.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left is not right + + +class InOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "in" + DESCRIPTION = "Membership operator. Returns True if the left operand is found in the right operand (container)." + EXAMPLE = "3 in [1, 2, 3]" + + @staticmethod + def get_name() -> str: + return ast.In.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left in right + + +class NotInOperator(dsl_interpreter_compare_operator.CompareOperator): + NAME = "not in" + DESCRIPTION = "Negated membership operator. Returns True if the left operand is not found in the right operand (container)." + EXAMPLE = "4 not in [1, 2, 3]" + + @staticmethod + def get_name() -> str: + return ast.NotIn.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + left, right = self.get_computed_left_and_right_parameters() + return left not in right diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_expression_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_expression_operators.py new file mode 100644 index 0000000000..ada3bf5590 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_expression_operators.py @@ -0,0 +1,67 @@ +# pylint: disable=missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast + +import octobot_commons.dsl_interpreter.operators.expression_operator as dsl_interpreter_expression_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class IfExpOperator(dsl_interpreter_expression_operator.ExpressionOperator): + """ + Base class for if expression operators: a if b else c + If expression operators have three operands: condition, true expression, false expression. + """ + NAME = "if ... else" + DESCRIPTION = "Conditional expression operator. Returns the body expression if the test condition is True, otherwise returns the orelse expression." + EXAMPLE = "5 if True else 3" + + def __init__( + self, + test: dsl_interpreter_operator.OperatorParameterType, + body: dsl_interpreter_operator.OperatorParameterType, + orelse: dsl_interpreter_operator.OperatorParameterType, + ): + super().__init__(test, body, orelse) + self.test = test + self.body = body + self.orelse = orelse + + @staticmethod + def get_name() -> str: + return ast.IfExp.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + # Compute the test condition + test_value = ( + self.test.compute() + if isinstance(self.test, dsl_interpreter_operator.Operator) + else self.test + ) + # Evaluate the condition (truthy check) + if test_value: + # Return body if condition is True + return ( + self.body.compute() + if isinstance(self.body, dsl_interpreter_operator.Operator) + else self.body + ) + # Return orelse if condition is False + return ( + self.orelse.compute() + if isinstance(self.orelse, dsl_interpreter_operator.Operator) + else self.orelse + ) diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_iterable_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_iterable_operators.py new file mode 100644 index 0000000000..eb25ead05f --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_iterable_operators.py @@ -0,0 +1,38 @@ +# pylint: disable=missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast + +import octobot_commons.dsl_interpreter.operators.iterable_operator as dsl_interpreter_iterable_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class ListOperator(dsl_interpreter_iterable_operator.IterableOperator): + """ + List operator: [1, 2, 3] + List operator have one or more operands. + """ + NAME = "[...]" + DESCRIPTION = "List constructor operator. Creates a list from the given operands." + EXAMPLE = "[1, 2, 3]" + + @staticmethod + def get_name() -> str: + return ast.List.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + # Compute the test condition + return list(self.get_computed_parameters()) diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_name_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_name_operators.py new file mode 100644 index 0000000000..758cff7ad5 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_name_operators.py @@ -0,0 +1,48 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import math + +import octobot_commons.dsl_interpreter.operators.name_operator as dsl_interpreter_name_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class PiOperator(dsl_interpreter_name_operator.NameOperator): + MAX_PARAMS = 0 + NAME = "pi" + DESCRIPTION = "Mathematical constant pi (π), approximately 3.14159." + EXAMPLE = "pi" + + @staticmethod + def get_name() -> str: + return "pi" + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + return math.pi + + +class NaNOperator(dsl_interpreter_name_operator.NameOperator): + MAX_PARAMS = 0 + NAME = "nan" + DESCRIPTION = "Not a Number constant. Represents an undefined or unrepresentable numeric value." + EXAMPLE = "nan" + + @staticmethod + def get_name() -> str: + return "nan" + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + return float("nan") diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_nary_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_nary_operators.py new file mode 100644 index 0000000000..616d915734 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_nary_operators.py @@ -0,0 +1,58 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast + +import octobot_commons.dsl_interpreter.operators.n_ary_operator as dsl_interpreter_n_ary_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class AndOperator(dsl_interpreter_n_ary_operator.NaryOperator): + MIN_PARAMS = 1 + MAX_PARAMS = None + NAME = "and" + DESCRIPTION = "Logical AND operator. Returns True if all operands are truthy, otherwise returns False." + EXAMPLE = "True and False" + + @staticmethod + def get_name() -> str: + return ast.And.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + for parameter in self.parameters: + value = self._get_computed_parameter(parameter) + if not value: + return False + return True + + +class OrOperator(dsl_interpreter_n_ary_operator.NaryOperator): + MIN_PARAMS = 1 + MAX_PARAMS = None + NAME = "or" + DESCRIPTION = "Logical OR operator. Returns True if any operand is truthy, otherwise returns False." + EXAMPLE = "True or False" + + @staticmethod + def get_name() -> str: + return ast.Or.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + for parameter in self.parameters: + value = self._get_computed_parameter(parameter) + if value: + return True + return False diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_resetting_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_resetting_operators.py new file mode 100644 index 0000000000..64d67e278d --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_resetting_operators.py @@ -0,0 +1,303 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import time +import asyncio +import random + +import octobot_commons.errors +import octobot_commons.dsl_interpreter as dsl_interpreter + + +""" +Resetting operators are ReCallableOperatorMixin that can be called multiple times +in order to execute a long lasting operation that can take several steps to complete. +""" + + +class WaitOperator(dsl_interpreter.PreComputingCallOperator, dsl_interpreter.ReCallableOperatorMixin): + NAME = "wait" + DESCRIPTION = "Pauses execution for the specified number of seconds. If return_remaining_time is True, instantly returns the remaining time to wait." + EXAMPLE = "wait(5)" + + @staticmethod + def get_name() -> str: + return "wait" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="min_seconds", description="minimum number of seconds to wait", required=True, type=float), + dsl_interpreter.OperatorParameter(name="max_seconds", description="maximum number of seconds to wait", required=False, type=float, default=None), + dsl_interpreter.OperatorParameter(name="return_remaining_time", description="if True, instantly returns the remaining time to wait", required=False, type=bool, default=False), + ] + cls.get_re_callable_parameters() + + async def pre_compute(self) -> None: + await super().pre_compute() + param_by_name = self.get_computed_value_by_parameter() + if param_by_name["return_remaining_time"]: + self.value = self._compute_remaining_time(param_by_name) + else: + await asyncio.sleep(self._compute_sleep_time(param_by_name)) + self.value = None + + def _compute_remaining_time( + self, param_by_name: dict[str, typing.Any] + ) -> typing.Optional[dict[str, typing.Any]]: + current_time = time.time() + if last_execution_result := self.get_last_execution_result(param_by_name): + last_execution_time = last_execution_result[ + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value + ] + waiting_time = ( + last_execution_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] + - (current_time - last_execution_time) + ) + else: + waiting_time = self._compute_sleep_time(param_by_name) + if waiting_time <= 0: + # done waiting + return None + + return self.create_re_callable_result_dict( + keyword=self.get_name(), + last_execution_time=current_time, + waiting_time=waiting_time, + script_override=self.re_create_script(param_by_name), + ) + + def _compute_sleep_time(self, param_by_name: dict[str, typing.Any]) -> float: + min_seconds = param_by_name["min_seconds"] + if min_seconds < 0: + raise octobot_commons.errors.InvalidParametersError( + f"wait() requires a non-negative numeric argument (seconds), got {min_seconds}" + ) + max_seconds = param_by_name["max_seconds"] + if max_seconds is None: + return min_seconds + return random.randrange(int(min_seconds) * 1000, int(max_seconds) * 1000) / 1000 + + +class LoopUntilOperator(dsl_interpreter.PreComputingCallOperator, dsl_interpreter.ReCallableOperatorMixin): + NAME = "loop_until" + DESCRIPTION = ( + "Re-evaluates a condition after retry_interval until it is true. " + "Optional timeout and max_attempts stop the loop with ErrorStatementEncountered; " + "if both are omitted, loops until the condition is true. " + "Returns the condition value when it becomes true." + ) + EXAMPLE = "loop_until(x > 0, 1, timeout=30, max_attempts=10)" + + LOOP_START_TIME_KEY = "loop_until_start_time" + ATTEMPT_COUNT_KEY = "loop_until_attempt_count" + + @staticmethod + def get_name() -> str: + return "loop_until" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter( + name="condition", + description="expression that must become true", + required=True, + type=bool, + ), + dsl_interpreter.OperatorParameter( + name="retry_interval", + description="seconds to wait between condition checks", + required=True, + type=float, + ), + dsl_interpreter.OperatorParameter( + name="timeout", + description="if set, maximum total seconds; if still false, raises ErrorStatementEncountered", + required=False, + type=float, + default=None, + ), + dsl_interpreter.OperatorParameter( + name="max_attempts", + description="if set, maximum condition evaluations; if still false, raises ErrorStatementEncountered", + required=False, + type=int, + default=None, + ), + dsl_interpreter.OperatorParameter( + name="return_remaining_time", + description="if True, instantly returns the remaining time until the next check", + required=False, + type=bool, + default=False, + ), + ] + cls.get_re_callable_parameters() + + async def pre_compute(self) -> None: + await super().pre_compute() + param_by_name = self.get_computed_value_by_parameter() + self._validate_loop_until_params(param_by_name) + if param_by_name["return_remaining_time"]: + self.value = await self._compute_return_remaining(param_by_name) + else: + self.value = await self._run_blocking_loop(param_by_name) + + def _validate_loop_until_params(self, param_by_name: dict[str, typing.Any]) -> None: + timeout_value = param_by_name["timeout"] + max_attempts_value = param_by_name["max_attempts"] + retry_interval = param_by_name["retry_interval"] + if retry_interval < 0: + raise octobot_commons.errors.InvalidParametersError( + f"loop_until() requires a non-negative retry_interval, got {retry_interval}" + ) + if timeout_value is not None and timeout_value < 0: + raise octobot_commons.errors.InvalidParametersError( + f"loop_until() requires a non-negative timeout, got {timeout_value}" + ) + if max_attempts_value is not None and max_attempts_value < 1: + raise octobot_commons.errors.InvalidParametersError( + f"loop_until() requires max_attempts >= 1 when set, got {max_attempts_value}" + ) + + def _extra_loop_state(self, last_execution_result: typing.Optional[dict]) -> dict[str, typing.Any]: + if not last_execution_result: + return {} + skip_keys = { + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value, + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value, + } + return { + key: value + for key, value in last_execution_result.items() + if key not in skip_keys + } + + def _compute_remaining_retry_wait( + self, current_time: float, last_execution_result: typing.Optional[dict] + ) -> typing.Optional[float]: + if not last_execution_result: + return None + last_execution_time = last_execution_result[ + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value + ] + base_waiting_time = last_execution_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] + waiting_time = base_waiting_time - (current_time - last_execution_time) + if waiting_time <= 0: + # reset waiting timee + return base_waiting_time + return waiting_time + + def _read_loop_start_and_attempts( + self, + last_execution_result: typing.Optional[dict], + current_time: float, + ) -> tuple[float, int]: + if not last_execution_result: + return current_time, 0 + loop_start = last_execution_result.get(self.LOOP_START_TIME_KEY, current_time) + attempt_count = last_execution_result.get(self.ATTEMPT_COUNT_KEY, 0) + return loop_start, attempt_count + + async def _compute_return_remaining( + self, param_by_name: dict[str, typing.Any] + ) -> typing.Any: + current_time = time.time() + + if condition_result := param_by_name.get("condition"): + return condition_result + + last_execution_result = self.get_last_execution_result(param_by_name) + loop_start_time, previous_attempt_count = self._read_loop_start_and_attempts( + last_execution_result, current_time + ) + attempt_count = previous_attempt_count + 1 + max_attempts = param_by_name.get("max_attempts") + timeout = param_by_name.get("timeout") + + try: + remaining_wait = self._compute_remaining_retry_wait(current_time, last_execution_result) + except KeyError: + remaining_wait = None + if remaining_wait is None: + # this is the first execution: validate the timeout and max_attempts values + if timeout is not None and ( + current_time - loop_start_time >= timeout + ): + raise octobot_commons.errors.MaxAttemptsExceededError( + "loop_until: timeout exceeded before condition became true" + ) + if max_attempts is not None and attempt_count >= max_attempts: + raise octobot_commons.errors.MaxAttemptsExceededError( + "loop_until: max_attempts exceeded before condition became true" + ) + remaining_wait = float(param_by_name["retry_interval"]) + else: + # this is not the first execution: check exit conditions + if timeout is not None and ( + current_time - loop_start_time >= timeout + ): + raise octobot_commons.errors.MaxAttemptsExceededError( + "loop_until: timeout exceeded before condition became true" + ) + if ( + max_attempts is not None + and attempt_count >= max_attempts + ): + raise octobot_commons.errors.MaxAttemptsExceededError( + "loop_until: max_attempts exceeded before condition became true" + ) + + return self.create_re_callable_result_dict( + keyword=self.get_name(), + last_execution_time=current_time, + waiting_time=remaining_wait, + **{ + self.LOOP_START_TIME_KEY: loop_start_time, + self.ATTEMPT_COUNT_KEY: attempt_count, + }, + ) + + async def _evaluate_condition_async(self) -> typing.Any: + condition_arg = self.get_input_value_by_parameter()["condition"] + if isinstance(condition_arg, dsl_interpreter.Operator): + await condition_arg.pre_compute() + return condition_arg.compute() + return self._get_computed_parameter(condition_arg) + + async def _run_blocking_loop( + self, param_by_name: dict[str, typing.Any] + ) -> typing.Any: + retry_interval = float(param_by_name["retry_interval"]) + loop_start_time = time.time() + attempt_count = 0 + while True: + attempt_count += 1 + condition_result = await self._evaluate_condition_async() + if bool(condition_result): + return condition_result + current_time = time.time() + if param_by_name["timeout"] is not None and ( + current_time - loop_start_time >= param_by_name["timeout"] + ): + raise octobot_commons.errors.ErrorStatementEncountered( + "loop_until: timeout exceeded before condition became true" + ) + if param_by_name["max_attempts"] is not None and attempt_count >= param_by_name["max_attempts"]: + raise octobot_commons.errors.ErrorStatementEncountered( + "loop_until: max_attempts exceeded before condition became true" + ) + await asyncio.sleep(retry_interval) diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_subscripting_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_subscripting_operators.py new file mode 100644 index 0000000000..7883a6c9d1 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_subscripting_operators.py @@ -0,0 +1,120 @@ +# pylint: disable=missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast +import numpy as np +import typing + +import octobot_commons.errors +import octobot_commons.dsl_interpreter.operators.subscripting_operator as dsl_interpreter_subscripting_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class SubscriptOperator(dsl_interpreter_subscripting_operator.SubscriptingOperator): + """ + Base class for subscripting operators: array[index] + Subscripting operators have three operands: the array/list, the index or slice and the context. + """ + NAME = "[...]" + DESCRIPTION = "Subscripting operator. Accesses an element from a list or array using an index." + EXAMPLE = "my_list[0]" + + def __init__( + self, + array_or_list: dsl_interpreter_operator.OperatorParameterType, + index_or_slice: dsl_interpreter_operator.OperatorParameterType, + context: dsl_interpreter_operator.OperatorParameterType, + **kwargs: typing.Any + ): + """ + Initialize the subscripting operator with its array, index and context. + """ + super().__init__(array_or_list, index_or_slice, context, **kwargs) + + def get_computed_array_or_list_and_index_or_slice_and_context_parameters( + self, + ) -> typing.Tuple[ + dsl_interpreter_operator.ComputedOperatorParameterType, + dsl_interpreter_operator.ComputedOperatorParameterType, + dsl_interpreter_operator.ComputedOperatorParameterType, + ]: + """ + Get the computed array/list, index/slice and context of the subscripting operator. + """ + computed_parameters = self.get_computed_parameters() + if len(computed_parameters) != 3: + raise octobot_commons.errors.InvalidParametersError(f"Unsupported {self.__class__.__name__}: expected three parameters, got {len(computed_parameters)}") + if not isinstance(computed_parameters, (list, tuple, np.ndarray)): + raise octobot_commons.errors.InvalidParametersError(f"Unsupported {self.__class__.__name__} computed parameters 1 type: {type(computed_parameters).__name__}") + return computed_parameters[0], computed_parameters[1], computed_parameters[2] + + @staticmethod + def get_name() -> str: + return ast.Subscript.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + # Compute the test condition + array_or_list, index, context = self.get_computed_array_or_list_and_index_or_slice_and_context_parameters() + if isinstance(context, ast.Load): + return array_or_list[index] + raise octobot_commons.errors.InvalidParametersError(f"Unsupported {self.__class__.__name__} context type: {type(context).__name__}") + + +class SliceOperator(dsl_interpreter_subscripting_operator.SubscriptingOperator): + """ + Operator for creating slice objects: slice(lower, upper, step) + Used for array slicing like array[start:stop:step] + """ + NAME = "[start:stop:step]" + DESCRIPTION = "Slice operator. Creates a slice object for array/list slicing with optional start, stop, and step parameters." + EXAMPLE = "my_list[1:5:2]" + + @staticmethod + def get_name() -> str: + return ast.Slice.__name__ + + def get_computed_lower_and_upper_and_step_parameters( + self, + ) -> typing.Tuple[ + dsl_interpreter_operator.ComputedOperatorParameterType, + dsl_interpreter_operator.ComputedOperatorParameterType, + dsl_interpreter_operator.ComputedOperatorParameterType, + ]: + """ + Get the computed lower, upper and step of the slice operator. + """ + computed_parameters = self.get_computed_parameters() + if len(computed_parameters) > 3: + raise octobot_commons.errors.InvalidParametersError(f"Unsupported {self.__class__.__name__}: expected at most three parameters, got {len(computed_parameters)}") + lower = int(computed_parameters[0]) if len(computed_parameters) > 0 and computed_parameters[0] is not None else None + upper = int(computed_parameters[1]) if len(computed_parameters) > 1 and computed_parameters[1] is not None else None + step = int(computed_parameters[2]) if len(computed_parameters) > 2 and computed_parameters[2] is not None else None + return lower, upper, step + + def compute(self) -> slice: + """ + Compute and return a Python slice object. + """ + maybe_lower, maybe_upper, maybe_step = self.get_computed_lower_and_upper_and_step_parameters() + if maybe_lower is not None: + if maybe_upper is not None: + if maybe_step is not None: + return slice(maybe_lower, maybe_upper, maybe_step) + return slice(maybe_lower, maybe_upper, None) + return slice(maybe_lower, None, None) + if maybe_upper is not None: + return slice(None, maybe_upper, None) + return slice(None, None, None) diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_time_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_time_operators.py new file mode 100644 index 0000000000..f9d5171508 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_time_operators.py @@ -0,0 +1,34 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import time + +import octobot_commons.dsl_interpreter as dsl_interpreter + + +class NowMsOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 0 + MAX_PARAMS = 0 + NAME = "now_ms" + DESCRIPTION = "Returns the current time in milliseconds since epoch." + EXAMPLE = "now_ms()" + + @staticmethod + def get_name() -> str: + return "now_ms" + + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + return int(time.time() * 1000) diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/base_unary_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_unary_operators.py new file mode 100644 index 0000000000..28ff077c07 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/base_unary_operators.py @@ -0,0 +1,77 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import ast + +import octobot_commons.dsl_interpreter.operators.unary_operator as dsl_interpreter_unary_operator +import octobot_commons.dsl_interpreter.operator as dsl_interpreter_operator + + +class UAddOperator(dsl_interpreter_unary_operator.UnaryOperator): + NAME = "+" + DESCRIPTION = "Unary plus operator. Returns the operand unchanged (mainly for symmetry with unary minus)." + EXAMPLE = "+5" + + @staticmethod + def get_name() -> str: + return ast.UAdd.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + operand = self.get_computed_operand() + return +operand + + +class USubOperator(dsl_interpreter_unary_operator.UnaryOperator): + NAME = "-" + DESCRIPTION = "Unary minus operator. Negates the operand (multiplies by -1)." + EXAMPLE = "-5" + + @staticmethod + def get_name() -> str: + return ast.USub.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + operand = self.get_computed_operand() + return -operand + + +class NotOperator(dsl_interpreter_unary_operator.UnaryOperator): + NAME = "not" + DESCRIPTION = "Logical NOT operator. Returns True if the operand is falsy, False if it is truthy." + EXAMPLE = "not True" + + @staticmethod + def get_name() -> str: + return ast.Not.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + operand = self.get_computed_operand() + return not operand + + +class InvertOperator(dsl_interpreter_unary_operator.UnaryOperator): + NAME = "~" + DESCRIPTION = "Bitwise NOT operator. Inverts all bits of the operand. In this implementation, it behaves as logical NOT." + EXAMPLE = "~True" + + @staticmethod + def get_name() -> str: + return ast.Invert.__name__ + + def compute(self) -> dsl_interpreter_operator.ComputedOperatorParameterType: + operand = self.get_computed_operand() + return not operand # ~operand has been deprecated in favor of "not" + # return ~operand diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/metadata.json b/packages/tentacles/Meta/DSL_operators/python_std_operators/metadata.json new file mode 100644 index 0000000000..319240c835 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_operators.py new file mode 100644 index 0000000000..48a835e00b --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_operators.py @@ -0,0 +1,308 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal +import math +import pytest +import mock +import time + +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.errors +import octobot_commons.constants as commons_constants + +import tentacles.Meta.DSL_operators.python_std_operators.base_call_operators as base_call_operators + + +@pytest.fixture +def interpreter(): + return dsl_interpreter.Interpreter(dsl_interpreter.get_all_operators()) + + +@pytest.mark.asyncio +async def test_interpreter_basic_operations(interpreter): + # constants + assert await interpreter.interprete("True") is True + assert await interpreter.interprete("'test'") == "test" + assert await interpreter.interprete('"test"') == "test" + + # unary operators + assert await interpreter.interprete("1") == 1 + assert await interpreter.interprete("-11") == -11 + assert await interpreter.interprete("+11") == +11 + assert await interpreter.interprete("not True") is False + assert await interpreter.interprete("~ False") is True + + # binary operators + assert await interpreter.interprete("1 + 2") == 3 + assert await interpreter.interprete("1 - 2") == -1 + assert await interpreter.interprete("4 * 2") == 8 + assert await interpreter.interprete("1 / 2") == 0.5 + assert await interpreter.interprete("1 % 3") == 1 + assert await interpreter.interprete("1 // 2") == 0 + assert await interpreter.interprete("3 ** 2") == 9 + + # compare operators + assert await interpreter.interprete("1 < 2") is True + assert await interpreter.interprete("1 <= 2") is True + assert await interpreter.interprete("2 <= 2") is True + assert await interpreter.interprete("1 > 2") is False + assert await interpreter.interprete("2 >= 2") is True + assert await interpreter.interprete("1 == 2") is False + assert await interpreter.interprete("1 != 2") is True + assert await interpreter.interprete("1 is 2") is False + assert await interpreter.interprete("1 is not 2") is True + assert await interpreter.interprete("'1' in '123'") is True + assert await interpreter.interprete("'4' in '123'") is False + assert await interpreter.interprete("1 in [1, 2, 3]") is True + assert await interpreter.interprete("4 in [1, 2, 3]") is False + assert await interpreter.interprete("1 not in [1, 2, 3]") is False + assert await interpreter.interprete("4 not in [1, 2, 3]") is True + + # variables + assert await interpreter.interprete("pi") == math.pi + assert await interpreter.interprete("pi + 1") == math.pi + 1 + assert math.isnan(await interpreter.interprete("nan")) + assert math.isnan(await interpreter.interprete("nan + 1")) + + # expressions + assert await interpreter.interprete("1 if True else 2") == 1 + assert await interpreter.interprete("1 if False else 2") == 2 + assert await interpreter.interprete("1 if 1 < 2 else 2") == 1 + assert await interpreter.interprete("1 if 1 > 2 else 2") == 2 + assert await interpreter.interprete("1 if 1 == 1 else 2") == 1 + assert await interpreter.interprete("1 if 1 != 2 else 2") == 1 + assert await interpreter.interprete("1 if 1 is 1 else 2") == 1 + assert await interpreter.interprete("1 if 1 is not 2 else 2") == 1 + + # subscripting operators + assert await interpreter.interprete("[1, 2, 3][:]") == [1, 2, 3] + assert await interpreter.interprete("[1, 2, 3][0]") == 1 + assert await interpreter.interprete("[1, 2, 3][0:2]") == [1, 2] + assert await interpreter.interprete("[1, 2, 3][2:]") == [3] + assert await interpreter.interprete("[1, 2, 3][:1]") == [1] + assert await interpreter.interprete("[1, 2, 3][:-1]") == [1, 2] + assert await interpreter.interprete("[1, 2, 3][-1]") == 3 + assert await interpreter.interprete("[1, 2, 3, 4, 5, 6][0:6:2]") == [1, 3, 5] + + +@pytest.mark.asyncio +async def test_interpreter_mixed_basic_operations(interpreter): + assert await interpreter.interprete("1 + 2 * 3") == 7 + assert await interpreter.interprete("(1 + 2) * 3") == 9 + assert await interpreter.interprete("(1 + 2) * 3 + 5 / 2 + 10") == 21.5 + assert await interpreter.interprete("(1 + 2) * 3 if 1 < 2 else 10 + pi") == 9 + assert await interpreter.interprete("(1 + 2) * 3 if 1 > 2 else 10 + pi") == 10 + math.pi + assert await interpreter.interprete("1 < 2 and 2 < 3") is True + assert await interpreter.interprete("1 < 2 and 2 < 3 and True and 1") is True + assert await interpreter.interprete("1 < 2 and 2 > 3") is False + assert await interpreter.interprete("1 < 2 or 2 > 3") is True + assert await interpreter.interprete("1 < 2 or 2 > 3 or True or False or 0") is True + assert await interpreter.interprete("1 > 2 or 2 > 3") is False + assert await interpreter.interprete("not (1 < 2 and 2 < 3)") is False + assert await interpreter.interprete("not (1 < 2 and 2 > 3)") is True + assert await interpreter.interprete("not (1 > 2 or 2 > 3)") is True + assert await interpreter.interprete("not (1 > 2 or 2 < 3)") is False + + +@pytest.mark.asyncio +async def test_and_or_short_circuit(interpreter): + # and short-circuits: False and (None - 1) should not crash + assert await interpreter.interprete("False and None - 1") is False + assert await interpreter.interprete("None is not None and None - 1 > 0") is False + # or short-circuits: True or (None - 1) should not crash + assert await interpreter.interprete("True or None - 1") is True + assert await interpreter.interprete("1 > 0 or None - 1 > 0") is True + + +@pytest.mark.asyncio +async def test_interpreter_call_operations(interpreter): + assert await interpreter.interprete("max(1, 2, 3)") == 3 + assert await interpreter.interprete("min(1, 2, 3)") == 1 + assert await interpreter.interprete("abs(-1)") == 1 + assert await interpreter.interprete("abs(1)") == 1 + assert await interpreter.interprete("sqrt(4)") == 2 + assert await interpreter.interprete("mean(1, 2, 3)") == 2 + assert await interpreter.interprete("mean(50, 110.2)") == 80.1 + assert await interpreter.interprete("mean(3)") == 3 + assert await interpreter.interprete("round(1.23456789, 2)") == 1.23 + assert await interpreter.interprete("round(1.23456789, 2.22)") == 1.23 + assert await interpreter.interprete("round(1.23456789)") == 1 + assert await interpreter.interprete("floor(1.23456789)") == 1 + assert await interpreter.interprete("ceil(1.23456789)") == 2 + assert await interpreter.interprete("sin(0)") == 0 + assert abs(await interpreter.interprete("sin(pi/2)") - 1) < 1e-10 + assert abs(await interpreter.interprete("sin(pi)") - 0) < 1e-10 + assert await interpreter.interprete("cos(0)") == 1 + assert abs(await interpreter.interprete("cos(pi/2)") - 0) < 1e-10 + assert abs(await interpreter.interprete("cos(pi)") - (-1)) < 1e-10 + assert 90 <= await interpreter.interprete("oscillate(100, 10, 60)") <= 110 # 100 ± 10% + assert 40 <= await interpreter.interprete("oscillate(50, 20, 30)") <= 60 # 50 ± 20% + assert 190 <= await interpreter.interprete("oscillate(200, 5, 120)") <= 210 # 200 ± 5% + assert 185 <= await interpreter.interprete("oscillate(150 + oscillate(50, 10, 60), 5, 120)") <= 215 # 200 ± 5% + + assert await interpreter.interprete("get({'color': 'red'}, 'color', 'blue')") == "red" + assert await interpreter.interprete("get({'color': 'red'}, 'missing', 'blue')") == "blue" + assert await interpreter.interprete("get(None, 'k', 'd')") == "d" + assert await interpreter.interprete("get(1, 'k', 'd')") == "d" + assert await interpreter.interprete("get({'a': 1}, [], 'd')") == "d" + + +@pytest.mark.asyncio +async def test_interpreter_oscillate_operations(interpreter): + for time_mock in range(int(time.time()), int(time.time()) + 3600, 1): + with mock.patch.object(time, 'time', return_value=time_mock * 0.1241): + # always returns a value between 90 and 110 + assert 90 <= await interpreter.interprete("oscillate(100, 10, 60.221)") <= 110 + + +@pytest.mark.asyncio +async def test_interpreter_mixed_call_and_basic_operations(interpreter): + assert await interpreter.interprete("max(sqrt(9), abs(-4), 3 + 6)") == 9 + assert await interpreter.interprete("min(sqrt(9), abs(-4), 3 + 6)") == 3 + assert await interpreter.interprete("abs(min(sqrt(9), abs(-4), 3 + 6))") == 3 + assert await interpreter.interprete("sqrt(max(1, 2, 3, 4))") == 2 + assert await interpreter.interprete("sqrt(2**2)") == 2 + assert await interpreter.interprete("sqrt(min(1, 2, 3))") == 1 + assert await interpreter.interprete("abs(sqrt(max(1, 2, 4)))") == 2 + assert await interpreter.interprete("abs(sqrt(min(1, 2, 4)))") == 1 + assert await interpreter.interprete("mean(4, 5) + 1 + mean(1, 1 + 1, 3)") == 7.5 + assert abs(await interpreter.interprete("sin(pi/2) + cos(0)") - 2) < 1e-10 + assert abs(await interpreter.interprete("sin(pi/4) * cos(pi/4)") - 0.5) < 1e-10 + assert abs(await interpreter.interprete("sqrt(sin(pi/2)**2 + cos(pi/2)**2)") - 1) < 1e-10 + + +@pytest.mark.asyncio +async def test_interpreter_insupported_operations(interpreter): + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("1 & 2") + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("1 | 2") + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("3 ^ 2") + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("1 << 2") + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("1 >> 2") + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("my_variable") + with pytest.raises(octobot_commons.errors.UnsupportedOperatorError): + await interpreter.interprete("unknown_operator(1)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("mean(1, 'a')") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("mean()") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("sin('a')") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("cos('a')") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate('a', 10, 60)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100, 'b', 60)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100, 10, 'c')") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100, 10)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100, 10, 60, 70)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100, 10, -1)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("oscillate(100, 10, 0)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("get({})") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("get(1, 2)") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("get(1, 2, 3, 4)") + + +@pytest.mark.asyncio +async def test_error_operator(interpreter): + assert "error" in interpreter.operators_by_name + + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="123-error"): + await interpreter.interprete("error('123-error')") + + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered): + await interpreter.interprete("error") + + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="123-error"): + await interpreter.interprete("error('123-error') if True else 'ok'") + + assert await interpreter.interprete("error('123-error') if False else 'ok'") == "ok" + + +@pytest.mark.asyncio +async def test_value_if_operator(interpreter): + assert "value_if" in interpreter.operators_by_name + + # truthy inner result returns value + assert await interpreter.interprete("value_if(15, ' > 12')") == 15 + + # falsy inner result returns False + assert await interpreter.interprete("value_if(15, ' < 10')") is False + + # string value via repr (inner: 'ab' in 'abc') + assert await interpreter.interprete('value_if(\'ab\', " in \'abc\'")') == "ab" + + # truthy non-boolean inner result (inner expression evaluates to 5) + assert await interpreter.interprete("value_if(0, ' + 5')") == 0 + + # invalid condition type + with pytest.raises(octobot_commons.errors.InvalidParametersError): + await interpreter.interprete("value_if(1, 2)") + + # computed value (not a literal) + assert await interpreter.interprete("value_if(10 + 5, ' > 12')") == 15 + assert await interpreter.interprete("value_if(min(3, 1, 4), ' > 0')") == 1 + assert await interpreter.interprete("value_if((2 + 3) * 2, ' == 10')") == 10 + + # computed condition via string concatenation + assert await interpreter.interprete("value_if(15, ' >' + ' 12')") == 15 + assert await interpreter.interprete("value_if(15, ' <' + ' 10')") is False + + # both value and condition are computed sub-expressions + assert await interpreter.interprete("value_if(min(5, 9, 3), ' >' + ' 2')") == 3 + assert await interpreter.interprete("value_if((2 + 3) * 2, ' ==' + ' 10 - 1 + 1')") == 10 + assert await interpreter.interprete("value_if((2 + 3) * 2, ' ==' + ' 10 - 1 + 2')") is False + + # LOCAL_VALUE_PLACEHOLDER in condition: substitute repr(sanitize(value)), full expression as inner script + assert await interpreter.interprete( + f"value_if({{'status': 'open', 'qty': 1.5}}, \"get({commons_constants.LOCAL_VALUE_PLACEHOLDER}, 'status', 'closed') == 'open'\")" + ) == {"status": "open", "qty": 1.5} + assert await interpreter.interprete( + f"value_if({{'status': 'closed', 'qty': 1.5}}, \"get({commons_constants.LOCAL_VALUE_PLACEHOLDER}, 'status', 'closed') == 'open'\")" + ) is False + + # same placeholder pattern with dict keys built from inner operations (e.g. 'sta'+'tus' -> 'status') + assert await interpreter.interprete( + "value_if({'sta'+'tus': 'open'}, \"get(LOCAL_VALUE_PLACEHOLDER, 'status', 'closed') == 'open'\")" + ) == {"status": "open"} + assert await interpreter.interprete( + "value_if({'sta'+'tus': 'closed'}, \"get(LOCAL_VALUE_PLACEHOLDER, 'status', 'closed') == 'open'\")" + ) is False + + # LOCAL_VALUE_PLACEHOLDER + decimal in value dict (sanitize before repr for inner DSL) + payload = {"status": "open", "qty": decimal.Decimal("0.25")} + condition = ( + f"get({commons_constants.LOCAL_VALUE_PLACEHOLDER}, 'status', 'closed') == 'open'" + ) + operator = base_call_operators.ValueIfOperator(payload, condition) + await operator.pre_compute() + assert operator.compute() == {"status": "open", "qty": 0.25} diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_resetting_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_resetting_operators.py new file mode 100644 index 0000000000..88f3e4a280 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_resetting_operators.py @@ -0,0 +1,399 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import time + +import pytest +import mock + +import tentacles.Meta.DSL_operators.python_std_operators.base_resetting_operators as base_resetting_operators +import octobot_commons.dsl_interpreter as dsl_interpreter +import octobot_commons.errors + + +class _LoopUntilSingleEvalTestConditionOperator(dsl_interpreter.CallOperator): + MIN_PARAMS = 0 + MAX_PARAMS = 0 + + @staticmethod + def get_name() -> str: + return "test_loop_until_single_eval_condition" + + def compute(self): + return True + + +@pytest.fixture +def interpreter(): + return dsl_interpreter.Interpreter(dsl_interpreter.get_all_operators()) + + +class TestWaitOperator: + @pytest.mark.asyncio + async def test_wait_operator(self, interpreter): + assert "wait" in interpreter.operators_by_name + + # wait(0) returns None after 0 seconds (instant) + assert await interpreter.interprete("wait(0)") is None + + with mock.patch.object(asyncio, "sleep", new=mock.AsyncMock()) as mock_sleep: + await interpreter.interprete("wait(1)") + mock_sleep.assert_awaited_once_with(1) + + mock_sleep.reset_mock() + + # wait with return_remaining_time=True returns ReCallingOperatorResult dict (wrapped format) + with mock.patch.object(time, "time", return_value=1000.0): + result = await interpreter.interprete("wait(5, return_remaining_time=True)") + assert dsl_interpreter.ReCallingOperatorResult.__name__ in result + inner = result[dsl_interpreter.ReCallingOperatorResult.__name__] + assert inner == { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 5, + dsl_interpreter.ReCallingOperatorResultKeys.SCRIPT_OVERRIDE.value: "wait(5, max_seconds=None, return_remaining_time=True)", + }, + "keyword": "wait", + } + mock_sleep.assert_not_called() + + @pytest.mark.asyncio + async def test_wait_operator_unit(self): + operator = base_resetting_operators.WaitOperator(1) + + # _compute_sleep_time with min_seconds only + assert operator._compute_sleep_time({"min_seconds": 1, "max_seconds": None}) == 1 + assert operator._compute_sleep_time({"min_seconds": 0, "max_seconds": None}) == 0 + + # _compute_sleep_time with negative raises + with pytest.raises(octobot_commons.errors.InvalidParametersError, match="non-negative"): + operator._compute_sleep_time({"min_seconds": -1, "max_seconds": None}) + + # _compute_sleep_time with min and max - returns value in range (mock random) + with mock.patch.object(base_resetting_operators.random, "randrange", return_value=1500): + assert operator._compute_sleep_time({"min_seconds": 1, "max_seconds": 2}) == 1.5 + + # _compute_remaining_time with no previous + with mock.patch.object(base_resetting_operators.time, "time", return_value=1000.0): + with mock.patch.object(base_resetting_operators.random, "randrange", return_value=3000): + result = operator._compute_remaining_time({ + "min_seconds": 1, "max_seconds": 4, + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: None, + }) + assert result is not None + last_result = result[dsl_interpreter.ReCallingOperatorResult.__name__]["last_execution_result"] + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value] == 1000.0 + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] == 3.0 + + # _compute_remaining_time with previous (ReCallingOperatorResult wrapped format) + with mock.patch.object(base_resetting_operators.time, "time", return_value=1002.0): + result = operator._compute_remaining_time({ + "min_seconds": 1, "max_seconds": None, + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + dsl_interpreter.ReCallingOperatorResult.__name__: { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + }, + }, + }, + }) + assert result is not None + last_result = result[dsl_interpreter.ReCallingOperatorResult.__name__]["last_execution_result"] + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value] == 1002.0 + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] == 3.0 # 5 - (1002 - 1000) + + # No mock: ensure random and time are actually called and return valid values + min_sec, max_sec = 1, 3 + sleep_times = [ + operator._compute_sleep_time({"min_seconds": min_sec, "max_seconds": max_sec}) + for _ in range(20) + ] + for sleep_time in sleep_times: + assert min_sec <= sleep_time < max_sec + assert len(set(sleep_times)) > 1 # random produces varying values + + result = operator._compute_remaining_time({ + "min_seconds": 2, "max_seconds": 5, + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: None, + }) + assert result is not None + last_result = result[dsl_interpreter.ReCallingOperatorResult.__name__]["last_execution_result"] + assert dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value in last_result + assert dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value in last_result + assert 2 <= last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] < 5 + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value] > 0 + + previous = { + dsl_interpreter.ReCallingOperatorResult.__name__: { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: time.time() - 1.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + }, + }, + } + result = operator._compute_remaining_time({ + "min_seconds": 1, "max_seconds": None, + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: previous, + }) + assert result is not None + last_result = result[dsl_interpreter.ReCallingOperatorResult.__name__]["last_execution_result"] + prev_last_result = previous[dsl_interpreter.ReCallingOperatorResult.__name__]["last_execution_result"] + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] <= 5.0 # time has passed + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value] >= prev_last_result[dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value] + + @pytest.mark.asyncio + async def test_wait_operator_pre_compute(self): + operator = base_resetting_operators.WaitOperator(0) + with mock.patch.object(asyncio, "sleep", new=mock.AsyncMock()) as mock_sleep: + await operator.pre_compute() + mock_sleep.assert_awaited_once_with(0) + + operator_with_return = base_resetting_operators.WaitOperator(2, return_remaining_time=True) + with mock.patch.object(asyncio, "sleep", new=mock.AsyncMock()) as mock_sleep: + await operator_with_return.pre_compute() + mock_sleep.assert_not_awaited() + assert operator_with_return.value is not None + assert isinstance(operator_with_return.value, dict) + last_result = operator_with_return.value[dsl_interpreter.ReCallingOperatorResult.__name__]["last_execution_result"] + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] == 2 + + @pytest.mark.asyncio + async def test_wait_operator_invalid_parameters(self, interpreter): + with pytest.raises(octobot_commons.errors.InvalidParametersError, match="non-negative"): + await interpreter.interprete("wait(-1)") + + +class TestLoopUntilOperator: + @pytest.mark.asyncio + async def test_loop_until_registered(self, interpreter): + assert "loop_until" in interpreter.operators_by_name + + @pytest.mark.asyncio + async def test_loop_until_blocking_immediate_true(self, interpreter): + assert await interpreter.interprete("loop_until(1<2, 0, max_attempts=1)") is True + + @pytest.mark.asyncio + async def test_loop_until_blocking_returns_condition_value(self, interpreter): + assert await interpreter.interprete("loop_until(42, 0, max_attempts=1)") == 42 + + @pytest.mark.asyncio + async def test_loop_until_blocking_max_attempts_exceeded(self, interpreter): + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="max_attempts"): + await interpreter.interprete("loop_until(False, 0, max_attempts=1)") + + @pytest.mark.asyncio + async def test_loop_until_blocking_max_attempts_after_retries(self, interpreter): + with mock.patch.object(asyncio, "sleep", new=mock.AsyncMock()) as mock_sleep: + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="max_attempts"): + await interpreter.interprete("loop_until(False, 0, max_attempts=3)") + assert mock_sleep.await_count == 2 + + @pytest.mark.asyncio + async def test_loop_until_blocking_condition_true_after_iterations(self, interpreter): + with mock.patch.object( + base_resetting_operators.LoopUntilOperator, + "_evaluate_condition_async", + new=mock.AsyncMock(side_effect=[False, False, "ready"]), + ): + with mock.patch.object(asyncio, "sleep", new=mock.AsyncMock()) as mock_sleep: + final_value = await interpreter.interprete( + "loop_until(False, 1, max_attempts=10, timeout=60)" + ) + assert final_value == "ready" + assert mock_sleep.await_count == 2 + + @pytest.mark.asyncio + async def test_loop_until_blocking_timeout(self, interpreter): + with mock.patch.object( + base_resetting_operators.time, + "time", + side_effect=[0.0, 0.0, 100.0], + ): + with mock.patch.object(asyncio, "sleep", new=mock.AsyncMock()): + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="timeout"): + await interpreter.interprete("loop_until(False, 1, timeout=10)") + + @pytest.mark.asyncio + async def test_loop_until_invalid_retry_interval(self, interpreter): + with pytest.raises(octobot_commons.errors.InvalidParametersError, match="retry_interval"): + await interpreter.interprete("loop_until(True, -1, max_attempts=1)") + + @pytest.mark.asyncio + async def test_loop_until_invalid_max_attempts(self, interpreter): + with pytest.raises(octobot_commons.errors.InvalidParametersError, match="max_attempts"): + await interpreter.interprete("loop_until(True, 1, max_attempts=0)") + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_first_failure_schedules_wait(self): + operator = base_resetting_operators.LoopUntilOperator( + False, + 5, + max_attempts=10, + return_remaining_time=True, + ) + with mock.patch.object(base_resetting_operators.time, "time", return_value=1000.0): + await operator.pre_compute() + assert isinstance(operator.value, dict) + last_result = operator.value[dsl_interpreter.ReCallingOperatorResult.__name__][ + "last_execution_result" + ] + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] == 5.0 + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value] == 1000.0 + assert last_result[base_resetting_operators.LoopUntilOperator.ATTEMPT_COUNT_KEY] == 1 + assert last_result[base_resetting_operators.LoopUntilOperator.LOOP_START_TIME_KEY] == 1000.0 + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_condition_true_immediately(self): + operator = base_resetting_operators.LoopUntilOperator( + True, + 5, + max_attempts=10, + return_remaining_time=True, + ) + await operator.pre_compute() + assert operator.value is True + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_preserves_loop_state_while_waiting(self): + wrapped_previous = { + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + dsl_interpreter.ReCallingOperatorResult.__name__: { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + base_resetting_operators.LoopUntilOperator.LOOP_START_TIME_KEY: 999.0, + base_resetting_operators.LoopUntilOperator.ATTEMPT_COUNT_KEY: 1, + }, + }, + }, + } + operator = base_resetting_operators.LoopUntilOperator( + False, + 5, + max_attempts=10, + return_remaining_time=True, + **wrapped_previous, + ) + with mock.patch.object(base_resetting_operators.time, "time", return_value=1002.0): + await operator.pre_compute() + last_result = operator.value[dsl_interpreter.ReCallingOperatorResult.__name__][ + "last_execution_result" + ] + assert last_result[dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value] == 3.0 + assert last_result[base_resetting_operators.LoopUntilOperator.ATTEMPT_COUNT_KEY] == 2 + assert last_result[base_resetting_operators.LoopUntilOperator.LOOP_START_TIME_KEY] == 999.0 + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_max_attempts_while_still_waiting(self): + wrapped_previous = { + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + dsl_interpreter.ReCallingOperatorResult.__name__: { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + base_resetting_operators.LoopUntilOperator.LOOP_START_TIME_KEY: 999.0, + base_resetting_operators.LoopUntilOperator.ATTEMPT_COUNT_KEY: 1, + }, + }, + }, + } + operator = base_resetting_operators.LoopUntilOperator( + False, + 5, + max_attempts=2, + return_remaining_time=True, + **wrapped_previous, + ) + with mock.patch.object(base_resetting_operators.time, "time", return_value=1002.0): + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="max_attempts"): + await operator.pre_compute() + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_true_while_wait_incomplete_short_circuits_wait(self): + wrapped_previous = { + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + dsl_interpreter.ReCallingOperatorResult.__name__: { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 5.0, + base_resetting_operators.LoopUntilOperator.LOOP_START_TIME_KEY: 998.0, + base_resetting_operators.LoopUntilOperator.ATTEMPT_COUNT_KEY: 1, + }, + }, + }, + } + operator = base_resetting_operators.LoopUntilOperator( + True, + 5, + max_attempts=10, + return_remaining_time=True, + **wrapped_previous, + ) + await operator.pre_compute() + assert operator.value is True + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_true_branch_single_condition_eval_via_interpreter( + self, + ): + operators = list(dsl_interpreter.get_all_operators()) + operators.append(_LoopUntilSingleEvalTestConditionOperator) + interpreter_with_test_condition = dsl_interpreter.Interpreter(operators) + with mock.patch.object( + _LoopUntilSingleEvalTestConditionOperator, + "compute", + mock.Mock(return_value=True), + ) as mock_condition_compute: + result = await interpreter_with_test_condition.interprete( + "loop_until(test_loop_until_single_eval_condition(), 0, max_attempts=10, " + "return_remaining_time=True)" + ) + assert result is True + mock_condition_compute.assert_called_once() + + @pytest.mark.asyncio + async def test_loop_until_return_remaining_max_attempts_after_wait(self): + wrapped_previous = { + dsl_interpreter.ReCallableOperatorMixin.LAST_EXECUTION_RESULT_KEY: { + dsl_interpreter.ReCallingOperatorResult.__name__: { + "last_execution_result": { + dsl_interpreter.ReCallingOperatorResultKeys.LAST_EXECUTION_TIME.value: 1000.0, + dsl_interpreter.ReCallingOperatorResultKeys.WAITING_TIME.value: 2.0, + base_resetting_operators.LoopUntilOperator.LOOP_START_TIME_KEY: 990.0, + base_resetting_operators.LoopUntilOperator.ATTEMPT_COUNT_KEY: 1, + }, + }, + }, + } + operator = base_resetting_operators.LoopUntilOperator( + False, + 1, + max_attempts=2, + return_remaining_time=True, + **wrapped_previous, + ) + with mock.patch.object(base_resetting_operators.time, "time", return_value=1005.0): + with pytest.raises(octobot_commons.errors.ErrorStatementEncountered, match="max_attempts"): + await operator.pre_compute() + + @pytest.mark.asyncio + async def test_loop_until_compute_remaining_retry_wait_no_previous(self): + operator = base_resetting_operators.LoopUntilOperator(False, 3, max_attempts=5) + assert operator._compute_remaining_retry_wait( + 1000.0, + None + ) is None diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_time_operators.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_time_operators.py new file mode 100644 index 0000000000..8674b260e6 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_base_time_operators.py @@ -0,0 +1,44 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import mock +import time +import pytest + +import octobot_commons.dsl_interpreter as dsl_interpreter +import tentacles.Meta.DSL_operators.python_std_operators.base_time_operators as base_time_operators + + +@pytest.fixture +def interpreter(): + return dsl_interpreter.Interpreter( + dsl_interpreter.get_all_operators() + [base_time_operators.NowMsOperator] + ) + + +@pytest.mark.asyncio +async def test_now_ms_returns_current_time(interpreter): + fixed_time = 1700000000.123 + with mock.patch.object(time, "time", return_value=fixed_time): + result = await interpreter.interprete("now_ms()") + assert result == 1700000000123 + + +@pytest.mark.asyncio +async def test_now_ms_in_expression(interpreter): + fixed_time = 1700000000.0 + with mock.patch.object(time, "time", return_value=fixed_time): + result = await interpreter.interprete("now_ms() > 0") + assert result is True diff --git a/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_dictionnaries.py b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_dictionnaries.py new file mode 100644 index 0000000000..68c0d6bdd0 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/python_std_operators/tests/test_dictionnaries.py @@ -0,0 +1,49 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.constants +import octobot_commons.dsl_interpreter + + +@pytest.mark.parametrize( + "libraries", + [tuple(), (octobot_commons.constants.BASE_OPERATORS_LIBRARY, )] +) +def test_get_all_operators(libraries): + assert octobot_commons.dsl_interpreter.get_all_operators(*libraries) is not None + assert len(octobot_commons.dsl_interpreter.get_all_operators(*libraries)) > 0 + operators = octobot_commons.dsl_interpreter.get_all_operators(*libraries) + operator_types = [ + octobot_commons.dsl_interpreter.BinaryOperator, + octobot_commons.dsl_interpreter.UnaryOperator, + octobot_commons.dsl_interpreter.CompareOperator, + octobot_commons.dsl_interpreter.NaryOperator, + octobot_commons.dsl_interpreter.CallOperator, + octobot_commons.dsl_interpreter.NameOperator, + ] + operator_by_type = { + operator_type.__name__: [] for operator_type in operator_types + } + for operator in operators: + name = operator.get_name() + assert len(name) > 0 + for operator_type in operator_types: + if issubclass(operator, operator_type): + operator_by_type[operator_type.__name__].append(operator) + break + for operator_type, operators in operator_by_type.items(): + assert len(operators) > 1, f"Expected at least 2 {operator_type} operators. {operator_by_type=}" diff --git a/packages/tentacles/Meta/DSL_operators/ta_operators/__init__.py b/packages/tentacles/Meta/DSL_operators/ta_operators/__init__.py new file mode 100644 index 0000000000..d124fba4f8 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/ta_operators/__init__.py @@ -0,0 +1,26 @@ +# pylint: disable=R0801 +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.DSL_operators.ta_operators.tulipy_technical_analysis_operators as tulipy_technical_analysis_operators +from tentacles.Meta.DSL_operators.ta_operators.tulipy_technical_analysis_operators import ( + RSIOperator, +) + + +__all__ = [ + "RSIOperator", +] diff --git a/packages/tentacles/Meta/DSL_operators/ta_operators/metadata.json b/packages/tentacles/Meta/DSL_operators/ta_operators/metadata.json new file mode 100644 index 0000000000..319240c835 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/ta_operators/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Meta/DSL_operators/ta_operators/ta_operator.py b/packages/tentacles/Meta/DSL_operators/ta_operators/ta_operator.py new file mode 100644 index 0000000000..b6ac54bc5c --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/ta_operators/ta_operator.py @@ -0,0 +1,30 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.dsl_interpreter.operators.call_operator as dsl_interpreter_call_operator + + +TA_LIBRARY = "ta" + + +class TAOperator(dsl_interpreter_call_operator.CallOperator): + + @staticmethod + def get_library() -> str: + """ + Get the library of the operator. + """ + return TA_LIBRARY diff --git a/packages/tentacles/Meta/DSL_operators/ta_operators/tests/test_docs_examples.py b/packages/tentacles/Meta/DSL_operators/ta_operators/tests/test_docs_examples.py new file mode 100644 index 0000000000..924e358aa1 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/ta_operators/tests/test_docs_examples.py @@ -0,0 +1,50 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +from tentacles.Meta.DSL_operators.exchange_operators.tests.exchange_public_data_operators import ( + historical_prices, + historical_volume, + historical_times, + exchange_manager_with_candles, + interpreter, +) + + +@pytest.mark.asyncio +async def test_mm_formulas_docs_examples(interpreter): + # ensure examples in the docs are working (meaning returning a parsable number) + assert round(await interpreter.interprete("close[-1]"), 2) == 92.22 + assert round(await interpreter.interprete("open[-1]"), 2) == 92.22 + assert round(await interpreter.interprete("high[-3]"), 2) == 92.92 + assert round(await interpreter.interprete("low[-1]"), 2) == 92.22 + assert round(await interpreter.interprete("volume[-2]"), 2) == 1211 + assert round(await interpreter.interprete("time[-1]"), 2) == 41 + assert round(await interpreter.interprete("ma(close, 12)[-1]"), 2) == 92.95 + assert round(await interpreter.interprete("ema(open, 24)[-1]"), 2) == 90.21 + assert round(await interpreter.interprete("vwma(close, volume, 4)[-1]"), 2) == 92.54 + assert round(await interpreter.interprete("rsi(close, 14)[-1]"), 2) == 67.55 + assert round(await interpreter.interprete("max(close[-1], open[-1])"), 2) == 92.22 + assert round(await interpreter.interprete("min(ma(close, 12)[-1], ema(open, 24)[-1])"), 2) == 90.21 + assert round(await interpreter.interprete("mean(close[-1], open[-1], high[-1], low[-1])"), 2) == 92.22 + assert round(await interpreter.interprete("round(ma(close, 12)[-1], 2)"), 2) == 92.95 + assert round(await interpreter.interprete("floor(close[-1])"), 2) == 92 + assert round(await interpreter.interprete("ceil(close[-1])"), 2) == 93 + assert round(await interpreter.interprete("abs(close[-1] - open[-1])"), 2) == 0 + assert 0 < await interpreter.interprete("sin(3.14)") < 0.01 + assert await interpreter.interprete("cos(3*pi)") == -1 + assert 900 <= await interpreter.interprete("oscillate(1000, 10, 60)") <= 1100 + assert round(await interpreter.interprete("100 if close[-1] > open[-1] else (90 + 1)"), 2) == 91 diff --git a/packages/tentacles/Meta/DSL_operators/ta_operators/tests/test_tulipy_technical_analysis_operators.py b/packages/tentacles/Meta/DSL_operators/ta_operators/tests/test_tulipy_technical_analysis_operators.py new file mode 100644 index 0000000000..6b74f53fd4 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/ta_operators/tests/test_tulipy_technical_analysis_operators.py @@ -0,0 +1,202 @@ +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.errors +from tentacles.Meta.DSL_operators.exchange_operators.tests.exchange_public_data_operators import ( + historical_prices, + historical_volume, + historical_times, + exchange_manager_with_candles, + interpreter, +) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, static_parameters", [ + # list all operator and "possible" invalid parameters + ("rsi", ["", "(close)", "(close, 14, 20)"]), + ("macd", ["", "(close)", "(close, 'a')", "(close, 'a', 26)", "('a', 14, 26, 9, 0)"]), + ("ma", ["", "(close)", "(close, 14, 20)"]), + ("ema", ["", "(close)", "(close, 14, 20)"]), + ("vwma", ["", "(close)", "('a', 14)", "(close, 'a')", "(close, 14, 11, 20)"]), +]) +async def test_operator_invalid_static_parameters(interpreter, operator, static_parameters): + for param in static_parameters: + with pytest.raises(octobot_commons.errors.InvalidParametersError, match=f"{operator} "): + # static validation + interpreter.prepare(f"{operator}{param}") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + # dynamic validation + await interpreter.interprete(f"{operator}{param}") + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, dynamic_parameters", [ + # list all operator and "possible" invalid parameters + ("rsi", ["('a', 14)", "(close, 'a')"]), + ("macd", ["('a', 14, 26, 9)", "(close, 'a', 26, 9)", "(close, 14, 'a', 9)", "(close, 14, 26, 'a')"]), + ("ma", ["('a', 14)", "(close, 'a')"]), + ("ema", ["('a', 14)", "(close, 'a')"]), + ("vwma", ["(close, volume, 'a')", "(close, 14, 20)"]), +]) +async def test_operator_invalid_dynamic_parameters(interpreter, operator, dynamic_parameters): + for param in dynamic_parameters: + # static validation: do not raise + interpreter.prepare(f"{operator}{param}") + with pytest.raises(octobot_commons.errors.InvalidParametersError): + # dynamic validation + await interpreter.interprete(f"{operator}{param}") + + +@pytest.mark.asyncio +@pytest.mark.parametrize("operator, dynamic_parameters", [ + # list all operator and invalid parameters that should raise a tulipy error that will be converted to a TypeError + ("rsi", ["(close, 999999)", "(close, 0)", "(close, -1)"]), + ("macd", ["(close, 14, 99999, 2)", "(close, 99999, 12, 2)", "(close, 0, 12, 2)", "(close, 7, 12, -1)"]), + ("ma", ["(close, 999999)", "(close, 0)", "(close, -1)"]), + ("ema", ["(close, -1)"]), + ("vwma", ["(close, volume, 999999)", "(close, volume, 0)", "(close, volume, -1)"]), +]) +async def test_operator_converted_tulipy_error(interpreter, operator, dynamic_parameters): + for param in dynamic_parameters: + # static validation: do not raise + interpreter.prepare(f"{operator}{param}") + with pytest.raises(TypeError): + # dynamic validation + await interpreter.interprete(f"{operator}{param}") + + +@pytest.mark.asyncio +async def test_operator_operations(interpreter): + # ensure the output is a list and can be used in arithmetic operations + assert isinstance(await interpreter.interprete("rsi(close, 14)"), list) + assert await interpreter.interprete("round(rsi(close, 26)[-1], 2)") == 74.3 + assert await interpreter.interprete("round(rsi(close, 14)[-1], 2)") == 67.55 + assert await interpreter.interprete("round(rsi(close, 26)[-1] - rsi(close, 14)[-1], 2)") == 6.74 + + # combine ma & vwma + ma = await interpreter.interprete("ma(close, 14)") + vwma = await interpreter.interprete("vwma(close, volume, 14)") + assert round(ma[-1], 2) == 92.53 + assert round(vwma[-1], 2) == 92.37 + assert round(ma[-1]*0.7 + vwma[-1]*0.3, 2) == 92.48 + assert await interpreter.interprete("round(ma(close, 14)[-1]*0.7 + vwma(close, volume, 14)[-1]*0.3, 2)") == 92.48 + + +@pytest.mark.asyncio +async def test_rsi_operator(interpreter): + rsi = await interpreter.interprete("rsi(close, 14)") + rounded_rsi = [round(v, 2) for v in rsi] + assert rounded_rsi == [ + 79.56, 78.6, 77.04, 81.67, 82.88, 84.06, 87.44, 88.03, 85.21, 85.81, 86.73, + 78.58, 78.71, 70.4, 72.5, 72.78, 67.78, 67.55 + ] + # different periods, different result + rsi = await interpreter.interprete("rsi(close, 20)") + rounded_rsi = [round(v, 2) for v in rsi] + assert rounded_rsi == [ + 85.71, 86.2, 84.2, 84.66, 85.37, 79.61, 79.7, 73.72, 75.04, 75.22, 71.62, 71.46 + ] + + assert await interpreter.interprete("round(rsi(close, 26)[-1], 2)") == 74.3 + assert await interpreter.interprete("round(rsi(close, 14)[-1], 2)") == 67.55 + assert await interpreter.interprete("round(rsi(close, 26)[-1] - rsi(close, 14)[-1], 2)") == 6.74 + + +@pytest.mark.asyncio +async def test_macd_operator(interpreter): + macd = await interpreter.interprete("macd(close, 12, 26, 9)") + rounded_macd = [round(v, 2) for v in macd] + assert rounded_macd == [0.0, -0.03, -0.14, -0.18, -0.22, -0.29, -0.34] + + # different parameters, different result + macd = await interpreter.interprete("macd(close, 9, 26, 9)") + rounded_macd = [round(v, 2) for v in macd] + assert rounded_macd == [ + 0.0, -0.09, -0.29, -0.36, -0.41, -0.52, -0.59 + ] + + macd = await interpreter.interprete("macd(close, 9, 20, 9)") + rounded_macd = [round(v, 2) for v in macd] + assert rounded_macd == [ + 0.0, 0.26, 0.41, 0.41, 0.38, 0.36, 0.21, 0.07, -0.14, -0.23, -0.29, -0.4, -0.46 + ] + + macd = await interpreter.interprete("macd(close, 9, 20, 6)") + rounded_macd = [round(v, 2) for v in macd] + assert rounded_macd == [ + 0.0, 0.23, 0.35, 0.32, 0.28, 0.25, 0.1, -0.01, -0.19, -0.24, -0.26, -0.33, -0.37 + ] + + +@pytest.mark.asyncio +async def test_ma_operator(interpreter): + ma = await interpreter.interprete("ma(close, 14)") + rounded_ma = [round(v, 2) for v in ma] + assert rounded_ma == [ + 84.12, 84.53, 84.97, 85.26, 85.7, 86.13, 86.64, 87.36, 88.03, 88.63, + 89.28, 89.9, 90.37, 90.82, 91.12, 91.52, 91.93, 92.3, 92.53 + ] + + # different periods, different result + ma = await interpreter.interprete("ma(close, 20)") + rounded_ma = [round(v, 2) for v in ma] + assert rounded_ma == [ + 85.41, 85.98, 86.59, 87.1, 87.62, 88.15, 88.65, 89.16, 89.57, 89.98, + 90.41, 90.75, 91.03 + ] + + +@pytest.mark.asyncio +async def test_vwma_operator(interpreter): + vwma = await interpreter.interprete("vwma(close, volume, 14)") + rounded_vwma = [round(v, 2) for v in vwma] + assert rounded_vwma == [ + # different results from ma(close, 14) + 84.15, 84.51, 84.87, 85.29, 85.66, 86.3, 86.76, 87.37, 88.02, 88.55, + 89.1, 89.9, 90.31, 90.87, 91.16, 91.53, 91.91, 92.19, 92.37 + ] + # different periods, different result + vwma = await interpreter.interprete("vwma(close, volume, 20)") + rounded_vwma = [round(v, 2) for v in vwma] + assert rounded_vwma == [ + 85.52, 85.93, 86.5, 87.19, 87.66, 88.06, 88.53, 89.24, 89.6, 89.9, + 90.27, 90.84, 91.08 + ] + + +@pytest.mark.asyncio +async def test_ema_operator(interpreter): + ema = await interpreter.interprete("ema(close, 14)") + rounded_ema = [round(v, 2) for v in ema] + assert rounded_ema == [ + # different results from ma(close, 14) + 81.59, 81.52, 81.7, 81.87, 82.1, 82.24, 82.32, 82.55, 82.81, 83.02, + 83.35, 83.78, 84.19, 84.67, 85.02, 85.31, 85.53, 86.0, 86.49, 87.01, + 87.78, 88.53, 89.13, 89.7, 90.29, 90.67, 91.0, 91.15, 91.37, 91.58, + 91.67, 91.74 + ] + + # different periods, different result + ema = await interpreter.interprete("ema(close, 20)") + rounded_ema = [round(v, 2) for v in ema] + assert rounded_ema == [ + 81.59, 81.54, 81.67, 81.79, 81.97, 82.08, 82.15, 82.33, 82.54, 82.71, + 82.98, 83.32, 83.66, 84.05, 84.36, 84.63, 84.85, 85.25, 85.67, 86.12, + 86.76, 87.39, 87.92, 88.45, 88.99, 89.38, 89.75, 89.97, 90.24, 90.5, + 90.66, 90.81 + ] diff --git a/packages/tentacles/Meta/DSL_operators/ta_operators/tulipy_technical_analysis_operators.py b/packages/tentacles/Meta/DSL_operators/ta_operators/tulipy_technical_analysis_operators.py new file mode 100644 index 0000000000..78f7652bc0 --- /dev/null +++ b/packages/tentacles/Meta/DSL_operators/ta_operators/tulipy_technical_analysis_operators.py @@ -0,0 +1,166 @@ +# pylint: disable=missing-class-docstring,missing-function-docstring +# Drakkar-Software OctoBot-Commons +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import tulipy +import numpy as np + +import octobot_commons.errors +import tentacles.Meta.DSL_operators.ta_operators.ta_operator as ta_operator +import octobot_commons.dsl_interpreter as dsl_interpreter + + +def _to_numpy_array(data): + if isinstance(data, list): + return np.array(data, dtype=np.float64) + elif isinstance(data, tuple): + return np.array(list(data), dtype=np.float64) + elif isinstance(data, np.ndarray): + if data.dtype != np.float64: + return data.astype(np.float64) + return data + else: + raise octobot_commons.errors.InvalidParametersError(f"Unsupported data type: {type(data)}") + + +def _to_int(value): + if isinstance(value, int): + return value + elif isinstance(value, float): + return int(value) + else: + raise octobot_commons.errors.InvalidParametersError(f"Unsupported value type: {type(value)}") + + +def converted_tulipy_error(f): + def converted_tulipy_error_wrapper(*args, **kwargs): + try: + return f(*args, **kwargs) + except tulipy.InvalidOptionError as err: + raise TypeError( + f"Invalid technical indicator parameter - {err.__class__.__name__}" + ) from err + return converted_tulipy_error_wrapper + + +class RSIOperator(ta_operator.TAOperator): + DESCRIPTION = "Returns the Relative Strength Index (RSI) of the given array of numbers" + EXAMPLE = "rsi([100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110], 14)" + + @staticmethod + def get_name() -> str: + return "rsi" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="data", description="the data to compute the RSI on", required=True, type=list), + dsl_interpreter.OperatorParameter(name="period", description="the period to use for the RSI", required=True, type=int), + ] + + @converted_tulipy_error + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + return list(tulipy.rsi(_to_numpy_array(operands[0]), period=_to_int(operands[1]))) + + +class MACDOperator(ta_operator.TAOperator): + DESCRIPTION = "Returns the Moving Average Convergence Divergence (MACD) of the given array of numbers" + EXAMPLE = "macd(close('BTC/USDT', '1h'), 12, 26, 9)" + + @staticmethod + def get_name() -> str: + return "macd" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="data", description="the data to compute the MACD on", required=True, type=list), + dsl_interpreter.OperatorParameter(name="short_period", description="the short period to use for the MACD", required=True, type=int), + dsl_interpreter.OperatorParameter(name="long_period", description="the long period to use for the MACD", required=True, type=int), + dsl_interpreter.OperatorParameter(name="signal_period", description="the signal period to use for the MACD", required=True, type=int), + ] + + @converted_tulipy_error + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + macd, macd_signal, macd_hist = tulipy.macd( + _to_numpy_array(operands[0]), short_period=_to_int(operands[1]), long_period=_to_int(operands[2]), signal_period=_to_int(operands[3]) + ) + return list(macd_hist) + + +class MAOperator(ta_operator.TAOperator): + DESCRIPTION = "Returns the moving average of the given array of numbers" + EXAMPLE = "ma(close('BTC/USDT', '1h'), 14)" + + @staticmethod + def get_name() -> str: + return "ma" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="data", description="the data to compute the moving average on", required=True, type=list), + dsl_interpreter.OperatorParameter(name="period", description="the period to use for the moving average", required=True, type=int), + ] + + @converted_tulipy_error + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + return list(tulipy.sma(_to_numpy_array(operands[0]), period=_to_int(operands[1]))) + + +class EMAOperator(ta_operator.TAOperator): + DESCRIPTION = "Returns the exponential moving average of the given array of numbers" + EXAMPLE = "ema(close('BTC/USDT', '1h'), 14)" + + @staticmethod + def get_name() -> str: + return "ema" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="data", description="the data to compute the exponential moving average on", required=True, type=list), + dsl_interpreter.OperatorParameter(name="period", description="the period to use for the exponential moving average", required=True, type=int), + ] + + @converted_tulipy_error + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + return list(tulipy.ema(_to_numpy_array(operands[0]), period=_to_int(operands[1]))) + + +class VWMAOperator(ta_operator.TAOperator): + DESCRIPTION = "Returns the volume weighted moving average of the given array of numbers" + EXAMPLE = "vwma(close('BTC/USDT', '1h'), volume('BTC/USDT', '1h'), 14)" + + @staticmethod + def get_name() -> str: + return "vwma" + + @classmethod + def get_parameters(cls) -> list[dsl_interpreter.OperatorParameter]: + return [ + dsl_interpreter.OperatorParameter(name="data", description="the data to compute the volume weighted moving average on", required=True, type=list), + dsl_interpreter.OperatorParameter(name="volume", description="the volume data to use for the volume weighted moving average", required=True, type=list), + dsl_interpreter.OperatorParameter(name="period", description="the period to use for the volume weighted moving average", required=True, type=int), + ] + + @converted_tulipy_error + def compute(self) -> dsl_interpreter.ComputedOperatorParameterType: + operands = self.get_computed_parameters() + return list(tulipy.vwma(_to_numpy_array(operands[0]), _to_numpy_array(operands[1]), period=_to_int(operands[2]))) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/TA/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/TA/__init__.py new file mode 100644 index 0000000000..b51fe9e01c --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/TA/__init__.py @@ -0,0 +1,17 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from .trigger import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/TA/trigger/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/TA/trigger/__init__.py new file mode 100644 index 0000000000..a128f17335 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/TA/trigger/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from .eval_triggered import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/TA/trigger/eval_triggered.py b/packages/tentacles/Meta/Keywords/scripting_library/TA/trigger/eval_triggered.py new file mode 100644 index 0000000000..cd7a790ec8 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/TA/trigger/eval_triggered.py @@ -0,0 +1,247 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.constants as commons_constants +import octobot_commons.errors as commons_errors +import octobot_commons.enums as commons_enums +import octobot_commons.dict_util as dict_util +import octobot_evaluators.matrix as matrix +import octobot_evaluators.enums as evaluators_enums +import octobot_tentacles_manager.api as tentacles_manager_api +import octobot_trading.modes.script_keywords as script_keywords +import tentacles.Meta.Keywords.scripting_library.UI.inputs.triggers as triggers + + +# 10000000000 = Sat, 20 Nov 2286 17:46:40 GMT to select all values +ALL_VALUES_CACHE_KEY = 10000000000.0 + + +def _is_first_candle_only(context): + if not context.exchange_manager.is_backtesting: + # this is a backtesting only optimization + return False + tentacle_config = context.tentacle.get_local_config() + return tentacle_config.get(triggers.TRIGGER_ONLY_ON_THE_FIRST_CANDLE_KEY, False) + + +def _is_first_candle_call(context, init_key): + # TODO: figure out if we currently are in the 1st call of the given candle (careful with timeframes) + return not context.symbol_writer.are_data_initialized_by_key.get(init_key, False) + + +async def evaluator_get_result( + context: script_keywords.Context, + tentacle_class, + time_frame=None, + symbol: str = None, + trigger: bool = False, + value_key=commons_enums.CacheDatabaseColumns.VALUE.value, + cache_key=None, + config_name: str = None, + config: dict = None +): + tentacle_class = tentacles_manager_api.get_tentacle_class_from_string(tentacle_class) \ + if isinstance(tentacle_class, str) else tentacle_class + config_name = context.get_config_name_or_default(tentacle_class, config_name) + init_key = _get_init_key(context, config_name) + is_first_candle_only = _is_first_candle_only(context) + should_trigger = not is_first_candle_only or (is_first_candle_only and _is_first_candle_call(context, init_key)) + if not context.symbol_writer.are_data_initialized_by_key.get(init_key, False) or (should_trigger and trigger): + with context.adapted_trigger_timestamp(tentacle_class, config_name): + # always trigger when asked to then return the triggered evaluation return + return (await _trigger_single_evaluation(context, tentacle_class, value_key, cache_key, + config_name, config, init_key))[0] + if tentacle_class.use_cache(): + # try reading from cache + try: + with context.adapted_trigger_timestamp(tentacle_class, config_name): + await context.ensure_tentacle_cache_requirements(tentacle_class, config_name) + value, is_missing = await context.get_cached_value(value_key=value_key, + cache_key=cache_key, + tentacle_name=tentacle_class.__name__, + config_name=config_name) + if not is_missing: + return value + except commons_errors.UninitializedCache as e: + if tentacle_class is not None and trigger is False: + raise commons_errors.UninitializedCache(f"Can't read cache from {tentacle_class} before initializing " + f"it. Either activate this tentacle or set the 'trigger' " + f"parameter to True (error: {e})") from None + + _ensure_cache_when_set_value_key(value_key, tentacle_class) + # read from evaluation matrix + for value in _tentacle_values(context, tentacle_class, time_frame=time_frame, symbol=symbol): + return value + + +async def evaluator_get_results( + context: script_keywords.Context, + tentacle_class, + time_frame=None, + symbol: str = None, + trigger: bool = False, + value_key=commons_enums.CacheDatabaseColumns.VALUE.value, + cache_key=None, + limit: int = -1, + max_history: bool = False, + config_name: str = None, + config: dict = None +): + cache_key = ALL_VALUES_CACHE_KEY if max_history else cache_key + tentacle_class = tentacles_manager_api.get_tentacle_class_from_string(tentacle_class) \ + if isinstance(tentacle_class, str) else tentacle_class + config_name = context.get_config_name_or_default(tentacle_class, config_name) + init_key = _get_init_key(context, config_name) + is_first_candle_only = _is_first_candle_only(context) + should_trigger = not is_first_candle_only or (is_first_candle_only and _is_first_candle_call(context, init_key)) + if not context.symbol_writer.are_data_initialized_by_key.get(init_key, False) or (should_trigger and trigger): + with context.adapted_trigger_timestamp(tentacle_class, config_name): + # always trigger when asked to + eval_result, _ = await _trigger_single_evaluation(context, tentacle_class, value_key, cache_key, + config_name, config, init_key) + if limit == 1: + # return already if only one value to return + return eval_result + if tentacle_class.use_cache(): + try: + with context.adapted_trigger_timestamp(tentacle_class, config_name): + await context.ensure_tentacle_cache_requirements(tentacle_class, config_name) + # can return multiple values + return await context.get_cached_values(value_key=value_key, cache_key=cache_key, limit=limit, + tentacle_name=tentacle_class.__name__, config_name=config_name) + except commons_errors.UninitializedCache: + if tentacle_class is not None and trigger is False: + raise commons_errors.UninitializedCache(f"Can't read cache from {tentacle_class} before initializing " + f"it. Either activate this tentacle or set the 'trigger' " + f"parameter to True") from None + _ensure_cache_when_set_value_key(value_key, tentacle_class) + if limit == 1: + # read from evaluation matrix + for value in _tentacle_values(context, tentacle_class, time_frame=time_frame, symbol=symbol): + return value + raise commons_errors.MissingDataError(f"No evaluator value for {tentacle_class.__name__}") + else: + raise commons_errors.ConfigEvaluatorError(f"Evaluator cache is required to get more than one historical value " + f"of an evaluator. Cache is disabled on {tentacle_class.__name__}") + + +def _ensure_cache_when_set_value_key(value_key, tentacle_class): + if not tentacle_class.use_cache() and value_key != commons_enums.CacheDatabaseColumns.VALUE.value: + raise commons_errors.ConfigEvaluatorError(f"Evaluator cache is required to read a value_key different from " + f"the evaluator output evaluation. " + f"Cache is disabled on {tentacle_class.__name__}") + + +async def _trigger_single_evaluation(context, tentacle_class, value_key, cache_key, config_name, config, init_key): + config_name, cleaned_config_name, config, tentacles_setup_config, tentacle_config = \ + context.get_tentacle_config_elements(tentacle_class, config_name, config) + async with context.local_nested_tentacle_config(tentacle_class, config_name, True): + is_eval_result_set = False + eval_result = evaluator_instance = None + if cleaned_config_name not in tentacle_config or \ + not context.symbol_writer.are_data_initialized_by_key.get(init_key, False): + # always call _init_nested_call the 1st time the evaluation chain is triggered to make sure scripts + # are executed entirely at least once + # might need to merge config with tentacles_manager_api.get_tentacle_config if evaluator is + # not filling default config values + init_config = {**tentacle_config.get(cleaned_config_name, {}), **config} + eval_result, error, evaluator_instance = await _init_nested_call( + context, tentacle_class, config_name, cleaned_config_name, + tentacles_setup_config, tentacle_config, init_config + ) + if error is None: + is_eval_result_set = True + try: + tentacle_config = tentacle_config[cleaned_config_name] + except KeyError as e: + raise commons_errors.ConfigEvaluatorError(f"Missing evaluator configuration with name {e}") + # apply forced config if any + dict_util.nested_update_dict(tentacle_config, config) + await script_keywords.save_user_input( + context, + config_name, + commons_constants.NESTED_TENTACLE_CONFIG, + tentacle_config, + {}, + is_nested_config=context.nested_depth > 1, + nested_tentacle=tentacle_class.get_name() + ) + if not is_eval_result_set: + eval_result, _, evaluator_instance = (await tentacle_class.single_evaluation( + tentacles_setup_config, + tentacle_config, + context=context + )) + if value_key == commons_enums.CacheDatabaseColumns.VALUE.value and cache_key is None: + return eval_result, evaluator_instance.specific_config + else: + value, is_missing = await context.get_cached_value(value_key=value_key, + cache_key=cache_key, + tentacle_name=tentacle_class.__name__, + config_name=config_name, + ignore_requirement=True) + return None if is_missing else value, evaluator_instance.specific_config + + +async def _init_nested_call(context, tentacle_class, config_name, cleaned_config_name, + tentacles_setup_config, tentacle_config, config): + evaluation, error, evaluator_instance = await tentacle_class.single_evaluation( + tentacles_setup_config, + config, + context=context, + ignore_cache=True + ) + tentacle_config[cleaned_config_name] = evaluator_instance.specific_config + if error is not None: + _invalidate_call_and_parents_init_status(context, config_name) + else: + context.symbol_writer.are_data_initialized_by_key[_get_init_key(context, config_name)] = True + return evaluation, error, evaluator_instance + + +def _get_init_key(context, config_name): + return f"{config_name}_{context.time_frame}" + + +def _invalidate_call_and_parents_init_status(context, config_name): + # set are_data_initialized_by_key to False for this evaluator and its parent calls to ensure init is called + # again later and the evaluator can be run entirely + context.symbol_writer.are_data_initialized_by_key[_get_init_key(context, config_name)] = False + for nested_config_name in context.nested_config_names: + context.symbol_writer.are_data_initialized_by_key[_get_init_key(context, nested_config_name)] = False + + +def _tentacle_values(context, + tentacle_class, + time_frames=None, + symbols=None, + time_frame=None, + symbol=None): + tentacle_name = tentacle_class if isinstance(tentacle_class, str) else tentacle_class.get_name() + symbols = [context.symbol or symbol] or symbols + time_frames = [context.time_frame or time_frame] or time_frames + for symbol in symbols: + for time_frame in time_frames: + for tentacle_type in evaluators_enums.EvaluatorMatrixTypes: + for evaluated_ta_node in matrix.get_tentacles_value_nodes( + context.matrix_id, + matrix.get_tentacle_nodes(context.matrix_id, + exchange_name=context.exchange_name, + tentacle_type=tentacle_type.value, + tentacle_name=tentacle_name), + symbol=symbol, + time_frame=time_frame): + yield evaluated_ta_node.node_value diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/__init__.py new file mode 100644 index 0000000000..5f824b232c --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/__init__.py @@ -0,0 +1,19 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from .inputs import * +from .plots import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/__init__.py new file mode 100644 index 0000000000..acdf4d9dc3 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/__init__.py @@ -0,0 +1,21 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from .library_user_inputs import * +from .select_time_frame import * +from .select_candle import * +from .select_history import * +from .triggers import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/library_user_inputs.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/library_user_inputs.py new file mode 100644 index 0000000000..6dea97abec --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/library_user_inputs.py @@ -0,0 +1,110 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_commons.configuration as commons_configuration +import tentacles.Meta.Keywords.scripting_library.TA.trigger.eval_triggered as eval_triggered +import octobot_tentacles_manager.api as tentacles_manager_api + + +def _find_configuration(nested_configuration, nested_config_names, element): + for key, config in nested_configuration.items(): + if len(nested_config_names) == 0 and key == element: + return config + if isinstance(config, dict) and (len(nested_config_names) == 0 or key == nested_config_names[0]): + found_config = _find_configuration(config, nested_config_names[1:], element) + if found_config is not None: + return found_config + return None + + +async def external_user_input( + ctx, + name, + tentacle, + config_name=None, + trigger_if_necessary=True, + include_tentacle_as_requirement=True, + config: dict = None +): + triggered = False + try: + if config_name is None: + query = await ctx.run_data_writer.search() + raw_value = await ctx.run_data_writer.select( + commons_enums.DBTables.INPUTS.value, + (query.name == name) & (query.tentacle == tentacle) + ) + if raw_value: + return raw_value[0]["value"] + else: + # look for the user input in non nested user inputs + user_inputs = await commons_configuration.get_user_inputs(ctx.run_data_writer) + # First try with the current top level tentacle (faster and to avoid name conflicts) + top_tentacle_config = ctx.top_level_tentacle.get_local_config() + tentacle_config = _find_configuration(top_tentacle_config, + ctx.nested_config_names, + config_name.replace(" ", "_")) + if tentacle_config is None: + # Then try with the current local tentacle, then use all tentacles + current_tentacle_config = ctx.tentacle.get_local_config() + tentacle_config = current_tentacle_config.get(config_name.replace(" ", "_"), None) + if tentacle_config is None: + for local_user_input in user_inputs: + if not local_user_input["is_nested_config"] and \ + local_user_input["input_type"] == commons_constants.NESTED_TENTACLE_CONFIG: + tentacle_config = _find_configuration(local_user_input["value"], + ctx.nested_config_names, + config_name.replace(" ", "_")) + if tentacle_config is not None: + break + if not trigger_if_necessary: + # look into nested config as well since the tentacle wont be triggered + for local_user_input in user_inputs: + if local_user_input["is_nested_config"] and \ + local_user_input["input_type"] == commons_constants.NESTED_TENTACLE_CONFIG: + if local_user_input["name"] == config_name: + tentacle_config = local_user_input["value"] + break + tentacle_config = _find_configuration(local_user_input["value"], + ctx.nested_config_names, + config_name.replace(" ", "_")) + if tentacle_config is not None: + break + if tentacle_config is None and trigger_if_necessary: + tentacle_class = tentacles_manager_api.get_tentacle_class_from_string(tentacle) \ + if isinstance(tentacle, str) else tentacle + _, tentacle_config = await eval_triggered._trigger_single_evaluation( + ctx, tentacle_class, + commons_enums.CacheDatabaseColumns.VALUE.value, + None, + config_name, config) + triggered = True + try: + return None if tentacle_config is None else tentacle_config[name.replace(" ", "_")] + except KeyError: + return None + finally: + if include_tentacle_as_requirement and not triggered and trigger_if_necessary: + # to register the tentacle as requirement: trigger its evaluation in a nested context + tentacle_class = tentacles_manager_api.get_tentacle_class_from_string(tentacle) \ + if isinstance(tentacle, str) else tentacle + await eval_triggered._trigger_single_evaluation( + ctx, tentacle_class, + commons_enums.CacheDatabaseColumns.VALUE.value, + None, + config_name, config) + return None diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_candle.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_candle.py new file mode 100644 index 0000000000..e2d48b59ca --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_candle.py @@ -0,0 +1,48 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_public_data as exchange_public_data + + +async def user_select_candle( + ctx, + name="Select Candle Source", + def_val="close", + time_frame=None, + symbol=None, + limit=-1, + enable_volume=True, + return_source_name=False, + max_history=False, + show_in_summary=True, + show_in_optimizer=True, + order=None, +): + available_data_src = ["open", "high", "low", "close", "hl2", "hlc3", "ohlc4", + "Heikin Ashi open", "Heikin Ashi high", "Heikin Ashi low", "Heikin Ashi close"] + if enable_volume: + available_data_src.append("volume") + + data_source = await basic_keywords.user_input(ctx, name, "options", def_val, options=available_data_src, + show_in_summary=show_in_summary, show_in_optimizer=show_in_optimizer, + order=order) + candle_source = await exchange_public_data.get_candles_from_name( + ctx, source_name=data_source, time_frame=time_frame, symbol=symbol, limit=limit, max_history=max_history + ) + if return_source_name: + return candle_source, data_source + else: + return candle_source diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_history.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_history.py new file mode 100644 index 0000000000..9d9d98bb64 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_history.py @@ -0,0 +1,31 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords +import octobot_trading.constants as trading_constants + + +async def set_candles_history_size( + ctx, + def_val=trading_constants.DEFAULT_CANDLE_HISTORY_SIZE, + name=trading_constants.CONFIG_CANDLES_HISTORY_SIZE_TITLE, + show_in_summary=False, + show_in_optimizer=False, + order=999, +): + return await basic_keywords.user_input(ctx, name, "int", def_val, + show_in_summary=show_in_summary, show_in_optimizer=show_in_optimizer, + order=order) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_time_frame.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_time_frame.py new file mode 100644 index 0000000000..bbe12bd862 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/select_time_frame.py @@ -0,0 +1,90 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.time_frame_manager as time_frame_manager +import octobot_commons.constants as commons_constants +import octobot_commons.errors as commons_errors +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords +import octobot_evaluators.evaluators as evaluators +import octobot_evaluators.matrix as matrix + + +async def user_select_time_frame( + ctx, + def_val="1h", + name="Timeframe", + show_in_summary=True, + show_in_optimizer=True, + order=None +): + available_timeframes = time_frame_manager.sort_time_frames(ctx.exchange_manager.client_time_frames) + selected_timeframe = await basic_keywords.user_input(ctx, name, "options", def_val, options=available_timeframes, + show_in_summary=show_in_summary, + show_in_optimizer=show_in_optimizer, order=order) + return selected_timeframe + + +async def user_multi_select_time_frame( + ctx, + def_val="1h", + name="Timeframe", + show_in_summary=True, + show_in_optimizer=True, + order=None +): + available_timeframes = time_frame_manager.sort_time_frames(ctx.exchange_manager.client_time_frames) + selected_timeframe = await basic_keywords.user_input(ctx, name, "multiple-options", def_val, + options=available_timeframes, show_in_summary=show_in_summary, + show_in_optimizer=show_in_optimizer, order=order) + return selected_timeframe + + +async def set_trigger_time_frames( + ctx, + def_val=None, + show_in_summary=True, + show_in_optimizer=False, + order=None +): + available_timeframes = [ + tf.value + for tf in time_frame_manager.sort_time_frames( + ctx.exchange_manager.exchange_config.get_relevant_time_frames() + ) + ] + def_val = def_val or available_timeframes[0] + name = commons_constants.CONFIG_TRIGGER_TIMEFRAMES.replace("_", " ") + trigger_timeframes = await basic_keywords.user_input(ctx, name, "multiple-options", def_val, + options=available_timeframes, show_in_summary=show_in_summary, + show_in_optimizer=show_in_optimizer, flush_if_necessary=True, + order=order) + if ctx.time_frame not in trigger_timeframes: + if isinstance(ctx.tentacle, evaluators.AbstractEvaluator): + # For evaluators, make sure that undesired time frames are not in matrix anymore. + # Otherwise a strategy might wait for their value before pushing its evaluation to trading modes + matrix.delete_tentacle_node( + matrix_id=ctx.tentacle.matrix_id, + tentacle_path=matrix.get_matrix_default_value_path( + exchange_name=ctx.exchange_manager.exchange_name, + tentacle_type=ctx.tentacle.evaluator_type.value, + tentacle_name=ctx.tentacle.get_name(), + cryptocurrency=ctx.cryptocurrency, + symbol=ctx.symbol, + time_frame=ctx.time_frame if ctx.time_frame else None + ) + ) + raise commons_errors.ExecutionAborted(f"Execution aborted: disallowed time frame: {ctx.time_frame}") + return trigger_timeframes diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/triggers.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/triggers.py new file mode 100644 index 0000000000..2d5b7c69db --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/inputs/triggers.py @@ -0,0 +1,30 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords + + +TRIGGER_ONLY_ON_THE_FIRST_CANDLE_KEY = "trigger_only_on_the_first_candle" + + +async def trigger_only_on_the_first_candle(ctx, + default_value, + show_in_summary=False, + show_in_optimizer=False, + order=700): + return await basic_keywords.user_input(ctx, TRIGGER_ONLY_ON_THE_FIRST_CANDLE_KEY, "boolean", default_value, + show_in_summary=show_in_summary, + show_in_optimizer=show_in_optimizer, + order=order) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/plots/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/plots/__init__.py new file mode 100644 index 0000000000..ab8d63704f --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/plots/__init__.py @@ -0,0 +1,17 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from .displayed_elements import DisplayedElements diff --git a/packages/tentacles/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py b/packages/tentacles/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py new file mode 100644 index 0000000000..a7532e493c --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py @@ -0,0 +1,529 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_trading.enums as trading_enums +import octobot_trading.constants as trading_constants +import octobot_commons.enums as commons_enums +import octobot_commons.errors as commons_errors +import octobot_commons.constants as commons_constants +import octobot_commons.databases as databases +import octobot_commons.display as display +import octobot_backtesting.api as backtesting_api +import octobot_trading.api as trading_api + + +class DisplayedElements(display.DisplayTranslator): + TABLE_KEY_TO_COLUMN = { + commons_enums.PlotAttributes.X.value: "Time", + commons_enums.PlotAttributes.Y.value: "Value", + commons_enums.PlotAttributes.Z.value: "Value", + commons_enums.PlotAttributes.OPEN.value: "Open", + commons_enums.PlotAttributes.HIGH.value: "High", + commons_enums.PlotAttributes.LOW.value: "Low", + commons_enums.PlotAttributes.CLOSE.value: "Close", + commons_enums.PlotAttributes.VOLUME.value: "Volume", + commons_enums.DBRows.SYMBOL.value: "Symbol", + } + + async def fill_from_database(self, trading_mode, database_manager, exchange_name, symbol, time_frame, exchange_id, + with_inputs=True, symbols=None, time_frames=None): + async with databases.MetaDatabase.database(database_manager) as meta_db: + graphs_by_parts = {} + inputs = [] + candles = [] + cached_values = [] + if trading_mode.is_backtestable(): + exchange_name, symbol, time_frame = \ + await self._adapt_inputs_for_backtesting_results(meta_db, exchange_name, symbol, time_frame) + run_db = meta_db.get_run_db() + metadata_rows = await run_db.all(commons_enums.DBTables.METADATA.value) + metadata = metadata_rows[0] if metadata_rows else None + if symbols is not None: + symbols.extend(metadata[commons_enums.BacktestingMetadata.SYMBOLS.value]) + if time_frames is not None: + time_frames.extend(metadata[commons_enums.BacktestingMetadata.TIME_FRAMES.value]) + account_type = trading_api.get_account_type_from_run_metadata(metadata) \ + if database_manager.is_backtesting() \ + else trading_api.get_account_type_from_exchange_manager( + trading_api.get_exchange_manager_from_exchange_id(exchange_id) + ) + dbs = [ + run_db, + meta_db.get_transactions_db(account_type, exchange_name), + meta_db.get_orders_db(account_type, exchange_name), + meta_db.get_trades_db(account_type, exchange_name), + meta_db.get_symbol_db(exchange_name, symbol) + ] + for index, db in enumerate(dbs): + for table_name in await db.tables(): + display_data = await db.all(table_name) + if table_name == commons_enums.DBTables.INPUTS.value: + inputs += display_data + if table_name == commons_enums.DBTables.CANDLES_SOURCE.value: + candles += display_data + if table_name == commons_enums.DBTables.CACHE_SOURCE.value: + cached_values += display_data + else: + try: + filter_symbol = index != len(dbs) - 1 # don't filter symbol for symbol db + filtered_data = self._filter_and_adapt_displayed_elements( + display_data, symbol, time_frame, table_name, filter_symbol + ) + chart = display_data[0][commons_enums.DisplayedElementTypes.CHART.value] + if chart is None: + continue + if chart in graphs_by_parts: + graphs_by_parts[chart][table_name] = filtered_data + else: + graphs_by_parts[chart] = {table_name: filtered_data} + except (IndexError, KeyError): + # some table have no chart + pass + try: + run_start_time, run_end_time = await self._get_run_window(meta_db.get_run_db()) + except IndexError: + run_start_time = run_end_time = 0 + first_candle_time, last_candle_time = \ + await self._add_candles(graphs_by_parts, candles, exchange_name, exchange_id, symbol, time_frame, + run_start_time, run_end_time) + await self._add_cached_values(graphs_by_parts, cached_values, time_frame, + first_candle_time, last_candle_time) + self._plot_graphs(graphs_by_parts) + if with_inputs: + with self.part(commons_enums.DBTables.INPUTS.value, + element_type=commons_enums.DisplayedElementTypes.INPUT.value) as part: + self.add_user_inputs(inputs, part) + + async def _adapt_inputs_for_backtesting_results(self, meta_db, exchange_name, symbol, time_frame): + if not await meta_db.run_dbs_identifier.exchange_base_identifier_exists(exchange_name): + single_exchange = await meta_db.run_dbs_identifier.get_single_existing_exchange() + if single_exchange is None: + # no single exchange with data + raise commons_errors.MissingExchangeDataError( + f"No data for {exchange_name}. This run might have happened on other exchange(s)" + ) + else: + # retarget exchange_name + exchange_name = single_exchange + if not await meta_db.run_dbs_identifier.symbol_base_identifier_exists(exchange_name, symbol): + run_metadata = await meta_db.get_run_db().all(commons_enums.DBTables.METADATA.value) + try: + symbols = run_metadata[0].get(commons_enums.DBRows.SYMBOLS.value, []) + if len(symbols) > 0: + # retarget symbol + symbol = symbols[0] + else: + # no single exchange with data + raise commons_errors.MissingExchangeDataError( + f"No symbol related data for {exchange_name}" + ) + except IndexError: + # no run metadata, try to continue + pass + return exchange_name, symbol, time_frame + + def _plot_graphs(self, graphs_by_parts): + for part, datasets in graphs_by_parts.items(): + with self.part(part, element_type=commons_enums.DisplayedElementTypes.CHART.value) as part: + for title, dataset in datasets.items(): + if not dataset: + continue + x = [] + y = [] + open = [] + high = [] + low = [] + close = [] + volume = [] + text = [] + color = [] + size = [] + shape = [] + if dataset[0].get(commons_enums.PlotAttributes.X.value, None) is None: + x = None + if dataset[0].get(commons_enums.PlotAttributes.Y.value, None) is None: + y = None + if dataset[0].get(commons_enums.PlotAttributes.OPEN.value, None) is None: + open = None + if dataset[0].get(commons_enums.PlotAttributes.HIGH.value, None) is None: + high = None + if dataset[0].get(commons_enums.PlotAttributes.LOW.value, None) is None: + low = None + if dataset[0].get(commons_enums.PlotAttributes.CLOSE.value, None) is None: + close = None + if dataset[0].get(commons_enums.PlotAttributes.VOLUME.value, None) is None: + volume = None + if dataset[0].get(commons_enums.PlotAttributes.TEXT.value, None) is None: + text = None + if dataset[0].get(commons_enums.PlotAttributes.COLOR.value, None) is None: + color = None + if dataset[0].get(commons_enums.PlotAttributes.SIZE.value, None) is None: + size = None + if dataset[0].get(commons_enums.PlotAttributes.SHAPE.value, None) is None: + shape = None + own_yaxis = dataset[0].get(commons_enums.PlotAttributes.OWN_YAXIS.value, False) + for data in dataset: + if x is not None: + x.append(data[commons_enums.PlotAttributes.X.value]) + if y is not None: + y.append(data[commons_enums.PlotAttributes.Y.value]) + if open is not None: + open.append(data[commons_enums.PlotAttributes.OPEN.value]) + if high is not None: + high.append(data[commons_enums.PlotAttributes.HIGH.value]) + if low is not None: + low.append(data[commons_enums.PlotAttributes.LOW.value]) + if close is not None: + close.append(data[commons_enums.PlotAttributes.CLOSE.value]) + if volume is not None: + volume.append(data[commons_enums.PlotAttributes.VOLUME.value]) + if text is not None: + text.append(data[commons_enums.PlotAttributes.TEXT.value]) + if color is not None: + color.append(data[commons_enums.PlotAttributes.COLOR.value]) + if size is not None: + size.append(data[commons_enums.PlotAttributes.SIZE.value]) + if shape is not None: + shape.append(data[commons_enums.PlotAttributes.SHAPE.value]) + # use log scale for all positive charts + y_type = None + if title == commons_enums.DBTables.CANDLES_SOURCE.value \ + or 0 <= min(d.get(commons_enums.PlotAttributes.Y.value, 0) for d in dataset): + y_type = "log" + + part.plot( + kind=data.get(commons_enums.PlotAttributes.KIND.value, None), + x=x, + y=y, + open=open, + high=high, + low=low, + close=close, + volume=volume, + title=title, + text=text, + x_type="date", + y_type=y_type, + mode=data.get(commons_enums.PlotAttributes.MODE.value, None), + own_yaxis=own_yaxis, + color=color, + size=size, + symbol=shape) + + def _adapt_for_display(self, table_name, filtered_elements): + if table_name == commons_enums.DBTables.TRANSACTIONS.value: + # only display liquidations + filtered_elements = [ + display_element + for display_element in filtered_elements + if display_element.get("trigger_source", None) == trading_enums.PNLTransactionSource.LIQUIDATION.value + ] + for display_element in filtered_elements: + display_element[commons_enums.PlotAttributes.COLOR.value] = "red" + display_element[commons_enums.PlotAttributes.SHAPE.value] = commons_enums.PlotAttributes.X.value + display_element[commons_enums.PlotAttributes.SIZE.value] = 15 + display_element[commons_enums.PlotAttributes.TEXT.value] = f"Liquidation ({abs(display_element.get('closed_quantity', 0))} liquidated)" + display_element[commons_enums.PlotAttributes.Y.value] = display_element["order_exit_price"] + elif table_name == commons_enums.DBTables.ORDERS.value: + # adapt order details for display + for display_element in filtered_elements: + order_details = display_element[trading_constants.STORAGE_ORIGIN_VALUE] + side = order_details[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] + display_element[commons_enums.PlotAttributes.COLOR.value] = "red" \ + if side == trading_enums.TradeOrderSide.SELL.value else "green" + display_element[commons_enums.PlotAttributes.SHAPE.value] = "line-ew-open" + display_element[commons_enums.PlotAttributes.MODE.value] = "markers" + display_element[commons_enums.PlotAttributes.KIND.value] = "scattergl" + display_element[commons_enums.PlotAttributes.SIZE.value] = 15 + display_element[commons_enums.PlotAttributes.TEXT.value] = \ + f"{order_details[trading_enums.ExchangeConstantsOrderColumns.TYPE.value]} " \ + f"{order_details[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value]} " \ + f"{order_details[trading_enums.ExchangeConstantsOrderColumns.QUANTITY_CURRENCY.value]} " \ + f"at {order_details[trading_enums.ExchangeConstantsOrderColumns.PRICE.value]}" + display_element[commons_enums.PlotAttributes.Y.value] = \ + order_details[trading_enums.ExchangeConstantsOrderColumns.PRICE.value] + display_element[commons_enums.PlotAttributes.X.value] = \ + order_details[trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value] * 1000 + display_element[commons_enums.DisplayedElementTypes.CHART.value] = \ + commons_enums.PlotCharts.MAIN_CHART.value + return filtered_elements + + def _filter_and_adapt_displayed_elements(self, elements, symbol, time_frame, table_name, filter_symbol): + default_symbol = None if filter_symbol else symbol + filtered_elements = [ + display_element + for display_element in elements + if ( + display_element.get(commons_enums.DBRows.SYMBOL.value, default_symbol) == symbol + and display_element.get(commons_enums.DBRows.TIME_FRAME.value) == time_frame + ) or ( + display_element.get(trading_constants.STORAGE_ORIGIN_VALUE, {}) + .get(trading_enums.ExchangeConstantsOrderColumns.SYMBOL.value, default_symbol) == symbol + ) + ] + return self._adapt_for_display(table_name, filtered_elements) + + async def _get_run_window(self, run_database): + run_metadata = (await run_database.all(commons_enums.DBTables.METADATA.value))[0] + end_time = run_metadata.get("end_time", 0) + if end_time == -1: + # live mode + return 0, 0 + return run_metadata.get("start_time", 0), end_time + + async def _add_cached_values(self, graphs_by_parts, cached_values, time_frame, start_time, end_time): + start_time = start_time + end_time = end_time + for cached_value_metadata in cached_values: + if cached_value_metadata.get(commons_enums.DBRows.TIME_FRAME.value, None) == time_frame: + try: + chart = cached_value_metadata[commons_enums.DisplayedElementTypes.CHART.value] + x_shift = cached_value_metadata["x_shift"] + values = sorted(await self._get_cached_values_to_display(cached_value_metadata, x_shift, + start_time, end_time), + key=lambda x: x[commons_enums.PlotAttributes.X.value]) + try: + graphs_by_parts[chart][cached_value_metadata[commons_enums.PlotAttributes.TITLE.value]] = values + except KeyError: + if chart not in graphs_by_parts: + graphs_by_parts[chart] = {} + try: + graphs_by_parts[chart] = \ + {cached_value_metadata[commons_enums.PlotAttributes.TITLE.value]: values} + except KeyError: + graphs_by_parts[chart] = {commons_enums.PlotAttributes.TITLE.value: values} + except KeyError: + # some table have no chart + pass + + async def _get_cached_values_to_display(self, cached_value_metadata, x_shift, start_time, end_time): + cache_file = cached_value_metadata[commons_enums.PlotAttributes.VALUE.value] + cache_displayed_value = plotted_displayed_value = cached_value_metadata["cache_value"] + kind = cached_value_metadata[commons_enums.PlotAttributes.KIND.value] + mode = cached_value_metadata[commons_enums.PlotAttributes.MODE.value] + own_yaxis = cached_value_metadata[commons_enums.PlotAttributes.OWN_YAXIS.value] + condition = cached_value_metadata.get("condition", None) + try: + cache_database = databases.CacheDatabase(cache_file) + cache_type = (await cache_database.get_metadata())[commons_enums.CacheDatabaseColumns.TYPE.value] + if cache_type == databases.CacheTimestampDatabase.__name__: + cache = await cache_database.get_cache() + for cache_val in cache: + try: + if isinstance(cache_val[cache_displayed_value], bool): + plotted_displayed_value = self._get_cache_displayed_value(cache_val, cache_displayed_value) + if plotted_displayed_value is None: + self.logger.error(f"Impossible to plot {cache_displayed_value}: unset y axis value") + return [] + else: + break + except KeyError: + pass + except Exception as e: + print(e) + plotted_values = [] + for values in cache: + try: + if condition is None or condition == values[cache_displayed_value]: + x = (values[commons_enums.CacheDatabaseColumns.TIMESTAMP.value] + x_shift) * 1000 + if (start_time == end_time == 0) or start_time <= x <= end_time: + y = values[plotted_displayed_value] + if not isinstance(x, list) and isinstance(y, list): + for y_val in y: + plotted_values.append({ + commons_enums.PlotAttributes.X.value: x, + commons_enums.PlotAttributes.Y.value: y_val, + commons_enums.PlotAttributes.KIND.value: kind, + commons_enums.PlotAttributes.MODE.value: mode, + commons_enums.PlotAttributes.OWN_YAXIS.value: own_yaxis, + }) + else: + plotted_values.append({ + commons_enums.PlotAttributes.X.value: x, + commons_enums.PlotAttributes.Y.value: y, + commons_enums.PlotAttributes.KIND.value: kind, + commons_enums.PlotAttributes.MODE.value: mode, + commons_enums.PlotAttributes.OWN_YAXIS.value: own_yaxis, + }) + except KeyError: + pass + return plotted_values + self.logger.error(f"Unhandled cache type to display: {cache_type}") + except TypeError: + self.logger.error(f"Missing cache type in {cache_file} metadata file") + except commons_errors.DatabaseNotFoundError as ex: + self.logger.warning(f"Missing cache values ({ex})") + return [] + + @staticmethod + def _get_cache_displayed_value(cache_val, base_displayed_value): + for key in cache_val.keys(): + separator_split_key = key.split(commons_constants.CACHE_RELATED_DATA_SEPARATOR) + if base_displayed_value == separator_split_key[0] and len(separator_split_key) == 2: + return key + return None + + async def _add_candles(self, graphs_by_parts, candles_list, exchange_name, exchange_id, symbol, time_frame, + run_start_time, run_end_time): + first_candle_time = last_candle_time = 0 + for candles_metadata in candles_list: + if candles_metadata.get(commons_enums.DBRows.TIME_FRAME.value) == time_frame: + try: + chart = candles_metadata[commons_enums.DisplayedElementTypes.CHART.value] + candles = await self._get_candles_to_display(candles_metadata, exchange_name, + exchange_id, symbol, time_frame, + run_start_time, run_end_time) + try: + graphs_by_parts[chart][commons_enums.DBTables.CANDLES.value] = candles + except KeyError: + graphs_by_parts[chart] = {commons_enums.DBTables.CANDLES.value: candles} + # candles are assumed to be ordered + if first_candle_time == 0 or first_candle_time < candles[0][commons_enums.PlotAttributes.X.value]: + first_candle_time = candles[0][commons_enums.PlotAttributes.X.value] + if last_candle_time == 0 or last_candle_time > candles[-1][commons_enums.PlotAttributes.X.value]: + last_candle_time = candles[-1][commons_enums.PlotAttributes.X.value] + except KeyError: + # some table have no chart + pass + return first_candle_time, last_candle_time + + async def _get_candles_to_display(self, candles_metadata, exchange_name, exchange_id, symbol, time_frame, + run_start_time, run_end_time): + if candles_metadata[commons_enums.DBRows.VALUE.value] == commons_constants.LOCAL_BOT_DATA: + exchange_manager = trading_api.get_exchange_manager_from_exchange_id(exchange_id) + array_candles = trading_api.get_symbol_historical_candles( + trading_api.get_symbol_data(exchange_manager, symbol, allow_creation=False), time_frame + ) + return [ + { + commons_enums.PlotAttributes.X.value: time * 1000, + commons_enums.PlotAttributes.OPEN.value: array_candles[commons_enums.PriceIndexes.IND_PRICE_OPEN.value][index], + commons_enums.PlotAttributes.HIGH.value: array_candles[commons_enums.PriceIndexes.IND_PRICE_HIGH.value][index], + commons_enums.PlotAttributes.LOW.value: array_candles[commons_enums.PriceIndexes.IND_PRICE_LOW.value][index], + commons_enums.PlotAttributes.CLOSE.value: array_candles[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value][index], + commons_enums.PlotAttributes.VOLUME.value: array_candles[commons_enums.PriceIndexes.IND_PRICE_VOL.value][index], + commons_enums.PlotAttributes.KIND.value: "candlestick", + commons_enums.PlotAttributes.MODE.value: "lines", + } + for index, time in enumerate(array_candles[commons_enums.PriceIndexes.IND_PRICE_TIME.value]) + if (run_start_time == run_end_time == 0) or run_start_time <= time <= run_end_time + ] + db_candles = await backtesting_api.get_all_ohlcvs(candles_metadata[commons_enums.DBRows.VALUE.value], + exchange_name, + symbol, + commons_enums.TimeFrames(time_frame), + inferior_timestamp=run_start_time if run_start_time > 0 + else -1, + superior_timestamp=run_end_time if run_end_time > 0 else -1) + return [ + { + commons_enums.PlotAttributes.X.value: db_candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] * 1000, + commons_enums.PlotAttributes.OPEN.value: db_candle[commons_enums.PriceIndexes.IND_PRICE_OPEN.value], + commons_enums.PlotAttributes.HIGH.value: db_candle[commons_enums.PriceIndexes.IND_PRICE_HIGH.value], + commons_enums.PlotAttributes.LOW.value: db_candle[commons_enums.PriceIndexes.IND_PRICE_LOW.value], + commons_enums.PlotAttributes.CLOSE.value: db_candle[commons_enums.PriceIndexes.IND_PRICE_CLOSE.value], + commons_enums.PlotAttributes.VOLUME.value: db_candle[commons_enums.PriceIndexes.IND_PRICE_VOL.value], + commons_enums.PlotAttributes.KIND.value: "candlestick", + commons_enums.PlotAttributes.MODE.value: "lines", + } + for index, db_candle in enumerate(db_candles) + ] + + def plot( + self, + x, + y=None, + open=None, + high=None, + low=None, + close=None, + volume=None, + x_type="date", + y_type=None, + title=None, + text=None, + kind="scattergl", + mode="lines", + line_shape="linear", + own_xaxis=False, + own_yaxis=False, + color=None, + size=None, + symbol=None, + ): + element = display.Element( + kind, + x, + y, + open=open, + high=high, + low=low, + close=close, + volume=volume, + x_type=x_type, + y_type=y_type, + title=title, + text=text, + mode=mode, + line_shape=line_shape, + own_xaxis=own_xaxis, + own_yaxis=own_yaxis, + type=commons_enums.DisplayedElementTypes.CHART.value, + color=color, + size=size, + symbol=symbol + ) + self.elements.append(element) + + def table( + self, + name, + columns, + rows, + searches + ): + element = display.Element( + None, + None, + None, + title=name, + columns=columns, + rows=rows, + searches=searches, + type=commons_enums.DisplayedElementTypes.TABLE.value + ) + self.elements.append(element) + + def value(self, label, value): + element = display.Element( + None, + None, + None, + title=label, + value=str(value), + type=commons_enums.DisplayedElementTypes.VALUE.value + ) + self.elements.append(element) + + def html_value(self, html): + element = display.Element( + None, + None, + None, + html=html, + type=commons_enums.DisplayedElementTypes.VALUE.value + ) + self.elements.append(element) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/__init__.py new file mode 100644 index 0000000000..d79ea902ce --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/__init__.py @@ -0,0 +1,32 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from .data import * +from .UI import * +from .orders import * +from .TA import * +from .settings import * +from .backtesting import * +from .alerts import * +from .configuration import * +from .exchanges import * + +# shortcut to octobot-trading keywords +from octobot_trading.modes.script_keywords.basic_keywords import * +from octobot_trading.modes.script_keywords.dsl import * +from octobot_trading.modes.script_keywords.context_management import Context +from octobot_trading.enums import * +from octobot_commons.enums import BacktestingMetadata, DBTables, DBRows diff --git a/packages/tentacles/Meta/Keywords/scripting_library/alerts/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/alerts/__init__.py new file mode 100644 index 0000000000..fa0d14e56a --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/alerts/__init__.py @@ -0,0 +1 @@ +from .notifications import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/alerts/notifications.py b/packages/tentacles/Meta/Keywords/scripting_library/alerts/notifications.py new file mode 100644 index 0000000000..a0dc197f19 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/alerts/notifications.py @@ -0,0 +1,12 @@ +import octobot_services.api as services_api +import octobot_services.enums as services_enum + + +async def send_alert(title, alert_content, + level: services_enum.NotificationLevel = services_enum.NotificationLevel.INFO, + sound=services_enum.NotificationSound.NO_SOUND): + await services_api.send_notification(services_api.create_notification(alert_content, title=title, level=level, + markdown_text=alert_content, + sound=sound, + category=services_enum. + NotificationCategory.TRADING_SCRIPT_ALERTS)) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/__init__.py new file mode 100644 index 0000000000..6c8f68951c --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/__init__.py @@ -0,0 +1,23 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from .metadata import * +from .run_data_analysis import * +from .backtesting_data_selector import * +from .backtesting_settings import * +from .default_backtesting_run_analysis_script import * +from .backtesting_intialization import * +from .backtesting_data_collector import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_data_collector.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_data_collector.py new file mode 100644 index 0000000000..85052d66eb --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_data_collector.py @@ -0,0 +1,418 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import contextlib +import time +import typing +import datetime + +import octobot_commons +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.profiles as commons_profiles +import octobot_commons.timestamp_util as timestamp_util +import octobot_commons.symbols +import octobot_commons.logging + +import octobot_trading.exchanges +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.util.test_tools.exchanges_test_tools as exchanges_test_tools +import octobot_tentacles_manager.api + +import octobot.community +import octobot.enums +import octobot.constants as constants + +import tentacles.Meta.Keywords.scripting_library.errors as scr_errors +import tentacles.Meta.Keywords.scripting_library.configuration as scr_configuration +import tentacles.Meta.Keywords.scripting_library.exchanges as src_exchanges +import tentacles.Meta.Keywords.scripting_library.constants as scr_constants + +import tentacles.Meta.Keywords.scripting_library.errors as errors + + +async def init_exchange_market_status_and_populate_backtesting_exchange_data( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, + backend_type: typing.Optional[octobot.enums.CommunityHistoricalBackendType] = None, +) -> exchange_data_import.ExchangeData: + """ + Initializes the exchange market status and populates the backtesting exchange data. + If a backend type is provided, it will use the historical client to populate the backtesting exchange data. + Otherwise, it will use the ccxt exchange manager to populate the backtesting exchange data. + """ + async with data_collector_ccxt_exchange_manager( + profile_data, exchange_data + ) as exchange_manager: + if backend_type is not None: + async with octobot.community.history_backend_client( + backend_type=backend_type + ) as historical_client: + return await populate_backtesting_exchange_data_from_historical_client( + exchange_data, profile_data, historical_client, exchange_manager.exchange_name + ) + return await fetch_and_populate_backtesting_exchange_data( + exchange_data, profile_data, exchange_manager + ) + + +async def fetch_and_populate_backtesting_exchange_data( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, + exchange_manager: octobot_trading.exchanges.ExchangeManager, +) -> exchange_data_import.ExchangeData: + start_time, end_time, time_frames, symbols = _get_backtesting_run_details(profile_data) + for time_frame in time_frames: + await exchanges_test_tools.add_symbols_details( + exchange_manager, symbols, time_frame.value, exchange_data, + start_time=start_time, end_time=end_time, + close_price_only=False, + include_latest_candle=False, + ) + first_candle_times = [] + for market in exchange_data.markets: + first_candle_times.append(market.time[0]) + _ensure_start_time(exchange_data, start_time, first_candle_times) + return exchange_data + + +def _get_backtesting_run_details( + profile_data: commons_profiles.ProfileData, +) -> (float, float, list[common_enums.TimeFrames], list[str]): + start_time = get_backtesting_start_time(profile_data) + end_time = time.time() + time_frames = [ + common_enums.TimeFrames(tf) + for tf in scr_configuration.get_time_frames(profile_data, for_historical_data=True) + ] + if ( + scr_configuration.requires_price_update_timeframe(profile_data) + and scr_constants.PRICE_UPDATE_TIME_FRAME.value not in time_frames + ): + time_frames.append(scr_constants.PRICE_UPDATE_TIME_FRAME) + symbols = profile_data.get_traded_symbols() + return start_time, end_time, time_frames, symbols + + +def get_backtesting_start_time( + profile_data: commons_profiles.ProfileData +) -> float: + return time.time() - profile_data.backtesting_context.start_time_delta + + +def iter_fetched_ohlcvs(ohlcvs: list[list[typing.Union[float, str]]]): + ohlcvs_by_symbol = {} + for ohlcv in ohlcvs: + time_frame = ohlcv[0] + symbol = ohlcv[1] + candles = ohlcv[2:] + if symbol not in ohlcvs_by_symbol: + ohlcvs_by_symbol[symbol] = {} + if time_frame not in ohlcvs_by_symbol[symbol]: + ohlcvs_by_symbol[symbol][time_frame] = [] + ohlcvs_by_symbol[symbol][time_frame].append(candles) + for symbol, time_frames in ohlcvs_by_symbol.items(): + for time_frame, ohlcvs in time_frames.items(): + yield symbol, time_frame, ohlcvs + + +async def populate_backtesting_exchange_data_from_historical_client( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, + historical_client: octobot.community.HistoricalBackendClient, + exchange_name: str +) -> exchange_data_import.ExchangeData: + start_time, end_time, time_frames, symbols = _get_backtesting_run_details(profile_data) + first_traded_symbols, last_traded_symbols, first_historical_config_time = ( + scr_configuration.get_oldest_historical_config_symbols_and_time(profile_data, start_time) + ) + exchange_data.exchange_details.name = profile_data.backtesting_context.exchanges[0] # todo handle multi exchanges + scr_configuration.set_backtesting_portfolio(profile_data, exchange_data) + exchange_data, updated_start_time = await update_backtesting_symbols_data( + historical_client, profile_data, exchange_name, symbols, time_frames, exchange_data, start_time, end_time, + first_traded_symbols, last_traded_symbols, first_historical_config_time + ) + if not scr_configuration.can_convert_ref_market_to_usd_like(exchange_data, profile_data): + # usd like convert + try: + usd_like_time_frame = time_frames[0] + symbol = await find_usd_like_symbol_from_available_history( + historical_client, exchange_data.exchange_details.name, + profile_data.trading.reference_market, usd_like_time_frame, updated_start_time, end_time, + ) + await update_backtesting_symbols_data( + historical_client, profile_data, exchange_name, [symbol], [usd_like_time_frame], + exchange_data, updated_start_time, end_time, [symbol], [symbol], + first_historical_config_time, close_price_only=True, + ) + except scr_errors.InvalidBacktestingDataError as err: + # can't convert ref market into usd like value + _get_logger().error(f"Can't convert ref market into usd like value: {err}") + except KeyError as err: + # can't convert ref market into usd like value + _get_logger().error( + f"Can't convert ref market into usd like value: missing {err} timeframe values" + ) + return exchange_data + + +async def init_backtesting_exchange_market_status_cache( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, +): + async with data_collector_ccxt_exchange_manager(profile_data, exchange_data): + # nothing to do, initializing the exchange manager is enough to fetch market statuses + pass + + +@contextlib.asynccontextmanager +async def data_collector_ccxt_exchange_manager( + profile_data: commons_profiles.ProfileData, + exchange_data: exchange_data_import.ExchangeData, +): + exchange_data.exchange_details.name = profile_data.backtesting_context.exchanges[0] + tentacles_setup_config = octobot_tentacles_manager.api.get_full_tentacles_setup_config() + exchange_config_by_exchange = profile_data.get_config_by_tentacle() + async with src_exchanges.local_ccxt_exchange_manager( + exchange_data, tentacles_setup_config, + exchange_config_by_exchange=exchange_config_by_exchange, + ) as exchange_manager: + try: + yield exchange_manager + except Exception as err: + _get_logger().exception(err) + raise + + +async def fetch_candles_history_range( + historical_client: octobot.community.HistoricalBackendClient, + exchange: str, symbol: str, time_frame: common_enums.TimeFrames +) -> (float, float): + return await historical_client.fetch_candles_history_range(exchange, symbol, time_frame) + + +async def find_usd_like_symbol_from_available_history( + historical_client: octobot.community.HistoricalBackendClient, + exchange_name: str, base: str, time_frame: common_enums.TimeFrames, + first_open_time: float, last_open_time: float, +) -> str: + for usd_like_coin in common_constants.USD_LIKE_COINS: + symbol = octobot_commons.symbols.merge_currencies(base, usd_like_coin) + first_candle_time, last_candle_time = await fetch_candles_history_range( + # always use production db + historical_client, exchange_name, symbol, time_frame + ) + if not (last_candle_time and first_candle_time): + continue + try: + ensure_compatible_candle_time( + exchange_name, symbol, time_frame, + first_open_time, last_open_time, first_candle_time, last_candle_time, + True, True, True, first_open_time, + False + ) + # did not raise: symbol can be used + return symbol + except scr_errors.InvalidBacktestingDataError: + # can't use this symbol, proceed to the next one + continue + raise scr_errors.InvalidBacktestingDataError( + f"No USD-like up to date candles found to convert {base} into USD-like on {exchange_name} {time_frame.value} " + f"for first_open_time={first_open_time} last_open_time={last_open_time}" + ) + + +async def update_backtesting_symbols_data( + historical_client: octobot.community.HistoricalBackendClient, + profile_data: commons_profiles.ProfileData, + exchange_name: str, symbols: list, time_frames: list, + exchange_data: exchange_data_import.ExchangeData, + start_time: float, end_time: float, + first_traded_symbols: list, last_traded_symbols: list, first_traded_symbols_time: float, + close_price_only: bool = False, + requires_traded_symbol_prices_at_all_time: bool = True, +) -> (exchange_data_import.ExchangeData, float): + updated_start_times = [] + is_custom_strategy = octobot.community.models.is_custom_strategy_profile(profile_data) + # can adapt backtesting start and end time on custom strategies that require symbol prices at all time + allow_any_backtesting_start_and_end_time = is_custom_strategy and requires_traded_symbol_prices_at_all_time + + all_ohlcvs = await historical_client.fetch_extended_candles_history( + exchange_name, symbols, time_frames, start_time, end_time + ) + + for symbol, str_time_frame, ohlcvs in iter_fetched_ohlcvs(all_ohlcvs): + time_frame = common_enums.TimeFrames(str_time_frame) + # do not take current incomplete candle into account + last_open_time = end_time - common_enums.TimeFramesMinutes[time_frame] * common_constants.MINUTE_TO_SECONDS + # When symbol in is first_traded_symbols, it should be available from the start + # EXCEPT for custom strategies that might require trading pairs that don't exist for long enough + # (when compatible with trading mode). + # Otherwise, when it is available doesn't really matter. + # If it's not available from the start, adapt start time to start as early as possible, + # latest being first_traded_symbols_time. + required_from_the_start = symbol in first_traded_symbols and ( + requires_traded_symbol_prices_at_all_time or not is_custom_strategy + ) + required_till_the_end = symbol in last_traded_symbols + updated_start_time = ensure_ohlcv_validity( + ohlcvs, exchange_name, symbol, time_frame, start_time, last_open_time, + required_from_the_start, required_till_the_end, first_traded_symbols_time, + allow_any_backtesting_start_and_end_time + ) + if updated_start_time is not None: + updated_start_times.append(updated_start_time) + exchange_data.markets.append(exchange_data_import.MarketDetails( + symbol=symbol, + time_frame=time_frame.value, + close=[ohlcv[common_enums.PriceIndexes.IND_PRICE_CLOSE.value] for ohlcv in ohlcvs], + open=[ohlcv[common_enums.PriceIndexes.IND_PRICE_OPEN.value] for ohlcv in ohlcvs] + if not close_price_only else [], + high=[ohlcv[common_enums.PriceIndexes.IND_PRICE_HIGH.value] for ohlcv in ohlcvs] + if not close_price_only else [], + low=[ohlcv[common_enums.PriceIndexes.IND_PRICE_LOW.value] for ohlcv in ohlcvs] + if not close_price_only else [], + volume=[ohlcv[common_enums.PriceIndexes.IND_PRICE_VOL.value] for ohlcv in ohlcvs] + if not close_price_only else [], + time=[ohlcv[common_enums.PriceIndexes.IND_PRICE_TIME.value] for ohlcv in ohlcvs], + )) + updated_start_time = _ensure_start_time( + exchange_data, start_time, updated_start_times + ) + return exchange_data, updated_start_time + + +def _ensure_start_time( + exchange_data: exchange_data_import.ExchangeData, ideal_start_time: float, updated_start_times: list[float] +) -> float: + updated_start_time = max(updated_start_times) if updated_start_times else ideal_start_time + if updated_start_time != ideal_start_time: + # start time changed: remove extra candles + _get_logger().warning( + f"Adapting backtesting start time according to data availability. " + f"Updated start time: {timestamp_util.convert_timestamp_to_datetime(updated_start_time)}. " + f"Initial start time: {timestamp_util.convert_timestamp_to_datetime(ideal_start_time)}" + ) + adapt_exchange_data_for_updated_start_time(exchange_data, updated_start_time) + return updated_start_time + + +def ensure_ohlcv_validity( + ohlcvs: list, exchange: str, symbol: str, time_frame: common_enums.TimeFrames, + start_time: float, last_open_time: float, required_from_the_start: bool, required_till_the_end: bool, + first_traded_symbols_time: float, allow_any_backtesting_start_and_end_time: bool +) -> typing.Optional[float]: + if not ohlcvs: + raise errors.InvalidBacktestingDataError(f"No {symbol} {time_frame.value} {exchange} OHLCV data") + # ensure history is going approximately to start_time + first_candle_time = ohlcvs[0][common_enums.PriceIndexes.IND_PRICE_TIME.value] + last_candle_time = ohlcvs[-1][common_enums.PriceIndexes.IND_PRICE_TIME.value] + return ensure_compatible_candle_time( + exchange, symbol, time_frame, start_time, last_open_time, first_candle_time, last_candle_time, + False, required_from_the_start, required_till_the_end, first_traded_symbols_time, + allow_any_backtesting_start_and_end_time + ) + + +def adapt_exchange_data_for_updated_start_time( + exchange_data: exchange_data_import.ExchangeData, first_candle_time: float +): + _get_logger().info(f"Filtering out backtesting candles to start at {first_candle_time}") + for market in exchange_data.markets: + market.time = [ + candle_time + for candle_time in market.time + if candle_time >= first_candle_time + ] + market.close = market.close[-len(market.time):] + market.open = market.open[-len(market.time):] + market.high = market.high[-len(market.time):] + market.low = market.low[-len(market.time):] + market.volume = market.volume[-len(market.time):] + + +def ensure_compatible_candle_time( + exchange: str, symbol: str, time_frame: common_enums.TimeFrames, + first_open_time: float, last_open_time: float, first_candle_time: float, last_candle_time: float, + allow_candles_beyond_range: bool, required_from_the_start: bool, required_till_the_end: bool, + first_traded_symbols_time: float, allow_any_backtesting_start_and_end_time: bool +) -> typing.Optional[float]: + adapted_start_time = None + # ensure history is going approximately to first_open_time + if not allow_candles_beyond_range: + # first_candle_time starting before the first_open_time (more candles than required) + if first_candle_time < first_open_time - constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW: + raise errors.InvalidBacktestingDataError( + f"{symbol} {time_frame.value} {exchange} OHLCV data starts too early " + f"({first_candle_time} vs {first_open_time})" + ) + time_frame_seconds = common_enums.TimeFramesMinutes[time_frame] * common_constants.MINUTE_TO_SECONDS + if first_candle_time > first_open_time + time_frame_seconds: + if required_from_the_start: + max_allowed_delayed_start = first_traded_symbols_time + constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW + # missing initial candles, align start time to the first candle time when possible + if allow_any_backtesting_start_and_end_time or first_candle_time < max_allowed_delayed_start: + adapted_start_time = first_candle_time + _get_logger().info( + f"{symbol} {time_frame.value} {exchange} OHLCV data starts too late " + f"({first_candle_time} vs {first_open_time}): this is acceptable, start time is adapted to " + f"{first_candle_time} (delta: {datetime.timedelta(seconds=first_candle_time - first_open_time)})" + ) + else: + raise errors.InvalidBacktestingDataError( + f"{symbol} {time_frame.value} {exchange} OHLCV data starts too late " + f"({first_candle_time} vs {first_open_time})" + ) + else: + _get_logger().info( + f"{symbol} {time_frame.value} {exchange} OHLCV data starts too late " + f"({first_candle_time} vs {first_open_time}): this is acceptable, this symbol is not required from " + f"the start" + ) + # ensure history is going approximately until last_open_time + if not allow_candles_beyond_range: + # last_open_time ending after the last_candle_time (more candles than required) + if last_open_time < last_candle_time: + raise errors.InvalidBacktestingDataError( + f"{symbol} {time_frame.value} {exchange} OHLCV data ends too late ({last_open_time} vs {last_candle_time})" + ) + + if last_open_time - constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW > last_candle_time: + if required_till_the_end: + raise errors.InvalidBacktestingDataError( + f"{symbol} {time_frame.value} {exchange} OHLCV data ends too early ({last_candle_time} vs {last_open_time})" + ) + else: + _get_logger().info( + f"{symbol} {time_frame.value} {exchange} OHLCV data ends too early " + f"({last_candle_time} vs {last_open_time}): this is acceptable, this symbol is not required till " + f"the end of the run" + ) + if adapted_start_time is not None and not allow_any_backtesting_start_and_end_time: + # ensure adapted_start_time is not reducing too much the global backtesting duration + ideal_duration = last_open_time - first_open_time + adapted_duration = last_candle_time - adapted_start_time + if adapted_duration < ideal_duration * constants.BACKTESTING_MIN_DURATION_RATIO: + raise errors.InvalidBacktestingDataError( + f"{symbol} {time_frame.value} {exchange} OHLCV adapted backtesting start time starts too late resulting " + f"in a {round(adapted_duration/common_constants.DAYS_TO_SECONDS, 1)} days backtesting duration " + f"vs {round(ideal_duration/common_constants.DAYS_TO_SECONDS, 1)} ideal days. Min allowed is " + f"{round(ideal_duration * constants.BACKTESTING_MIN_DURATION_RATIO / common_constants.DAYS_TO_SECONDS, 1)} days." + ) + return adapted_start_time + + +def _get_logger(): + return octobot_commons.logging.get_logger("ScriptingBacktestingDataCollector") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_data_selector.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_data_selector.py new file mode 100644 index 0000000000..d9a6000fb7 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_data_selector.py @@ -0,0 +1,57 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.api as backtesting_api +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_public_data as exchange_public_data + + +def backtesting_start_time(ctx): + return backtesting_api.get_backtesting_starting_time(ctx.exchange_manager.exchange.backtesting) + + +def backtesting_first_full_candle_time(ctx): + return _align_time_to_time_frame(backtesting_start_time(ctx), ctx.time_frame, False) + + +async def backtesting_is_first_full_candle(ctx): + current_t = await exchange_public_data.current_candle_time(ctx) + first_c = _align_time_to_time_frame(backtesting_start_time(ctx), ctx.time_frame, False) + return current_t == first_c + + +def backtesting_end_time(ctx): + return backtesting_api.get_backtesting_ending_time(ctx.exchange_manager.exchange.backtesting) + + +def backtesting_last_full_candle_time(ctx): + return _align_time_to_time_frame(backtesting_end_time(ctx), ctx.time_frame, True) + + +def _align_time_to_time_frame(reference_time, time_frame, align_backwards): + time_frame_sec = commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(time_frame)] \ + * commons_constants.MINUTE_TO_SECONDS + time_delta = reference_time % time_frame_sec + if align_backwards: + # the last full candle time is the backtesting end time moved back to the start of the candle + potential_candle_time = reference_time - time_frame_sec + else: + # the first full candle time the backtesting start time moved forward to the start of the 1st candle + potential_candle_time = reference_time - time_frame_sec + time_delta = time_frame_sec - time_delta if time_delta > 0 else 0 + # align back to the UTC time + return potential_candle_time - time_delta if align_backwards else potential_candle_time + time_delta + diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_intialization.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_intialization.py new file mode 100644 index 0000000000..bd2a94b5e4 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_intialization.py @@ -0,0 +1,174 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import logging +import contextlib +import typing + +import octobot_commons.profiles as commons_profiles +import octobot_commons.configuration as commons_configuration +import octobot_commons.logging as commons_logging +import octobot_commons.symbols as commons_symbols +import octobot_commons.list_util as list_util +import octobot_commons.enums as common_enums + +import octobot_backtesting.backtest_data +import octobot_backtesting.api + +import octobot_tentacles_manager.configuration +import octobot_tentacles_manager.api + +import octobot.backtesting.independent_backtesting +import octobot.backtesting.minimal_data_importer as minimal_data_importer + +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.api + +import tentacles.Meta.Keywords.scripting_library as scripting_library + + +@contextlib.asynccontextmanager +async def init_and_run_backtesting( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, +) -> typing.AsyncGenerator[octobot.backtesting.independent_backtesting.IndependentBacktesting, None]: + """ + Initialize and run backtesting. + Usage: + async with init_and_run_backtesting(exchange_data, profile_data) as independent_backtesting: + # use independent_backtesting to get backtesting results before it gets stopped + """ + async with run_backtesting( + exchange_data, + profile_data, + scripting_library.create_backtesting_config(profile_data, exchange_data), + octobot_tentacles_manager.api.get_full_tentacles_setup_config(profile_data), + ) as independent_backtesting: + yield independent_backtesting + + +@contextlib.asynccontextmanager +async def run_backtesting( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, + backtesting_config: commons_configuration.Configuration, + tentacles_config: octobot_tentacles_manager.configuration.TentaclesSetupConfiguration, + enable_logs: bool = False, +) -> typing.AsyncGenerator[octobot.backtesting.independent_backtesting.IndependentBacktesting, None]: + with octobot_tentacles_manager.configuration.local_get_config_proxy(scripting_library.empty_config_proxy): + backtest_data = await _init_backtest_data( + exchange_data, backtesting_config, tentacles_config + ) + independent_backtesting = None + try: + with commons_logging.temporary_log_level(logging.INFO): + independent_backtesting = _init_independent_backtesting( + exchange_data, profile_data, backtest_data, enable_logs=enable_logs + ) + await independent_backtesting.initialize_and_run(log_errors=True) + await independent_backtesting.join_backtesting_updater(None) + # independent_backtesting.log_report() # uncomment to debug + yield independent_backtesting + finally: + if independent_backtesting is not None: + with commons_logging.temporary_log_level(logging.INFO): + await independent_backtesting.clear_fetched_data() + await independent_backtesting.stop(memory_check=False, should_raise=False) + + +def _init_independent_backtesting( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData, + backtest_data: octobot_backtesting.backtest_data.BacktestData, + enable_logs: bool = False, + services_config: typing.Optional[dict] = None, +) -> "octobot.backtesting.independent_backtesting.IndependentBacktesting": + independent_backtesting = octobot.backtesting.independent_backtesting.IndependentBacktesting( + backtest_data.config, + backtest_data.tentacles_config, + backtest_data.data_files, + run_on_common_part_only=True, + start_timestamp=None, + end_timestamp=None, + enable_logs=enable_logs, + stop_when_finished=False, + run_on_all_available_time_frames=False, + enforce_total_databases_max_size_after_run=False, + enable_storage=False, + backtesting_data=backtest_data, + config_by_tentacle={ + tentacle.name: tentacle.config + for tentacle in profile_data.tentacles + }, + services_config=services_config or {}, + ) + independent_backtesting.symbols_to_create_exchange_classes.update({ + exchange: [ + commons_symbols.parse_symbol(s) + for s in list_util.deduplicate([ + market_details.symbol + for market_details in exchange_data.markets + if market_details.has_full_candles() + ]) + ] + for exchange in [exchange_data.exchange_details.name] # TODO handle multi exchanges + }) + return independent_backtesting + + +async def _init_backtest_data( + exchange_data: exchange_data_import.ExchangeData, + backtesting_config: commons_configuration.Configuration, + tentacles_config: octobot_tentacles_manager.configuration.TentaclesSetupConfiguration, +) -> octobot_backtesting.backtest_data.BacktestData: + backtest_data = await octobot_backtesting.api.create_and_init_backtest_data( + [], backtesting_config.config, tentacles_config, True + ) + backtest_data.use_cached_markets = True + await _init_importers(exchange_data, backtest_data) + importer = next(iter(backtest_data.importers_by_data_file.values())) + start_time, end_time = await importer.get_data_timestamp_interval() + await _init_preloaded_candle_managers(exchange_data, backtest_data, start_time, end_time) + return backtest_data + + +async def _init_importers( + exchange_data: exchange_data_import.ExchangeData, + backtest_data: octobot_backtesting.backtest_data.BacktestData, +): + backtest_data.data_files = [f"simulated_{exchange_data.exchange_details.name}_file.data"] + backtest_data.default_importer = minimal_data_importer.MinimalDataImporter # type: ignore + await backtest_data.initialize() + for importer in backtest_data.importers_by_data_file.values(): + importer.update_from_exchange_data(exchange_data) # type: ignore + + +async def _init_preloaded_candle_managers( + exchange_data: exchange_data_import.ExchangeData, + backtest_data: octobot_backtesting.backtest_data.BacktestData, + start_time, + end_time +): + for exchange_details in [exchange_data.exchange_details]: + for market_details in exchange_data.markets: + if not market_details.has_full_candles(): + continue + key = backtest_data._get_key( + exchange_details.name, market_details.symbol, common_enums.TimeFrames(market_details.time_frame), + start_time, end_time + ) + backtest_data.preloaded_candle_managers[key] = await octobot_trading.api.create_preloaded_candles_manager( + market_details.get_formatted_candles() + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_settings.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_settings.py new file mode 100644 index 0000000000..94e0b6569d --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/backtesting_settings.py @@ -0,0 +1,52 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_backtesting.api as backtesting_api + + +def set_backtesting_iteration_timeout(ctx, iteration_timeout_in_seconds: int): + if ctx.exchange_manager.is_backtesting: + backtesting_api.set_iteration_timeout( + ctx.exchange_manager.exchange.backtesting, + iteration_timeout_in_seconds + ) + + +def register_backtesting_timestamp_whitelist(ctx, timestamps, check_callback=None, append_to_whitelist=True): + if check_callback is None: + def _open_order_and_position_check(): + # by default, avoid skipping timestamps when there are open orders or active positions + if ctx.exchange_manager.exchange_personal_data.orders_manager.get_open_orders(): + return True + for position in ctx.exchange_manager.exchange_personal_data.positions_manager.positions.values(): + if not position.is_idle(): + return True + return False + + check_callback = _open_order_and_position_check + if ctx.exchange_manager.is_backtesting and \ + backtesting_api.get_backtesting_timestamp_whitelist(ctx.exchange_manager.exchange.backtesting) \ + != sorted(set(timestamps)): + return backtesting_api.register_backtesting_timestamp_whitelist( + ctx.exchange_manager.exchange.backtesting, + timestamps, + check_callback, + append_to_whitelist=append_to_whitelist + ) + + +def is_registered_backtesting_timestamp_whitelist(ctx): + return ctx.exchange_manager.is_backtesting and \ + backtesting_api.get_backtesting_timestamp_whitelist(ctx.exchange_manager.exchange.backtesting) is not None diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/default_backtesting_run_analysis_script.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/default_backtesting_run_analysis_script.py new file mode 100644 index 0000000000..7b7d0a16f1 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/default_backtesting_run_analysis_script.py @@ -0,0 +1,241 @@ +import datetime as datetime +import json as json + +import octobot_commons.enums as commons_enums +import octobot_commons.pretty_printer as pretty_printer +import octobot_services.constants as services_constants +import tentacles.Meta.Keywords.scripting_library.backtesting.run_data_analysis as run_data_analysis +import octobot_trading.modes.script_keywords as script_keywords + + +async def default_backtesting_analysis_script(ctx: script_keywords.Context): + async with ctx.backtesting_results() as (run_data, run_display): + historical_values = await run_data_analysis.load_historical_values(run_data, None) + if ctx.backtesting_analysis_settings["plot_pnl_on_main_chart"]: + with run_display.part("main-chart") as part: + try: + await run_data_analysis.plot_historical_portfolio_value( + run_data, part, + historical_values=historical_values, + ) + await run_data_analysis.plot_historical_pnl_value( + run_data, part, x_as_trade_count=False, + own_yaxis=True, + include_unitary=ctx.backtesting_analysis_settings["plot_trade_gains_on_main_chart"], + historical_values=historical_values, + ) + except Exception as err: + ctx.logger.exception(err, True, f"Error when computing main chant graphs {err}") + with run_display.part("backtesting-run-overview") as part: + try: + if ctx.backtesting_analysis_settings.get("plot_hist_portfolio_on_backtesting_chart", True): + await run_data_analysis.plot_historical_portfolio_value( + run_data, part, + historical_values=historical_values, + ) + if ctx.backtesting_analysis_settings["plot_pnl_on_backtesting_chart"]: + await run_data_analysis.plot_historical_pnl_value( + run_data, part, x_as_trade_count=False, + own_yaxis=True, + include_unitary=ctx.backtesting_analysis_settings["plot_trade_gains_on_backtesting_chart"], + historical_values=historical_values, + ) + if ctx.backtesting_analysis_settings["plot_best_case_growth_on_backtesting_chart"]: + await run_data_analysis.plot_best_case_growth( + run_data, part, x_as_trade_count=True, own_yaxis=False, + historical_values=historical_values, + ) + if ctx.backtesting_analysis_settings["plot_funding_fees_on_backtesting_chart"]: + await run_data_analysis.plot_historical_funding_fees( + run_data, part, own_yaxis=True, + ) + if ctx.backtesting_analysis_settings["plot_wins_and_losses_count_on_backtesting_chart"]: + await run_data_analysis.plot_historical_wins_and_losses( + run_data, part, own_yaxis=True, x_as_trade_count=False, + historical_values=historical_values, + ) + if ctx.backtesting_analysis_settings["plot_win_rate_on_backtesting_chart"]: + await run_data_analysis.plot_historical_win_rates( + run_data, part, own_yaxis=True, x_as_trade_count=False, + historical_values=historical_values, + ) + # await plot_withdrawals(run_data, part) + except Exception as err: + ctx.logger.exception(err, True, f"Error when computing run overview graphs {err}") + if ctx.backtesting_analysis_settings["display_backtest_details"]: + with run_display.part("backtesting-details", "value") as part: + try: + backtesting_report = await get_backtesting_report_template( + run_data, ctx.backtesting_analysis_settings, historical_values + ) + await run_data_analysis.display_html(part, backtesting_report) + except Exception as err: + ctx.logger.exception(err, True, f"Error when computing details part {err}") + if ctx.backtesting_analysis_settings["display_trades_and_positions"]: + with run_display.part("list-of-trades-part", "table") as part: + try: + await run_data_analysis.plot_trades(run_data, part, historical_values=historical_values) + await run_data_analysis.plot_orders(run_data, part, historical_values=historical_values) + await run_data_analysis.plot_positions(run_data, part) + # await plot_table(run_data, part, "SMA 1") # plot any cache key as a table + except Exception as err: + ctx.logger.exception(err, True, f"Error when computing trades part {err}") + return run_display + + +async def get_backtesting_report_template(run_data, backtesting_analysis_settings, historical_values): + price_data, _, _, _, _, metadata = historical_values + optimizer_id_display = get_column_display(commons_enums.BacktestingMetadata.OPTIMIZER_ID.value, + commons_enums.BacktestingMetadata.OPTIMIZER_ID.value) \ + if commons_enums.BacktestingMetadata.OPTIMIZER_ID.value in metadata.keys() else "" + paid_fees_display = get_column_display(services_constants.PAID_FEES_STR, + metadata["paid_fees"]) if "paid_fees" in metadata.keys() else "" + performance_summary = "" + reference_market = metadata[commons_enums.DBRows.REFERENCE_MARKET.value] + if backtesting_analysis_settings.get("display_backtest_details_performances", True): + start_portfolio_value, end_portfolio_value = await run_data_analysis.get_portfolio_values( + run_data, historical_values=historical_values + ) + gains = f"{pretty_printer.get_min_string_from_number(metadata[commons_enums.BacktestingMetadata.GAINS.value])} " \ + f"({pretty_printer.get_min_string_from_number(metadata[commons_enums.BacktestingMetadata.PERCENT_GAINS.value])}%)" + performance_summary \ + += get_section_display("Performance", + get_column_display(commons_enums.BacktestingMetadata.START_PORTFOLIO.value, + get_portfolio_display( + metadata[commons_enums.BacktestingMetadata.START_PORTFOLIO.value] + )) + + get_column_display(commons_enums.BacktestingMetadata.END_PORTFOLIO.value, + get_portfolio_display( + metadata[ + commons_enums.BacktestingMetadata.END_PORTFOLIO.value])) + + get_column_display(f"{commons_enums.BacktestingMetadata.START_PORTFOLIO.value} " + f"{reference_market} value", + start_portfolio_value) + + get_column_display(f"{commons_enums.BacktestingMetadata.END_PORTFOLIO.value} " + f"{reference_market} value", + end_portfolio_value) + + get_column_display(f"{reference_market} gains", gains) + + get_column_display( + commons_enums.BacktestingMetadata.MARKETS_PROFITABILITY.value, + metadata.get(commons_enums.BacktestingMetadata.MARKETS_PROFITABILITY.value, {}) + ) + + get_column_display( + commons_enums.BacktestingMetadata.TRADES.value + " (entries and exits)", + metadata[commons_enums.BacktestingMetadata.TRADES.value]) + # todo fix those values + # + get_column_display(commons_enums.BacktestingMetadata.ENTRIES.value, + # metadata[commons_enums.BacktestingMetadata.ENTRIES.value]) + + # get_column_display(commons_enums.BacktestingMetadata.WINS.value, + # metadata[commons_enums.BacktestingMetadata.WINS.value]) + + # get_column_display(commons_enums.BacktestingMetadata.LOSES.value, + # metadata[commons_enums.BacktestingMetadata.LOSES.value]) + + # get_column_display(commons_enums.BacktestingMetadata.WIN_RATE.value, + # metadata[commons_enums.BacktestingMetadata.WIN_RATE.value]) + + # get_column_display(commons_enums.BacktestingMetadata.DRAW_DOWN.value, + # metadata[commons_enums.BacktestingMetadata.DRAW_DOWN.value]) + + # get_column_display( + # commons_enums.BacktestingMetadata.COEFFICIENT_OF_DETERMINATION_MAX_BALANCE.value, + # metadata[commons_enums.BacktestingMetadata + # .COEFFICIENT_OF_DETERMINATION_MAX_BALANCE.value]) + + # paid_fees_display + ) + if backtesting_analysis_settings.get("display_backtest_details_general", True): + performance_summary \ + += get_section_display("General", + get_column_display(commons_enums.BacktestingMetadata.NAME.value, + metadata[commons_enums.BacktestingMetadata.NAME.value]) + + get_column_display(commons_enums.BacktestingMetadata.OPTIMIZATION_CAMPAIGN.value, + metadata[commons_enums.BacktestingMetadata. + OPTIMIZATION_CAMPAIGN.value]) + + optimizer_id_display + + get_column_display(commons_enums.BacktestingMetadata.ID.value, + metadata[commons_enums.BacktestingMetadata.ID.value]) + + get_column_display(commons_enums.DBRows.EXCHANGES.value, + metadata[commons_enums.DBRows.EXCHANGES.value]) + + get_column_display(commons_enums.BacktestingMetadata.BACKTESTING_FILES.value, + metadata[commons_enums.BacktestingMetadata.BACKTESTING_FILES.value])) + if backtesting_analysis_settings.get("display_backtest_details_details", True): + performance_summary \ + += get_section_display("Details", + get_column_display(commons_enums.BacktestingMetadata.TIME_FRAMES.value, + get_badges_from_list( + metadata[commons_enums.BacktestingMetadata.TIME_FRAMES.value])) + + get_column_display(commons_enums.BacktestingMetadata.START_TIME.value, + datetime.datetime.fromtimestamp( + metadata[commons_enums.DBRows.START_TIME.value])) + + get_column_display(commons_enums.BacktestingMetadata.END_TIME.value, + datetime.datetime.fromtimestamp( + metadata[commons_enums.DBRows.END_TIME.value])) + + get_column_display(commons_enums.BacktestingMetadata.SYMBOLS.value, + get_badges_from_list( + metadata[commons_enums.BacktestingMetadata.SYMBOLS.value])) + + get_column_display(f"{commons_enums.BacktestingMetadata.DURATION.value} (s)", + metadata[commons_enums.BacktestingMetadata.DURATION.value]) + + get_column_display(commons_enums.BacktestingMetadata.LEVERAGE.value, + metadata[commons_enums.BacktestingMetadata.LEVERAGE.value]) + + get_column_display("Backtesting time", + datetime.datetime.fromtimestamp( + metadata[commons_enums.BacktestingMetadata.TIMESTAMP.value])) + ) + + if backtesting_analysis_settings.get("display_backtest_details_strategy_settings", True): + performance_summary \ + += get_section_display("Strategy Settings", + get_user_inputs_display(metadata) + ) + + return performance_summary + + +def get_section_display(title, content): + return f''' +
+
+

{title}

+
+ {content} +
+ ''' + + +def get_column_display(title, value): + return f''' +
+
+ {title} +
+
+ {', '.join(value) if (isinstance(value, list) and value and isinstance(value[0], (int, float, str))) + else pretty_printer.get_min_string_from_number(value) if isinstance(value, float) + else ', '.join(f"{key}: {val}" for key, val in value.items()) if isinstance(value, dict) + else value} +
+
+ ''' + + +def get_badges_from_list(_list): + _html = "" + for _item in _list: + _html += f'{_item}' + return _html + + +def get_portfolio_display(_dict): + _html = "" + _dict_str = _dict.replace("\'", '"') + _dict_str = json.loads(_dict_str) + for _key in _dict_str: + _html += f'{_key}: {pretty_printer.get_min_string_from_number(_dict_str[_key]["total"])}' + return _html + + +def get_user_inputs_display(metadata): + content = "" + for _evaluator in metadata['user inputs']: + _section_content = "" + for input_name in metadata['user inputs'][_evaluator]: + _section_content += get_column_display(input_name, metadata['user inputs'][_evaluator][input_name]) + + content += get_section_display(_evaluator, _section_content) + return content diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/metadata.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/metadata.py new file mode 100644 index 0000000000..077326eccd --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/metadata.py @@ -0,0 +1,94 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.databases as databases +import octobot_commons.errors as commons_errors +import octobot_commons.enums as commons_enums +import tentacles.Meta.Keywords.scripting_library.data as data + + +def set_script_name(ctx, name): + ctx.tentacle.script_name = name + + +async def _read_backtesting_metadata(optimizer_run_dbs_identifier, metadata_list, optimizer_id): + async with data.MetadataReader.database(optimizer_run_dbs_identifier.get_backtesting_metadata_identifier()) \ + as reader: + try: + metadata = await reader.read() + for metadata_element in metadata: + metadata_element[commons_enums.BacktestingMetadata.OPTIMIZER_ID.value] = optimizer_id + metadata_list += metadata + except commons_errors.DatabaseNotFoundError: + pass + + +async def read_metadata(runs_to_load_settings, trading_mode, include_optimizer_runs=False): + metadata = [] + optimizer_run_dbs_identifiers = [] + run_dbs_identifier = databases.RunDatabasesIdentifier(trading_mode) + try: + campaigns_to_load = runs_to_load_settings["campaigns"] + except KeyError: + campaigns_to_load = runs_to_load_settings["campaigns"] = {} + available_campaigns = await run_dbs_identifier.get_optimization_campaign_names() + campaigns = {} + for optimization_campaign_name in available_campaigns: + if optimization_campaign_name in campaigns_to_load: + if campaigns_to_load[optimization_campaign_name]: + campaigns[optimization_campaign_name] = True + else: + campaigns[optimization_campaign_name] = False + continue + else: + campaigns[optimization_campaign_name] = True + + backtesting_run_dbs_identifier = databases.RunDatabasesIdentifier(trading_mode, optimization_campaign_name, + backtesting_id="1") + if include_optimizer_runs: + optimizer_ids = await backtesting_run_dbs_identifier.get_optimizer_run_ids() + if optimizer_ids: + optimizer_run_dbs_identifiers = [ + databases.RunDatabasesIdentifier(trading_mode, optimization_campaign_name, + optimizer_id=optimizer_id) + for optimizer_id in optimizer_ids] + try: + await _read_backtesting_metadata(backtesting_run_dbs_identifier, metadata, 0) + except commons_errors.DatabaseNotFoundError: + pass + for optimizer_run_dbs_identifier in optimizer_run_dbs_identifiers: + await _read_backtesting_metadata(optimizer_run_dbs_identifier, metadata, + optimizer_run_dbs_identifier.optimizer_id) + return campaigns, metadata + + +async def _read_bot_recording_metadata(run_dbs_identifier, metadata_list): + async with data.MetadataReader.database(run_dbs_identifier.get_bot_live_metadata_identifier()) \ + as reader: + try: + metadata = await reader.read() + metadata_list += metadata + except commons_errors.DatabaseNotFoundError: + pass + + +async def read_bot_recording_runs_metadata(trading_mode): + metadata = [] + run_dbs_identifier = databases.RunDatabasesIdentifier(trading_mode) + try: + await _read_bot_recording_metadata(run_dbs_identifier, metadata) + except commons_errors.DatabaseNotFoundError: + pass + return metadata diff --git a/packages/tentacles/Meta/Keywords/scripting_library/backtesting/run_data_analysis.py b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/run_data_analysis.py new file mode 100644 index 0000000000..236ef8e85f --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/backtesting/run_data_analysis.py @@ -0,0 +1,931 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import json +import sortedcontainers + +import octobot_trading.enums as trading_enums +import octobot_trading.constants as trading_constants +import octobot_trading.exchange_data as trading_exchange_data +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.personal_data.portfolios.portfolio_util as portfolio_util +import octobot_trading.api as trading_api +import octobot_backtesting.api as backtesting_api +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_commons.constants +import octobot_commons.databases as databases +import octobot_commons.enums as commons_enums +import octobot_commons.errors as commons_errors +import octobot_commons.time_frame_manager as time_frame_manager +import octobot_commons.logging + + +def get_logger(): + return octobot_commons.logging.get_logger("BacktestingRunData") + + +async def get_candles(candles_sources, exchange, symbol, time_frame, metadata): + return await backtesting_api.get_all_ohlcvs(candles_sources[0][commons_enums.DBRows.VALUE.value], + exchange, + symbol, + commons_enums.TimeFrames(time_frame), + inferior_timestamp=metadata[commons_enums.DBRows.START_TIME.value], + superior_timestamp=metadata[commons_enums.DBRows.END_TIME.value]) + + +async def get_trades(meta_database, metadata, symbol): + account_type = trading_api.get_account_type_from_run_metadata(metadata) + return await meta_database.get_trades_db(account_type).select( + commons_enums.DBTables.TRADES.value, + (await meta_database.get_trades_db(account_type).search()).symbol == symbol + ) + + +async def get_metadata(meta_database): + return (await meta_database.get_run_db().all(commons_enums.DBTables.METADATA.value))[0] + + +async def get_transactions(meta_database, transaction_type=None, transaction_types=None): + account_type = trading_api.get_account_type_from_run_metadata(await get_metadata(meta_database)) + if transaction_type is not None: + query = (await meta_database.get_transactions_db(account_type).search()).type == transaction_type + elif transaction_types is not None: + query = (await meta_database.get_transactions_db(account_type).search()).type.one_of(transaction_types) + else: + return await meta_database.get_transactions_db(account_type).all(commons_enums.DBTables.TRANSACTIONS.value) + return await meta_database.get_transactions_db(account_type).select(commons_enums.DBTables.TRANSACTIONS.value, + query) + + +async def get_starting_portfolio(meta_database) -> dict: + portfolio = (await meta_database.get_run_db().all(commons_enums.DBTables.METADATA.value))[0][ + commons_enums.BacktestingMetadata.START_PORTFOLIO.value] + return json.loads(portfolio.replace("'", '"')) + + +async def load_historical_values(meta_database, exchange, with_candles=True, + with_trades=True, with_portfolio=True, time_frame=None): + price_data = {} + trades_data = {} + moving_portfolio_data = {} + trading_type = "spot" + metadata = {} + run_global_metadata = {} + try: + starting_portfolio = await get_starting_portfolio(meta_database) + metadata = await get_metadata(meta_database) + run_global_metadata = await meta_database.get_backtesting_metadata_from_run() + + exchange = exchange or meta_database.run_dbs_identifier.context.exchange_name \ + or metadata[commons_enums.DBRows.EXCHANGES.value][0] # TODO handle multi exchanges + ref_market = metadata[commons_enums.DBRows.REFERENCE_MARKET.value] + trading_type = metadata[commons_enums.DBRows.TRADING_TYPE.value] + contracts = metadata[commons_enums.DBRows.FUTURE_CONTRACTS.value][exchange] if trading_type == "future" else {} + # init data + for pair in run_global_metadata[commons_enums.DBRows.SYMBOLS.value]: + symbol = symbol_util.parse_symbol(pair).base + is_inverse_contract = trading_type == "future" and trading_api.is_inverse_future_contract( + trading_enums.FutureContractType(contracts[pair]["contract_type"]) + ) + if symbol != ref_market or is_inverse_contract: + candles_sources = await meta_database.get_symbol_db(exchange, pair).all( + commons_enums.DBTables.CANDLES_SOURCE.value + ) + if time_frame is None: + time_frames = [source[commons_enums.DBRows.TIME_FRAME.value] for source in candles_sources] + time_frame = time_frame_manager.find_min_time_frame(time_frames) if time_frames else time_frame + if with_candles and pair not in price_data: + # convert candles timestamp in millis + raw_candles = await get_candles(candles_sources, exchange, pair, time_frame, metadata) + for candle in raw_candles: + candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] = \ + candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] * 1000 + price_data[pair] = raw_candles + if with_trades and pair not in trades_data: + trades_data[pair] = await get_trades(meta_database, metadata, pair) + if with_portfolio: + try: + moving_portfolio_data[symbol] = starting_portfolio[symbol][ + octobot_commons.constants.PORTFOLIO_TOTAL] + except KeyError: + moving_portfolio_data[symbol] = 0 + try: + moving_portfolio_data[ref_market] = starting_portfolio[ref_market][ + octobot_commons.constants.PORTFOLIO_TOTAL] + except KeyError: + moving_portfolio_data[ref_market] = 0 + except IndexError: + pass + return price_data, trades_data, moving_portfolio_data, trading_type, metadata, run_global_metadata + + +async def backtesting_data(meta_database, data_label): + metadata_from_run = await meta_database.get_backtesting_metadata_from_run() + for key, value in metadata_from_run.items(): + if key == data_label: + return value + account_type = trading_api.get_account_type_from_run_metadata(metadata_from_run) + for reader in meta_database.all_basic_run_db(account_type): + for table in await reader.tables(): + if table == data_label: + return await reader.all(table) + for row in await reader.all(table): + for key, value in row.items(): + if key == data_label: + return value + return None + + +async def _get_grouped_funding_fees(meta_database, group_key): + funding_fees_history = await get_transactions(meta_database, + transaction_type=trading_enums.TransactionType.FUNDING_FEE.value) + funding_fees_history = sorted(funding_fees_history, key=lambda f: f[commons_enums.PlotAttributes.X.value]) + funding_fees_history_by_key = {} + for funding_fee in funding_fees_history: + try: + funding_fees_history_by_key[funding_fee[group_key]].append(funding_fee) + except KeyError: + funding_fees_history_by_key[funding_fee[group_key]] = [funding_fee] + return funding_fees_history_by_key + + +async def plot_historical_funding_fees(meta_database, plotted_element, own_yaxis=True): + funding_fees_history_by_currency = await _get_grouped_funding_fees( + meta_database, + trading_enums.FeePropertyColumns.CURRENCY.value + ) + for currency, fees in funding_fees_history_by_currency.items(): + cumulative_fees = [] + previous_fee = 0 + for fee in fees: + cumulated_fee = fee["quantity"] + previous_fee + cumulative_fees.append(cumulated_fee) + previous_fee = cumulated_fee + plotted_element.plot( + mode="scatter", + x=[fee[commons_enums.PlotAttributes.X.value] for fee in fees], + y=cumulative_fees, + title=f"{currency} paid funding fees", + own_yaxis=own_yaxis, + line_shape="hv") + + +def _position_factory(symbol, contract_data): + # TODO: historical unrealized pnl, maybe find a better solution that this + import mock + class _TraderMock: + def __init__(self): + self.exchange_manager = mock.Mock() + self.simulate = True + + contract = trading_exchange_data.FutureContract( + symbol, + trading_enums.MarginType(contract_data["margin_type"]), + trading_enums.FutureContractType(contract_data["contract_type"]) + ) + return trading_personal_data.create_position_from_type(_TraderMock(), contract) + + +def _evaluate_portfolio(portfolio, price_data, use_start_value): + handled_currencies = [] + value = 0 + + vals = {} + for pair, candles in price_data.items(): + if not candles: + # Ignore symbols without candles to avoid crashing report generation on partial datasets. + continue + candle = candles[0 if use_start_value else len(candles) - 1] + symbol, ref_market = symbol_util.parse_symbol(pair).base_and_quote() + if symbol not in handled_currencies: + value += portfolio.get(symbol, {}).get(octobot_commons.constants.PORTFOLIO_TOTAL, 0) * candle[ + commons_enums.PriceIndexes.IND_PRICE_OPEN.value + ] + vals[symbol] = candle[ + commons_enums.PriceIndexes.IND_PRICE_OPEN.value + ] + handled_currencies.append(symbol) + if ref_market not in handled_currencies: + value += portfolio.get(ref_market, {}).get(octobot_commons.constants.PORTFOLIO_TOTAL, 0) + handled_currencies.append(ref_market) + return value + + +async def get_portfolio_values(meta_database, exchange=None, historical_values=None): + price_data, trades_data, moving_portfolio_data, trading_type, metadata, _ = \ + historical_values or await load_historical_values(meta_database, exchange, with_portfolio=False, with_trades=False) + starting_portfolio = json.loads(metadata[commons_enums.BacktestingMetadata.START_PORTFOLIO.value].replace("'", '"')) + ending_portfolio = json.loads(metadata[commons_enums.BacktestingMetadata.END_PORTFOLIO.value].replace("'", '"')) + return _evaluate_portfolio( + starting_portfolio, + price_data, + True, + ), _evaluate_portfolio( + ending_portfolio, + price_data, + False, + ) + + +async def plot_historical_portfolio_value( + meta_database, plotted_element, exchange=None, own_yaxis=False, historical_values=None +): + price_data, trades_data, moving_portfolio_data, trading_type, metadata, _ = \ + historical_values or await load_historical_values(meta_database, exchange) + price_data_by_time = {} + for symbol, candles in price_data.items(): + price_data_by_time[symbol] = { + candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value]: candle + for candle in candles + } + if trading_type == "future": + # TODO: historical unrealized pnl + pass + for pair in trades_data: + trades_data[pair] = sorted(trades_data[pair], key=lambda tr: tr[commons_enums.PlotAttributes.X.value]) + funding_fees_history_by_pair = await _get_grouped_funding_fees(meta_database, + commons_enums.DBRows.SYMBOL.value) + value_data = sortedcontainers.SortedDict() + pairs = list(trades_data) + if pairs: + pair = pairs[0] + candles = price_data_by_time[pair] + value_data = sortedcontainers.SortedDict({ + t: 0 + for t in candles + }) + trade_index_by_pair = {p: 0 for p in pairs} + funding_fees_index_by_pair = {p: 0 for p in pairs} + # TODO multi exchanges + exchange_name = metadata[commons_enums.DBRows.EXCHANGES.value][0] + # TODO hedge mode with multi position by pair + # if metadata[commons_enums.DBRows.FUTURE_CONTRACTS.value] and \ + # exchange_name in metadata[commons_enums.DBRows.FUTURE_CONTRACTS.value]: + # positions_by_pair = { + # pair: _position_factory(pair, + # metadata[commons_enums.DBRows.FUTURE_CONTRACTS.value][exchange_name][pair]) + # for pair in pairs + # } + # else: + # positions_by_pair = {} + # TODO update position instead of portfolio when filled orders and apply position unrealized pnl to portfolio + for candle_time, ref_candle in candles.items(): + current_candles = {} + for pair in pairs: + if candle_time not in price_data_by_time[pair]: + # no price data for this time in this pair + continue + other_candle = price_data_by_time[pair][candle_time] + current_candles[pair] = other_candle + symbol, ref_market = symbol_util.parse_symbol(pair).base_and_quote() + moving_portfolio_data[ref_market] = moving_portfolio_data.get(ref_market, 0) + moving_portfolio_data[symbol] = moving_portfolio_data.get(symbol, 0) + # part 1: compute portfolio total value after trade update when any + # 1.1: trades + # start iteration where it last stopped to reduce complexity + for trade_index, trade in enumerate(trades_data[pair][trade_index_by_pair[pair]:]): + # handle trades that are both older and at the current candle starting from the last trade index + # (older trades to handle the ones that might be from candles we dont have data on) + if trade[commons_enums.PlotAttributes.X.value] <= candle_time: + if trade[commons_enums.PlotAttributes.SIDE.value] == trading_enums.TradeOrderSide.SELL.value: + moving_portfolio_data[symbol] -= trade[commons_enums.PlotAttributes.VOLUME.value] + moving_portfolio_data[ref_market] += trade[commons_enums.PlotAttributes.VOLUME.value] * \ + trade[commons_enums.PlotAttributes.Y.value] + else: + moving_portfolio_data[symbol] += trade[commons_enums.PlotAttributes.VOLUME.value] + moving_portfolio_data[ref_market] -= trade[commons_enums.PlotAttributes.VOLUME.value] * \ + trade[commons_enums.PlotAttributes.Y.value] + moving_portfolio_data[trade[commons_enums.DBRows.FEES_CURRENCY.value]] -= \ + trade[commons_enums.DBRows.FEES_AMOUNT.value] + + # last trade case: as there is not trade afterwards, the next condition would never be filled, + # force trade_index_by_pair[pair] increment + if all(it_trade[commons_enums.PlotAttributes.X.value] == + trade[commons_enums.PlotAttributes.X.value] + for it_trade in trades_data[pair][trade_index_by_pair[pair]:]): + trade_index_by_pair[pair] += 1 + break + + if trade[commons_enums.PlotAttributes.X.value] > \ + ref_candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value]: + # no need to continue iterating, save current index for new candle + trade_index_by_pair[pair] += trade_index + break + # 1.2: funding fees + # start iteration where it last stopped to reduce complexity + for funding_fee_index, funding_fee \ + in enumerate(funding_fees_history_by_pair.get(pair, [])[funding_fees_index_by_pair[pair]:]): + if funding_fee[commons_enums.PlotAttributes.X.value] == candle_time: + moving_portfolio_data[funding_fee[trading_enums.FeePropertyColumns.CURRENCY.value]] -= \ + funding_fee["quantity"] + if funding_fee[commons_enums.PlotAttributes.X.value] > candle_time: + # no need to continue iterating, save current index for new candle + funding_fees_index_by_pair[pair] = funding_fee_index # TODO + break + # part 2: now that portfolio is up-to-date, compute portfolio total value + handled_currencies = [] + for pair, other_candle in current_candles.items(): + symbol, ref_market = symbol_util.parse_symbol(pair).base_and_quote() + if symbol not in handled_currencies: + value_data[candle_time] = \ + value_data[candle_time] + \ + moving_portfolio_data[symbol] * other_candle[ + commons_enums.PriceIndexes.IND_PRICE_OPEN.value + ] + handled_currencies.append(symbol) + if ref_market not in handled_currencies: + value_data[candle_time] = value_data[candle_time] + moving_portfolio_data[ref_market] + handled_currencies.append(ref_market) + plotted_element.plot( + mode="scatter", + x=list(value_data.keys()), + y=list(value_data.values()), + title="Portfolio value", + own_yaxis=own_yaxis + ) + + +def _read_pnl_from_trades(x_data, pnl_data, cumulative_pnl_data, trades_history, x_as_trade_count): + buy_order_volume_by_price_by_currency = { + symbol_util.parse_symbol(symbol).base: {} + for symbol in trades_history.keys() + } + all_trades = [] + buy_fees = 0 + sell_fees = 0 + for trades in trades_history.values(): + all_trades += trades + for trade in sorted(all_trades, key=lambda x: x[commons_enums.PlotAttributes.X.value]): + currency, ref_market = symbol_util.parse_symbol(trade[commons_enums.DBRows.SYMBOL.value]).base_and_quote() + trade_volume = trade[commons_enums.PlotAttributes.VOLUME.value] + buy_order_volume_by_price = buy_order_volume_by_price_by_currency[currency] + if trade[commons_enums.PlotAttributes.SIDE.value] == trading_enums.TradeOrderSide.BUY.value: + fees = trade[commons_enums.DBRows.FEES_AMOUNT.value] + fees_multiplier = 1 if trade[commons_enums.DBRows.FEES_CURRENCY.value] == currency \ + else 1 / trade[commons_enums.PlotAttributes.Y.value] + paid_fees = fees * fees_multiplier + buy_fees += paid_fees * trade[commons_enums.PlotAttributes.Y.value] + buy_cost = trade_volume * trade[commons_enums.PlotAttributes.Y.value] + if trade[commons_enums.PlotAttributes.Y.value] in buy_order_volume_by_price: + buy_order_volume_by_price[buy_cost / (trade_volume - paid_fees)] += trade_volume - paid_fees + else: + buy_order_volume_by_price[buy_cost / (trade_volume - paid_fees)] = trade_volume - paid_fees + elif trade[commons_enums.PlotAttributes.SIDE.value] == trading_enums.TradeOrderSide.SELL.value: + remaining_sell_volume = trade_volume + volume_by_bought_prices = {} + for order_price in list(buy_order_volume_by_price.keys()): + if buy_order_volume_by_price[order_price] > remaining_sell_volume: + buy_order_volume_by_price[order_price] -= remaining_sell_volume + volume_by_bought_prices[order_price] = remaining_sell_volume + remaining_sell_volume = 0 + elif buy_order_volume_by_price[order_price] == remaining_sell_volume: + buy_order_volume_by_price.pop(order_price) + volume_by_bought_prices[order_price] = remaining_sell_volume + remaining_sell_volume = 0 + else: + # buy_order_volume_by_price[order_price] < remaining_sell_volume + buy_volume = buy_order_volume_by_price.pop(order_price) + volume_by_bought_prices[order_price] = buy_volume + remaining_sell_volume -= buy_volume + if remaining_sell_volume <= 0: + break + if volume_by_bought_prices: + # use total_bought_volume only to avoid taking pre-existing open positions into account + # (ex if started with already 10 btc) + # total obtained (in ref market) – sell order fees – buy costs (in ref market before fees) + buy_cost = sum(price * volume for price, volume in volume_by_bought_prices.items()) + fees = trade[commons_enums.DBRows.FEES_AMOUNT.value] + fees_multiplier = 1 if trade[commons_enums.DBRows.FEES_CURRENCY.value] == ref_market \ + else trade[commons_enums.PlotAttributes.Y.value] + sell_fees += fees * fees_multiplier + local_pnl = trade[commons_enums.PlotAttributes.Y.value] * \ + trade_volume - (fees * fees_multiplier) - buy_cost + pnl_data.append(local_pnl) + cumulative_pnl_data.append(local_pnl + cumulative_pnl_data[-1]) + if x_as_trade_count: + x_data.append(len(pnl_data) - 1) + else: + x_data.append(trade[commons_enums.PlotAttributes.X.value]) + else: + get_logger().error(f"Unknown trade side: {trade}") + + +def _read_pnl_from_transactions(x_data, pnl_data, cumulative_pnl_data, trading_transactions_history, x_as_trade_count): + previous_value = 0 + for transaction in trading_transactions_history: + transaction_pnl = 0 if transaction["realised_pnl"] is None else transaction["realised_pnl"] + transaction_quantity = 0 if transaction["quantity"] is None else transaction["quantity"] + local_quantity = transaction_pnl + transaction_quantity + cumulated_pnl = local_quantity + previous_value + pnl_data.append(local_quantity) + cumulative_pnl_data.append(cumulated_pnl) + previous_value = cumulated_pnl + if x_as_trade_count: + x_data.append(len(pnl_data) - 1) + else: + x_data.append(transaction[commons_enums.PlotAttributes.X.value]) + + +async def _get_historical_pnl(meta_database, plotted_element, include_cumulative, include_unitary, + exchange=None, x_as_trade_count=True, own_yaxis=False, historical_values=None): + # PNL: + # 1. open position: consider position opening fee from PNL + # 2. close position: consider closed amount + closing fee into PNL + # what is a trade ? + # futures: when position going to 0 (from long/short) => trade is closed + # spot: when position lowered => trade is closed + price_data, trades_data, _, _, _, _ = historical_values or await load_historical_values(meta_database, exchange) + if not (price_data and next(iter(price_data.values()))): + return + x_data = [0 if x_as_trade_count + else next(iter(price_data.values()))[0][commons_enums.PriceIndexes.IND_PRICE_TIME.value]] + pnl_data = [0] + cumulative_pnl_data = [0] + trading_transactions_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.TRADING_FEE.value, + trading_enums.TransactionType.FUNDING_FEE.value, + trading_enums.TransactionType.REALISED_PNL.value, + trading_enums.TransactionType.CLOSE_REALISED_PNL.value) + ) + if trading_transactions_history: + # can rely on pnl history + _read_pnl_from_transactions(x_data, pnl_data, cumulative_pnl_data, + trading_transactions_history, x_as_trade_count) + else: + # recreate pnl history from trades + _read_pnl_from_trades(x_data, pnl_data, cumulative_pnl_data, trades_data, x_as_trade_count) + + if not x_as_trade_count: + # x axis is time: add a value at the end of the axis if missing to avoid a missing values at the end feeling + last_time_value = next(iter(price_data.values()))[-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value] + if x_data[-1] != last_time_value: + # append the latest value at the end of the x axis + x_data.append(last_time_value) + pnl_data.append(0) + cumulative_pnl_data.append(cumulative_pnl_data[-1]) + + if include_unitary: + plotted_element.plot( + kind="bar", + x=x_data, + y=pnl_data, + x_type="tick0" if x_as_trade_count else "date", + title="P&L per trade", + own_yaxis=own_yaxis) + + if include_cumulative: + plotted_element.plot( + mode="scatter", + x=x_data, + y=cumulative_pnl_data, + x_type="tick0" if x_as_trade_count else "date", + title="Cumulative P&L", + own_yaxis=own_yaxis, + line_shape="hv") + + +async def total_paid_fees(meta_database, all_trades): + paid_fees = 0 + fees_currency = None + trading_transactions_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.FUNDING_FEE.value,) + ) + if trading_transactions_history: + for transaction in trading_transactions_history: + if fees_currency is None: + fees_currency = transaction["currency"] + if transaction["currency"] != fees_currency: + get_logger().error(f"Unknown funding fee value: {transaction}") + else: + # - because funding fees are stored as negative number when paid (positive when "gained") + paid_fees -= transaction["quantity"] + for trade in all_trades: + currency = symbol_util.parse_symbol(trade[commons_enums.DBRows.SYMBOL.value]).base + if trade[commons_enums.DBRows.FEES_CURRENCY.value] == currency: + if trade[commons_enums.DBRows.FEES_CURRENCY.value] == fees_currency: + paid_fees += trade[commons_enums.DBRows.FEES_AMOUNT.value] + else: + paid_fees += trade[commons_enums.DBRows.FEES_AMOUNT.value] * \ + trade[commons_enums.PlotAttributes.Y.value] + else: + if trade[commons_enums.DBRows.FEES_CURRENCY.value] == fees_currency: + paid_fees += trade[commons_enums.DBRows.FEES_AMOUNT.value] / \ + trade[commons_enums.PlotAttributes.Y.value] + else: + paid_fees += trade[commons_enums.DBRows.FEES_AMOUNT.value] + return paid_fees + + +async def plot_historical_pnl_value(meta_database, plotted_element, exchange=None, x_as_trade_count=True, + own_yaxis=False, include_cumulative=True, include_unitary=True, + historical_values=None): + return await _get_historical_pnl(meta_database, plotted_element, include_cumulative, include_unitary, + exchange=exchange, x_as_trade_count=x_as_trade_count, own_yaxis=own_yaxis, + historical_values=historical_values) + + +def _plot_table_data(data, plotted_element, data_name, additional_key_to_label, additional_columns, + datum_columns_callback): + if not data: + get_logger().debug(f"Nothing to create a table from when reading {data_name}") + return + column_render = _get_default_column_render() + types = _get_default_types() + key_to_label = { + **plotted_element.TABLE_KEY_TO_COLUMN, + **additional_key_to_label + } + columns = _get_default_columns(plotted_element, data, column_render, key_to_label) + additional_columns + if datum_columns_callback: + for datum in data: + datum_columns_callback(datum) + rows = _get_default_rows(data, columns) + searches = _get_default_searches(columns, types) + plotted_element.table( + data_name, + columns=columns, + rows=rows, + searches=searches + ) + + +async def plot_trades(meta_database, plotted_element, historical_values=None): + if historical_values: + _, trades_data, _, _, _, _ = historical_values + data = [] + for trades in trades_data.values(): + data += trades + else: + account_type = trading_api.get_account_type_from_run_metadata(await get_metadata(meta_database)) + data = await meta_database.get_trades_db(account_type).all(commons_enums.DBTables.TRADES.value) + key_to_label = { + commons_enums.PlotAttributes.Y.value: "Price", + commons_enums.PlotAttributes.TYPE.value: "Type", + commons_enums.PlotAttributes.SIDE.value: "Side", + } + additional_columns = [ + { + "field": "total", + "label": "Total", + "render": None + }, { + "field": "fees", + "label": "Fees", + "render": None + } + ] + + def datum_columns_callback(datum): + datum["total"] = datum["cost"] + datum["fees"] = f'{datum["fees_amount"]} {datum["fees_currency"]}' + + _plot_table_data(data, plotted_element, commons_enums.DBTables.TRADES.value, + key_to_label, additional_columns, datum_columns_callback) + + +async def plot_orders(meta_database, plotted_element, historical_values=None): + if historical_values: + _, _, _, _, metadata, _ = historical_values + else: + metadata = await get_metadata(meta_database) + account_type = trading_api.get_account_type_from_run_metadata(metadata) + data = [ + order[trading_constants.STORAGE_ORIGIN_VALUE] + for order in await meta_database.get_orders_db(account_type).all(commons_enums.DBTables.ORDERS.value) + ] + key_to_label = { + trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value: "Time", + trading_enums.ExchangeConstantsOrderColumns.PRICE.value: "Price", + trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value: "Amount", + trading_enums.ExchangeConstantsOrderColumns.TYPE.value: "Type", + trading_enums.ExchangeConstantsOrderColumns.SIDE.value: "Side", + } + additional_columns = [ + { + "field": "total", + "label": "Total", + "render": None + } + ] + + def datum_columns_callback(datum): + datum["total"] = datum[trading_enums.ExchangeConstantsOrderColumns.COST.value] + datum[trading_enums.ExchangeConstantsOrderColumns.TIMESTAMP.value] *= 1000 + + _plot_table_data(data, plotted_element, commons_enums.DBTables.ORDERS.value, + key_to_label, additional_columns, datum_columns_callback) + + +async def plot_withdrawals(meta_database, plotted_element): + withdrawal_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.BLOCKCHAIN_WITHDRAWAL.value,) + ) + # apply quantity to y for each withdrawal + for withdrawal in withdrawal_history: + withdrawal[commons_enums.PlotAttributes.Y.value] = withdrawal["quantity"] + key_to_label = { + commons_enums.PlotAttributes.Y.value: "Quantity", + "currency": "Currency", + commons_enums.PlotAttributes.SIDE.value: "Side", + } + additional_columns = [] + + _plot_table_data(withdrawal_history, plotted_element, "Withdrawals", + key_to_label, additional_columns, None) + + +async def plot_positions(meta_database, plotted_element): + realized_pnl_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.CLOSE_REALISED_PNL.value,) + ) + key_to_label = { + commons_enums.PlotAttributes.X.value: "Exit time", + "first_entry_time": "Entry time", + "average_entry_price": "Average entry price", + "average_exit_price": "Average exit price", + "cumulated_closed_quantity": "Cumulated closed quantity", + "realised_pnl": "Realised PNL", + commons_enums.PlotAttributes.SIDE.value: "Side", + "trigger_source": "Closed by", + } + + _plot_table_data(realized_pnl_history, plotted_element, "Positions", key_to_label, [], None) + + +async def display(plotted_element, label, value): + plotted_element.value(label, value) + + +async def display_html(plotted_element, html): + plotted_element.html_value(html) + + +async def plot_table(meta_database, plotted_element, data_source, columns=None, rows=None, + searches=None, column_render=None, types=None, cache_value=None): + data = [] + metadata = await get_metadata(meta_database) + account_type = trading_api.get_account_type_from_run_metadata(metadata) + if data_source == commons_enums.DBTables.TRADES.value: + data = await meta_database.get_trades_db(account_type).all(commons_enums.DBTables.TRADES.value) + elif data_source == commons_enums.DBTables.ORDERS.value: + data = await meta_database.get_orders_db(account_type).all(commons_enums.DBTables.ORDERS.value) + else: + exchange = meta_database.run_dbs_identifier.context.exchange_name + symbol = meta_database.run_dbs_identifier.context.symbol + symbol_db = meta_database.get_symbol_db(exchange, symbol) + if cache_value is None: + data = await symbol_db.all(data_source) + else: + query = (await symbol_db.search()).title == data_source + cache_data = await symbol_db.select(commons_enums.DBTables.CACHE_SOURCE.value, query) + if cache_data: + try: + cache_database = databases.CacheDatabase(cache_data[0][commons_enums.PlotAttributes.VALUE.value]) + cache = await cache_database.get_cache() + x_shift = cache_data[0]["x_shift"] + data = [ + { + commons_enums.PlotAttributes.X.value: (cache_element[commons_enums.CacheDatabaseColumns.TIMESTAMP.value] + x_shift) * 1000, + commons_enums.PlotAttributes.Y.value: cache_element[cache_value] + } + for cache_element in cache + ] + except KeyError as e: + get_logger().warning(f"Missing cache values when plotting data: {e}") + except commons_errors.DatabaseNotFoundError as e: + get_logger().warning(f"Missing cache values when plotting data: {e}") + + if not data: + get_logger().debug(f"Nothing to create a table from when reading {data_source}") + return + column_render = column_render or _get_default_column_render() + types = types or _get_default_types() + columns = columns or _get_default_columns(plotted_element, data, column_render) + rows = rows or _get_default_rows(data, columns) + searches = searches or _get_default_searches(columns, types) + plotted_element.table( + data_source, + columns=columns, + rows=rows, + searches=searches) + + +def _get_default_column_render(): + return { + "Time": "datetime", + "Entry time": "datetime", + "Exit time": "datetime" + } + + +def _get_default_types(): + return { + "Time": "datetime", + "Entry time": "datetime", + "Exit time": "datetime" + } + + +def _get_default_columns(plotted_element, data, column_render, key_to_label=None): + key_to_label = key_to_label or plotted_element.TABLE_KEY_TO_COLUMN + return [ + { + "field": row_key, + "label": key_to_label[row_key], + "render": column_render.get(key_to_label[row_key], None) + } + for row_key, row_value in data[0].items() + if row_key in key_to_label and row_value is not None + ] + + +def _get_default_rows(data, columns): + column_fields = set(col["field"] for col in columns) + return [ + {key: val for key, val in row.items() if key in column_fields} + for row in data + ] + + +def _get_default_searches(columns, types): + return [ + { + "field": col["field"], + "label": col["label"], + "type": types.get(col["label"]) + } + for col in columns + ] + + +def _get_wins_and_losses_from_transactions(x_data, wins_and_losses_data, trading_transactions_history, + x_as_trade_count): + for transaction in trading_transactions_history: + transaction_pnl = 0 if transaction["realised_pnl"] is None else transaction["realised_pnl"] + current_cumulative_wins = wins_and_losses_data[-1] if wins_and_losses_data else 0 + if transaction_pnl < 0: + wins_and_losses_data.append(current_cumulative_wins - 1) + elif transaction_pnl > 0: + wins_and_losses_data.append(current_cumulative_wins + 1) + else: + continue + + if x_as_trade_count: + x_data.append(len(wins_and_losses_data) - 1) + else: + x_data.append(transaction[commons_enums.PlotAttributes.X.value]) + + +def _get_wins_and_losses_from_trades(x_data, wins_and_losses_data, trades_history, x_as_trade_count): + # todo + pass + + +async def plot_historical_wins_and_losses(meta_database, plotted_element, exchange=None, x_as_trade_count=False, + own_yaxis=True, historical_values=None): + price_data, trades_data, _, _, _, _ = historical_values or await load_historical_values(meta_database, exchange) + if not (price_data and next(iter(price_data.values()))): + return + x_data = [] + wins_and_losses_data = [] + trading_transactions_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.TRADING_FEE.value, + trading_enums.TransactionType.FUNDING_FEE.value, + trading_enums.TransactionType.REALISED_PNL.value, + trading_enums.TransactionType.CLOSE_REALISED_PNL.value) + ) + if trading_transactions_history: + # can rely on pnl history + _get_wins_and_losses_from_transactions(x_data, wins_and_losses_data, + trading_transactions_history, x_as_trade_count) + else: + # recreate pnl history from trades + return # todo not implemented yet + # _read_pnl_from_trades(x_data, pnl_data, cumulative_pnl_data, trades_data, x_as_trade_count) + + plotted_element.plot( + mode="scatter", + x=x_data, + y=wins_and_losses_data, + x_type="tick0" if x_as_trade_count else "date", + title="wins and losses count", + own_yaxis=own_yaxis, + line_shape="hv") + + +def _get_win_rates_from_transactions(x_data, win_rates_data, trading_transactions_history, + x_as_trade_count): + wins_count = 0 + losses_count = 0 + for transaction in trading_transactions_history: + transaction_pnl = 0 if transaction["realised_pnl"] is None else transaction["realised_pnl"] + if transaction_pnl < 0: + losses_count += 1 + elif transaction_pnl > 0: + wins_count += 1 + else: + continue + + win_rates_data.append((wins_count/(losses_count+wins_count))*100) + if x_as_trade_count: + x_data.append(len(win_rates_data) - 1) + else: + x_data.append(transaction[commons_enums.PlotAttributes.X.value]) + + +def _get_win_rates_from_trades(x_data, win_rates_data, trades_history, x_as_trade_count): + # todo + pass + + +async def plot_historical_win_rates(meta_database, plotted_element, exchange=None, + x_as_trade_count=False, own_yaxis=True, historical_values=None): + price_data, trades_data, _, _, _, _ = historical_values or await load_historical_values(meta_database, exchange) + if not (price_data and next(iter(price_data.values()))): + return + x_data = [] + win_rates_data = [] + trading_transactions_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.TRADING_FEE.value, + trading_enums.TransactionType.FUNDING_FEE.value, + trading_enums.TransactionType.REALISED_PNL.value, + trading_enums.TransactionType.CLOSE_REALISED_PNL.value) + ) + if trading_transactions_history: + # can rely on pnl history + _get_win_rates_from_transactions(x_data, win_rates_data, + trading_transactions_history, x_as_trade_count) + else: + # recreate pnl history from trades + return # todo not implemented yet + # _get_win_rates_from_trades(x_data, pnl_data, cumulative_pnl_data, trades_data, x_as_trade_count) + + plotted_element.plot( + mode="scatter", + x=x_data, + y=win_rates_data, + x_type="tick0" if x_as_trade_count else "date", + title="win rate", + own_yaxis=own_yaxis, + line_shape="hv") + + +async def _get_best_case_growth_from_transactions(trading_transactions_history, + x_as_trade_count, meta_database): + ref_market = meta_database.run_db._database.adaptor.database.storage.cache[commons_enums.DBTables.METADATA.value]['1']['ref_market'] + start_balance = meta_database.run_db._database.adaptor.database.storage.cache[commons_enums.DBTables.PORTFOLIO.value]['1'][ref_market]['total'] + best_case_data, _, start_balance, end_balance, x_data \ + = await portfolio_util.get_coefficient_of_determination_data(transactions=trading_transactions_history, + start_balance=start_balance, + use_high_instead_of_end_balance=True, + x_as_trade_count=x_as_trade_count) + if best_case_data: + return x_data, best_case_data + return [], [] + + +async def plot_best_case_growth(meta_database, plotted_element, exchange=None, + x_as_trade_count=False, own_yaxis=False, historical_values=None): + price_data, trades_data, _, _, _, _ = historical_values or await load_historical_values(meta_database, exchange) + if not (price_data and next(iter(price_data.values()))): + return + x_data = [] + best_case_data = [] + trading_transactions_history = await get_transactions( + meta_database, + transaction_types=(trading_enums.TransactionType.TRADING_FEE.value, + trading_enums.TransactionType.FUNDING_FEE.value, + trading_enums.TransactionType.REALISED_PNL.value, + trading_enums.TransactionType.CLOSE_REALISED_PNL.value) + ) + if trading_transactions_history: + # can rely on pnl history + x_data, best_case_data = await _get_best_case_growth_from_transactions(trading_transactions_history, + x_as_trade_count, meta_database) + + plotted_element.plot( + mode="scatter", + x=x_data, + y=best_case_data, + x_type="tick0" if x_as_trade_count else "date", + title="best case growth", + own_yaxis=own_yaxis, + line_shape="hv") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/configuration/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/configuration/__init__.py new file mode 100644 index 0000000000..5f52cde243 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/configuration/__init__.py @@ -0,0 +1,19 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from tentacles.Meta.Keywords.scripting_library.configuration.profile_data_configuration import * +from tentacles.Meta.Keywords.scripting_library.configuration.tentacles_configuration import * +from tentacles.Meta.Keywords.scripting_library.configuration.indexes_configuration import * +from tentacles.Meta.Keywords.scripting_library.configuration.exchanges_configuration import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/configuration/exchanges_configuration.py b/packages/tentacles/Meta/Keywords/scripting_library/configuration/exchanges_configuration.py new file mode 100644 index 0000000000..22d9cb4563 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/configuration/exchanges_configuration.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.constants as constants + + +# TODO later: find a way to store this in exchange tentacles instead and use exchange.get_default_reference_market +# Issue: hollaex based exchanages require an exchange configuration to be identified as such +_SPECIFIC_REFERENCE_MARKET_PER_EXCHANGE: dict[str, str] = { + "coinbase": "USDC", + "binance": "USDC", +} +_EXCHANGES_WITH_DIFFERENT_PUBLIC_DATA_AFTER_AUTH = set[str]([ + "mexc", + "lbank", +]) + +def get_default_reference_market_per_exchange(exchanges: list[str]) -> dict[str, str]: + return {exchange: get_default_exchange_reference_market(exchange) for exchange in exchanges} + +def get_default_exchange_reference_market(exchange: str) -> str: + return _SPECIFIC_REFERENCE_MARKET_PER_EXCHANGE.get(exchange, constants.DEFAULT_REFERENCE_MARKET) + +def is_exchange_with_different_public_data_after_auth(exchange: str) -> bool: + return exchange in _EXCHANGES_WITH_DIFFERENT_PUBLIC_DATA_AFTER_AUTH diff --git a/packages/tentacles/Meta/Keywords/scripting_library/configuration/indexes_configuration.py b/packages/tentacles/Meta/Keywords/scripting_library/configuration/indexes_configuration.py new file mode 100644 index 0000000000..c63b6507b6 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/configuration/indexes_configuration.py @@ -0,0 +1,131 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import octobot_commons +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.profiles as commons_profiles +import octobot_commons.profiles.profile_data as commons_profile_data +import octobot_commons.symbols + +import octobot_evaluators.constants as evaluators_constants + +import octobot_trading.constants as trading_constants + +import tentacles.Trading.Mode.index_trading_mode.index_trading as index_trading +import octobot_copy.enums as rebalancer_enums +import tentacles.Meta.Keywords.scripting_library.configuration.exchanges_configuration as exchanges_configuration + + +def create_index_config_from_tentacles_config( + tentacles_config: list[commons_profile_data.TentaclesData], exchange: str, + starting_funds: float, backtesting_start_time_delta: float +) -> commons_profiles.ProfileData: + trading_mode_config = tentacles_config[0].config + distribution = trading_mode_config[index_trading.IndexTradingModeProducer.INDEX_CONTENT] + reference_market = exchanges_configuration.get_default_exchange_reference_market(exchange) + # replace USD by reference market + for element in distribution: + if element[rebalancer_enums.DistributionKeys.NAME] == "USD": + element[rebalancer_enums.DistributionKeys.NAME] = reference_market + coins_by_symbol = { + element[rebalancer_enums.DistributionKeys.NAME]: element[rebalancer_enums.DistributionKeys.NAME] + for element in distribution + } + rebalance_cap = trading_mode_config[index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_MIN_PERCENT] + min_funds = starting_funds / 10 + selected_rebalance_trigger_profile = trading_mode_config.get(index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE, None) + rebalance_trigger_profiles = trading_mode_config.get(index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES, None) + profile_data_dict = generate_index_config( + distribution, rebalance_cap, selected_rebalance_trigger_profile, rebalance_trigger_profiles, reference_market, exchange, + min_funds, coins_by_symbol, False, backtesting_start_time_delta + ) + return commons_profiles.ProfileData.from_dict(profile_data_dict) + + +def generate_index_config( + distribution: typing.List, rebalance_cap: float, + selected_rebalance_trigger_profile: typing.Optional[str], rebalance_trigger_profiles: typing.Optional[list[dict]], + reference_market: str, + exchange: str, min_funds: float, coins_by_symbol: dict[str, str], disabled_backtesting: bool, + backtesting_start_time_delta: float +) -> dict: + profile_details = commons_profile_data.ProfileDetailsData(name="serverless") + trading = commons_profile_data.TradingData( + reference_market=reference_market, risk=0.5 + ) + config_exchanges = [commons_profile_data.ExchangeData( + internal_name=exchange, exchange_type=common_constants.CONFIG_EXCHANGE_SPOT + )] + currencies = [ + commons_profile_data.CryptoCurrencyData( + [octobot_commons.symbols.merge_currencies(element[rebalancer_enums.DistributionKeys.NAME], reference_market)], + coins_by_symbol.get( + element[rebalancer_enums.DistributionKeys.NAME], + element[rebalancer_enums.DistributionKeys.NAME] + ) + ) + for element in distribution + if element[rebalancer_enums.DistributionKeys.NAME] != reference_market + ] + trader = commons_profile_data.TraderData(enabled=True) + trader_simulator = commons_profile_data.TraderSimulatorData() + tentacles = [ + commons_profile_data.TentaclesData( + index_trading.IndexTradingMode.get_name(), _get_index_trading_config( + distribution, rebalance_cap, selected_rebalance_trigger_profile, rebalance_trigger_profiles + ) + ) + ] + backtesting = generate_index_backtesting_config( + exchange, reference_market, min_funds, disabled_backtesting, backtesting_start_time_delta + ) + base_config = commons_profiles.ProfileData( + profile_details, currencies, trading, config_exchanges, commons_profile_data.FutureExchangeData(), + trader, trader_simulator, tentacles, backtesting + ) + return base_config.to_dict(include_default_values=False) + + +def generate_index_backtesting_config( + exchange: str, reference_market: str, min_funds: float, disabled_backtesting: bool, start_time_delta: float +) -> commons_profile_data.BacktestingContext: + return commons_profile_data.BacktestingContext( + exchanges=[] if disabled_backtesting else [exchange], + start_time_delta=start_time_delta, + starting_portfolio={ + reference_market: min_funds * 10 # make sure there is always enough funds even if the market crashes + } + ) + + +def _get_index_trading_config( + distribution: typing.List, + rebalance_cap: float, + selected_rebalance_trigger_profile: typing.Optional[str], + rebalance_trigger_profiles: typing.Optional[list[dict]] +): + return { + trading_constants.TRADING_MODE_REQUIRED_STRATEGIES: [], + index_trading.IndexTradingModeProducer.REFRESH_INTERVAL: 1, + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_MIN_PERCENT: rebalance_cap, + index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE: selected_rebalance_trigger_profile, + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES: rebalance_trigger_profiles, + index_trading.IndexTradingModeProducer.SYNCHRONIZATION_POLICY: rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE.value, + index_trading.IndexTradingModeProducer.SELL_UNINDEXED_TRADED_COINS: True, + index_trading.IndexTradingModeProducer.INDEX_CONTENT: distribution, + evaluators_constants.STRATEGIES_REQUIRED_TIME_FRAME: [common_enums.TimeFrames.ONE_DAY.value], + } diff --git a/packages/tentacles/Meta/Keywords/scripting_library/configuration/profile_data_configuration.py b/packages/tentacles/Meta/Keywords/scripting_library/configuration/profile_data_configuration.py new file mode 100644 index 0000000000..3b8c85ee52 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/configuration/profile_data_configuration.py @@ -0,0 +1,511 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import typing +import os +import sortedcontainers +import time + +import octobot_commons +import octobot_commons.constants as common_constants +import octobot_commons.enums as common_enums +import octobot_commons.configuration as commons_configuration +import octobot_commons.profiles as commons_profiles +import octobot_commons.profiles.profile_data as commons_profile_data +import octobot_commons.symbols +import octobot_commons.logging + +import octobot_evaluators.constants as evaluators_constants + +import octobot_tentacles_manager.api + +import octobot_trading.constants as trading_constants +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.util.config_util as config_util +import octobot_trading.api +import octobot_trading.enums + +import octobot_tentacles_manager.api + + +import tentacles.Trading.Mode.index_trading_mode.index_trading as index_trading +import octobot_copy.enums as copy_enums +import tentacles.Meta.Keywords.scripting_library.errors as scr_errors +import tentacles.Meta.Keywords.scripting_library.constants as scr_constants +import tentacles.Meta.Keywords.scripting_library.configuration.tentacles_configuration as tentacles_configuration +import tentacles.Meta.Keywords.scripting_library.configuration.indexes_configuration as indexes_configuration + + + + +def minimal_profile_data() -> commons_profiles.ProfileData: + return commons_profiles.ProfileData.from_dict({ + "profile_details": {"name": ""}, + "crypto_currencies": [], + "exchanges": [], + "trading": {"reference_market": common_constants.DEFAULT_REFERENCE_MARKET} + }) + + +def empty_config_proxy(*_, **__): + return {} + + +def create_backtesting_config( + profile_data: commons_profiles.ProfileData, + exchange_data: exchange_data_import.ExchangeData, +) -> commons_configuration.Configuration: + tentacles_config = octobot_tentacles_manager.api.get_full_tentacles_setup_config(profile_data) + apply_leverage_config(profile_data) + profile_data.exchanges = [] # clear exchange to avoid conflicts with backtesting exchanges + return config_util.get_config( + profile_data, exchange_data, tentacles_config, False, False, False + ) + + +def create_profile_data_from_tentacles_config_history( + tentacles_config_by_time: dict[float, list[commons_profile_data.TentaclesData]], exchange: str, starting_funds: float +) -> commons_profiles.ProfileData: + if not tentacles_config_by_time: + raise ValueError("tentacles_config_by_time is empty") + ordered_config = sortedcontainers.SortedDict(tentacles_config_by_time) + first_config = next(iter(ordered_config.values())) + if first_config[0].name == index_trading.IndexTradingMode.get_name(): + backtesting_start_time_delta = time.time() - next(iter(ordered_config)) + historical_config_by_time = { + timestamp: indexes_configuration.create_index_config_from_tentacles_config( + config, exchange, starting_funds, backtesting_start_time_delta + ) + for timestamp, config in ordered_config.items() + } + master_config = next(iter(historical_config_by_time.values())) + if len(historical_config_by_time) > 1: + register_historical_configs( + master_config, historical_config_by_time, + add_historical_trading_pairs_to_master_profile_data=True, + apply_master_tentacle_config_edits_to_historical_configs=False + ) + return master_config + else: + # todo implement other trading modes if necessary + raise ValueError(f"{first_config.name} config not implemented") + + + +def register_historical_configs( + master_profile_data: commons_profiles.ProfileData, + historical_profile_data_by_time: dict[float, commons_profiles.ProfileData], + add_historical_trading_pairs_to_master_profile_data: bool, + apply_master_tentacle_config_edits_to_historical_configs: bool +): + if add_historical_trading_pairs_to_master_profile_data: + # 1. register every historical profile traded pairs in master profile + if added_pairs := get_historical_added_config_trading_pairs( + master_profile_data, historical_profile_data_by_time.values() + ): + add_traded_symbols(master_profile_data, added_pairs) + + # 2. register historical tentacles_config + config_by_tentacle = master_profile_data.get_config_by_tentacle() + for historical_time, historical_profile in historical_profile_data_by_time.items(): + historical_config_by_tentacle = historical_profile.get_config_by_tentacle() + for tentacle, config in historical_config_by_tentacle.items(): + master_config = config_by_tentacle[tentacle] + if config is not master_config: + if apply_master_tentacle_config_edits_to_historical_configs: + try: + _apply_master_tentacle_config_edits_to_historical_config(tentacle, master_config, config) + except RuntimeError: + # tentacle not found, continue + _get_logger().error(f"Tentacle {tentacle} not found in available tentacles") + commons_configuration.add_historical_tentacle_config( + master_config, + historical_time, + config, + ) + + +def _apply_master_tentacle_config_edits_to_historical_config(tentacle: str, master_config: dict, historical_config: dict): + if updatable_keys := tentacles_configuration.get_config_history_propagated_tentacles_config_keys(tentacle): + for key in updatable_keys: + if key in master_config: + historical_config[key] = master_config[key] + + +def get_historical_added_config_trading_pairs( + master_profile_data: commons_profiles.ProfileData, + historical_profile_data: typing.Optional[typing.Iterable[commons_profiles.ProfileData]] +) -> list[str]: + if historical_profile_data: + historical_pairs = [ + pair + for historical_profile in historical_profile_data + for pair in historical_profile.get_traded_symbols() + ] + else: + historical_pairs = get_historical_traded_pairs(master_profile_data) + registered_pairs = master_profile_data.get_traded_symbols() + added_pairs = [] + for pair in historical_pairs: + if pair not in registered_pairs: + registered_pairs.append(pair) + added_pairs.append(pair) + return added_pairs + + +def get_historical_traded_pairs( + profile_data: commons_profiles.ProfileData +) -> typing.Iterable[str]: + trading_mode = get_trading_mode(profile_data) + trading_mode_config = _get_trading_mode_config(profile_data) + historical_trading_mode_configs = commons_configuration.get_historical_tentacle_configs( + trading_mode_config, 0, time.time() + ) + if trading_mode == index_trading.IndexTradingMode.get_name(): + return _get_historical_index_trading_pairs(profile_data, historical_trading_mode_configs) #todo + else: + raise NotImplementedError(f"Trading mode {trading_mode} not implemented") + + + +def _get_historical_index_trading_pairs( + profile_data: commons_profiles.ProfileData, historical_trading_mode_configs: typing.Iterable[dict] +) -> typing.Iterable[str]: + historical_assets = [] + latest_config_assets = set( + asset[copy_enums.DistributionKeys.NAME] + for asset in _get_trading_mode_config(profile_data)[ + index_trading.IndexTradingModeProducer.INDEX_CONTENT + ] + ) + for historical_trading_mode_config in historical_trading_mode_configs: + for asset in historical_trading_mode_config[index_trading.IndexTradingModeProducer.INDEX_CONTENT]: + historical_asset = asset[copy_enums.DistributionKeys.NAME] + if historical_asset not in historical_assets and historical_asset not in latest_config_assets: + historical_assets.append(historical_asset) + return [ + octobot_commons.symbols.merge_currencies(asset, profile_data.trading.reference_market) + for asset in historical_assets + ] + + +def add_traded_symbols( + profile_data: commons_profiles.ProfileData, + added_symbols: typing.Iterable[str] +): + traded_symbols = profile_data.get_traded_symbols() + to_add_symbols = [ + symbol + for symbol in added_symbols + if symbol not in traded_symbols + ] + if to_add_symbols: + _get_logger().info(f"Adding {to_add_symbols} to profile data traded pairs.") + expand_traded_pairs_into_currencies(profile_data, to_add_symbols) + + +def expand_traded_pairs_into_currencies(profile_data, pairs: list[str]): + for pair in pairs: + profile_data.crypto_currencies.append( + commons_profile_data.CryptoCurrencyData( + trading_pairs=[pair], + name=pair, + enabled=True + ) + ) + + +def filter_out_missing_symbols(profile_data: commons_profiles.ProfileData, available_symbols: list[str]) -> list[str]: + traded_pairs = profile_data.get_traded_symbols() + removed_symbols = [symbol for symbol in traded_pairs if symbol not in available_symbols] + if removed_symbols: + profile_data.crypto_currencies = [] + add_traded_symbols( + profile_data, + [pair for pair in traded_pairs if pair not in removed_symbols] + ) + return removed_symbols + + +def get_readonly_exchange_auth_details(exchange_internal_name: str) -> exchange_data_import.ExchangeAuthDetails: + return exchange_data_import.ExchangeAuthDetails( + api_key=_get_readonly_exchange_credential_from_env(exchange_internal_name, "KEY", False), + api_secret=_get_readonly_exchange_credential_from_env(exchange_internal_name, "SECRET", False), + api_password=_get_readonly_exchange_credential_from_env(exchange_internal_name, "PASSWORD", True), + sandboxed=False, + broker_enabled=False, + ) + + +def _get_readonly_exchange_credential_from_env(exchange_name, cred_suffix, allow_missing): + # for coinbase: COINBASE_READ_ONLY_KEY, COINBASE_READ_ONLY_SECRET, COINBASE_READ_PASSWORD + if cred := os.getenv(f"{exchange_name}_READ_ONLY_{cred_suffix}".upper(), None): + return commons_configuration.encrypt(cred).decode() + if allow_missing: + return None + raise scr_errors.MissingReadOnlyExchangeCredentialsError( + f"{exchange_name} read only credentials are missing" + ) + + +def get_required_candles_count(profile_data: commons_profiles.ProfileData, min_candles_count: int) -> int: + for tentacle_config in profile_data.tentacles: + if common_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT in tentacle_config.config: + return max( + tentacle_config.config[common_constants.CONFIG_TENTACLES_REQUIRED_CANDLES_COUNT], + min_candles_count + ) + return min_candles_count + + +def update_position_levarage( + position: exchange_data_import.PositionDetails, updated_contracts_by_symbol: dict +): + leverage = float( + updated_contracts_by_symbol[ + position.contract[octobot_trading.enums.ExchangeConstantsMarginContractColumns.PAIR.value] + ].current_leverage + ) + position.contract[octobot_trading.enums.ExchangeConstantsMarginContractColumns.CURRENT_LEVERAGE.value] = leverage + position.position[octobot_trading.enums.ExchangeConstantsPositionColumns.LEVERAGE.value] = leverage + + +def merge_profile_data( + profile_data: commons_profiles.ProfileData, + previous_profile_data: commons_profiles.ProfileData, +) -> commons_profiles.ProfileData: + # previous config crypto currencies are merged + current_traded_pairs = set(profile_data.get_traded_symbols()) + for currency_data in previous_profile_data.crypto_currencies: + for previous_traded_pair in currency_data.trading_pairs: + to_add_pairs = set() + if previous_traded_pair not in current_traded_pairs: + # add pair + to_add_pairs.add(previous_traded_pair) + parsed_symbol = octobot_commons.symbols.parse_symbol(previous_traded_pair) + if parsed_symbol.quote != profile_data.trading.reference_market: + # reference market changed: also include the base of this pair within the traded pairs + ref_market_pair = octobot_commons.symbols.merge_currencies( + parsed_symbol.base, profile_data.trading.reference_market + ) + if ref_market_pair not in current_traded_pairs: + to_add_pairs.add(ref_market_pair) + for traded_pair in to_add_pairs: + _get_logger().info( + f"Profile data merge: including previous config {currency_data} currency into current profile data" + ) + expand_traded_pairs_into_currencies(profile_data, [traded_pair]) + current_traded_pairs.add(traded_pair) + return profile_data + + + +def apply_leverage_config(profile_data: commons_profiles.ProfileData): + if leverage := profile_data.future_exchange_data.default_leverage: + trading_mode_config = _get_trading_mode_config(profile_data) + apply_leverage_config_to_trading_mode_config_if_necessary(trading_mode_config, leverage) + + +def apply_leverage_config_to_trading_mode_config_if_necessary(trading_mode_config: dict, leverage: float): + if trading_constants.CONFIG_LEVERAGE not in trading_mode_config: + trading_mode_config[trading_constants.CONFIG_LEVERAGE] = leverage + +def _get_trading_mode_config(profile_data: commons_profiles.ProfileData): + trading_mode = get_trading_mode(profile_data) + config_by_tentacle = profile_data.get_config_by_tentacle() + if trading_mode in config_by_tentacle: + return config_by_tentacle[trading_mode] + raise KeyError(f"No trading mode config found in {list(config_by_tentacle)} tentacles config") + + +def get_trading_mode(profile_data: commons_profiles.ProfileData) -> typing.Optional[str]: + for tentacle_name in profile_data.get_config_by_tentacle(): + if tentacles_configuration.is_trading_mode_tentacle(tentacle_name): + return tentacle_name + return None + + +def get_traded_coins( + profile_data: commons_profiles.ProfileData, + include_stablecoins: bool, +) -> list[str]: + # return an ordered list of: + # 1. reference market + # 2. traded assets + # 3. stablecoins if include_stablecoins is True + coins = [profile_data.trading.reference_market, ] + for symbol in profile_data.get_traded_symbols(): + base, quote = octobot_commons.symbols.parse_symbol(symbol).base_and_quote() + if base not in coins: + coins.append(base) + if quote not in coins: + coins.append(quote) + if include_stablecoins: + coins.extend(tuple( + coin + for coin in common_constants.USD_LIKE_AND_FIAT_COINS + if coin not in coins + )) + return coins + + +def get_time_frames( + profile_data: commons_profiles.ProfileData, for_historical_data=False +) -> list[str]: + for config in profile_data.get_config_by_tentacle().values(): + if evaluators_constants.STRATEGIES_REQUIRED_TIME_FRAME in config: + return config[evaluators_constants.STRATEGIES_REQUIRED_TIME_FRAME] + return [_get_default_time_frame(profile_data, for_historical_data)] + + +def _get_default_time_frame(profile_data: commons_profiles.ProfileData, for_historical_data: bool): + if not for_historical_data: + # always use DEFAULT_TIMEFRAME when focusing on historical data + return scr_constants.DEFAULT_TIMEFRAME.value + return _get_historical_default_time_frame(profile_data) + + +def _get_historical_default_time_frame(profile_data: commons_profiles.ProfileData): + if time_frame := get_default_historical_time_frame(profile_data): + return time_frame.value + # fallback to default timeframe + return scr_constants.DEFAULT_TIMEFRAME.value + + +def requires_price_update_timeframe(profile_data: commons_profiles.ProfileData) -> bool: + if trading_mode := get_trading_mode(profile_data): + return octobot_tentacles_manager.api.get_tentacle_class_from_string( + trading_mode + ).use_backtesting_accurate_price_update() + return True + + +def get_default_historical_time_frame(profile_data: commons_profiles.ProfileData) -> typing.Optional[common_enums.TimeFrames]: + if trading_mode := get_trading_mode(profile_data): + return octobot_tentacles_manager.api.get_tentacle_class_from_string( + trading_mode + ).get_default_historical_time_frame() + return None + + +def can_convert_ref_market_to_usd_like( + exchange_data: exchange_data_import.ExchangeData, + profile_data: commons_profiles.ProfileData +): + return can_convert_ref_market_to_usd_like_from_symbols( + profile_data.trading.reference_market, + [market.symbol for market in exchange_data.markets] + ) + +def can_convert_ref_market_to_usd_like_from_symbols( + reference_market: str, + symbols: list[str] +): + if octobot_trading.api.is_usd_like_coin(reference_market): + return True + for symbol in symbols: + if ( + reference_market in octobot_commons.symbols.parse_symbol(symbol).base_and_quote() + and octobot_trading.api.can_convert_symbol_to_usd_like(symbol) + ): + return True + return False + + +def set_backtesting_portfolio(profile_data, exchange_data): + exchange_data.portfolio_details.content = { + asset: { + common_constants.PORTFOLIO_AVAILABLE: value, + common_constants.PORTFOLIO_TOTAL: value + } + for asset, value in profile_data.backtesting_context.starting_portfolio.items() + } + _get_logger().info( + f"Applied {profile_data.profile_details.name} backtesting starting " + f"portfolio: {profile_data.backtesting_context.starting_portfolio}" + ) + + +def get_oldest_historical_config_symbols_and_time(profile_data: commons_profiles.ProfileData, default) -> (list, float): + first_historical_config_time = _get_first_historical_config_time(profile_data, default) + if first_historical_config_time == default: + base_traded_symbols = profile_data.get_traded_symbols() + return base_traded_symbols, base_traded_symbols, default + first_traded_symbols = _get_all_tentacles_configured_traded_symbols(profile_data, first_historical_config_time) + last_traded_symbols = _get_all_tentacles_configured_traded_symbols(profile_data, None) + return list(first_traded_symbols), list(last_traded_symbols), first_historical_config_time + + +def _get_all_tentacles_configured_traded_symbols( + profile_data: commons_profiles.ProfileData, first_historical_config_time: typing.Optional[float] +) -> set: + traded_symbols = set() + tentacles_config = profile_data.get_config_by_tentacle() + for tentacle, tentacle_config in tentacles_config.items(): + if first_historical_config_time is None: + config = tentacle_config + else: + try: + config = commons_configuration.get_historical_tentacle_config( + tentacle_config, first_historical_config_time + ) + except KeyError as err: + if tentacles_configuration.is_exchange_tentacle(tentacle): + # exchange tentacles (like HollaEx exchanges) don't have historical configuration: this is normal + pass + else: + raise scr_errors.InvalidProfileError(f"{tentacle} tentacle config is invalid: {err}") + traded_symbols.update(get_tentacle_config_traded_symbols( + tentacle, config, profile_data.trading.reference_market + )) + return traded_symbols + + +def _get_first_historical_config_time(profile_data: commons_profiles.ProfileData, default) -> float: + tentacles_config = profile_data.get_config_by_tentacle() + oldest_config_times = [] + for tentacle, config in tentacles_config.items(): + try: + oldest_config_times.append( + commons_configuration.get_oldest_historical_tentacle_config_time( + config + ) + ) + except ValueError: + # no historical config + pass + if oldest_config_times: + # return the most recent of the oldest configurations + return max(oldest_config_times) + return default + + +def get_tentacle_config_traded_symbols(tentacle: str, config: dict, reference_market: str) -> set: + tentacle_class = octobot_tentacles_manager.api.get_tentacle_class_from_string(tentacle) + try: + return set(tentacle_class.get_tentacle_config_traded_symbols(config, reference_market)) + except NotImplementedError as err: + if tentacles_configuration.is_exchange_tentacle(tentacle): + # exchange tentacles don't implement get_tentacle_config_traded_symbols, this is normal + pass + else: + _get_logger().warning( + f"Trying to get tentacle config historical traded symbols for {tentacle}: {err}" + ) + return set() + + +def _get_logger(): + return octobot_commons.logging.get_logger("ScriptedProfileData") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/configuration/tentacles_configuration.py b/packages/tentacles/Meta/Keywords/scripting_library/configuration/tentacles_configuration.py new file mode 100644 index 0000000000..13d2171fd8 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/configuration/tentacles_configuration.py @@ -0,0 +1,51 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import functools + +import octobot_commons.tentacles_management as tentacles_management + +import octobot_trading.exchanges as exchanges +import octobot_trading.modes + +import octobot_tentacles_manager.api + +_EXPECTED_MAX_TENTACLES_COUNT = 256 + + +def get_config_history_propagated_tentacles_config_keys(tentacle: str) -> list[str]: + tentacle_class = octobot_tentacles_manager.api.get_tentacle_class_from_string(tentacle) + return tentacle_class.get_config_history_propagated_tentacles_config_keys() + + +# cached to avoid calling default_parents_inspection when unnecessary +@functools.lru_cache(maxsize=_EXPECTED_MAX_TENTACLES_COUNT) +def is_trading_mode_tentacle(tentacle_name: str) -> bool: + tentacle_class = octobot_tentacles_manager.api.get_tentacle_class_from_string(tentacle_name) + return tentacles_management.default_parents_inspection(tentacle_class, octobot_trading.modes.AbstractTradingMode) + + +# cached to avoid calling default_parents_inspection when unnecessary +@functools.lru_cache(maxsize=_EXPECTED_MAX_TENTACLES_COUNT) +def is_exchange_tentacle(tentacle_name: str) -> bool: + tentacle_class = octobot_tentacles_manager.api.get_tentacle_class_from_string(tentacle_name) + return tentacles_management.default_parents_inspection(tentacle_class, exchanges.RestExchange) + + +def get_exchange_tentacle_from_name(tentacle_name: str) -> type[exchanges.RestExchange]: + for exchange_tentacle in octobot_tentacles_manager.api.get_all_exchange_tentacles(): + if exchange_tentacle.get_name() == tentacle_name: + return exchange_tentacle + raise ValueError(f"No exchange tentacle found for name: {tentacle_name}") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/constants.py b/packages/tentacles/Meta/Keywords/scripting_library/constants.py new file mode 100644 index 0000000000..0f101c7bea --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/constants.py @@ -0,0 +1,5 @@ +import octobot_commons.enums as common_enums + + +DEFAULT_TIMEFRAME = common_enums.TimeFrames.ONE_HOUR +PRICE_UPDATE_TIME_FRAME = common_enums.TimeFrames.FIFTEEN_MINUTES diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/data/__init__.py new file mode 100644 index 0000000000..a0981929bb --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +from .reading import * +from .writing import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/reading/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/__init__.py new file mode 100644 index 0000000000..abcb485935 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/__init__.py @@ -0,0 +1,5 @@ +from .exchange_public_data import * +from .exchange_private_data import * +from .metadata_reader import * +from .trading_settings import * + diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_private_data/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_private_data/__init__.py new file mode 100644 index 0000000000..b7daceca92 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_private_data/__init__.py @@ -0,0 +1 @@ +from .open_positions import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_private_data/open_positions.py b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_private_data/open_positions.py new file mode 100644 index 0000000000..715fb4bd38 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_private_data/open_positions.py @@ -0,0 +1,69 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_commons.symbols.symbol_util as symbol_util +import octobot_commons.constants as commons_constants +import octobot_trading.modes.script_keywords as script_keywords +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums + + +#todo clear +def is_current_contract_inverse(context, symbol=None, side=trading_enums.PositionSide.BOTH.value): + return script_keywords.get_position(context, symbol=symbol, side=side).symbol_contract.is_inverse_contract() + + +# returns negative values when in a short position +def open_position_size( + context, + side=trading_enums.PositionSide.BOTH.value, + symbol=None, + amount_type=commons_constants.PORTFOLIO_TOTAL +): + symbol = symbol or context.symbol + if context.exchange_manager.is_future: + return script_keywords.get_position(context, symbol, side).size + currency = symbol_util.parse_symbol(context.symbol).base + portfolio = context.exchange_manager.exchange_personal_data.portfolio_manager.portfolio + return portfolio.get_currency_portfolio(currency).total if amount_type == commons_constants.PORTFOLIO_TOTAL \ + else portfolio.get_currency_portfolio(currency).available + # todo handle reference market change + # todo handle futures: its account balance from exchange + # todo handle futures and return negative for shorts + + +def is_position_open( + context, + side=None +): + if side is None: + long_open = open_position_size(context, side="long") != trading_constants.ZERO + short_open = open_position_size(context, side="short") != trading_constants.ZERO + return True if long_open or short_open else False + else: + return open_position_size(context, side=side) != trading_constants.ZERO + + +def is_position_long( + context, +): + return script_keywords.get_position(context).is_long() + + +def is_position_short( + context, +): + return script_keywords.get_position(context).is_short() diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_public_data.py b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_public_data.py new file mode 100644 index 0000000000..4aa4e26dcd --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/exchange_public_data.py @@ -0,0 +1,261 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_trading.api as api +import octobot_trading.constants as trading_constants +import octobot_trading.exchange_data +import octobot_trading.personal_data as personal_data +import octobot_trading.exchange_data as exchange_data +import octobot_trading.enums as trading_enums +import octobot_backtesting.api as backtesting_api +from octobot_trading.modes.script_keywords.basic_keywords import run_persistence as run_persistence +from tentacles.Evaluator.Util.candles_util import CandlesUtil + + +# real time in live mode +# lowest available candle time on backtesting +def current_live_time(context) -> float: + return api.get_exchange_current_time(context.exchange_manager) + + +def symbol_fees(context, symbol=None) -> dict: + return context.exchange_manager.exchange.get_fees(symbol or context.symbol) + + +def is_futures_trading(context) -> bool: + return context.exchange_manager.is_future + + +def _time_frame_to_sec(context, time_frame=None): + return commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(time_frame or context.time_frame)] * \ + commons_constants.MINUTE_TO_SECONDS + + +async def current_candle_time(context, symbol=None, time_frame=None, use_close_time=False): + symbol = symbol or context.symbol + time_frame = time_frame or context.time_frame + candles_manager = api.get_symbol_candles_manager( + api.get_symbol_data(context.exchange_manager, symbol, allow_creation=False), time_frame + ) + if use_close_time: + return candles_manager.time_candles[candles_manager.time_candles_index - 1] + \ + _time_frame_to_sec(context, time_frame) + return candles_manager.time_candles[candles_manager.time_candles_index - 1] + + +async def current_closed_candle_time(context, symbol=None, time_frame=None): + return await current_candle_time(context, symbol=symbol, time_frame=time_frame) \ + - _time_frame_to_sec(context, time_frame) + + +# Use capital letters to avoid python native lib conflicts +async def Time(context, symbol=None, time_frame=None, limit=-1, max_history=False, use_close_time=True): + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + if max_history and isinstance(candles_manager, octobot_trading.exchange_data.PreloadedCandlesManager): + time_data = candles_manager.time_candles + else: + time_data = candles_manager.get_symbol_time_candles(-1 if max_history else limit) + if use_close_time: + return [value + _time_frame_to_sec(context, time_frame) for value in time_data] + return time_data + + +# real time in live mode +# lowest available candle closes on backtesting +async def current_live_price(context, symbol=None): + return await personal_data.get_up_to_date_price(context.exchange_manager, symbol or context.symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT, + base_error="Can't get the current price:") + + +async def current_candle_price(context, symbol=None, time_frame=None): + candles_manager = await _get_candle_manager(context, symbol, time_frame, False) + return candles_manager.get_symbol_close_candles(1)[-1] + + +# Use capital letters to avoid python native lib conflicts +async def Open(context, symbol=None, time_frame=None, limit=-1, max_history=False): + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + if isinstance(candles_manager, octobot_trading.exchange_data.PreloadedCandlesManager) and max_history: + return candles_manager.open_candles + return candles_manager.get_symbol_open_candles(-1 if max_history else limit) + + +# Use capital letters to avoid python native lib conflicts +async def High(context, symbol=None, time_frame=None, limit=-1, max_history=False): + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + if isinstance(candles_manager, octobot_trading.exchange_data.PreloadedCandlesManager) and max_history: + return candles_manager.high_candles + return candles_manager.get_symbol_high_candles(-1 if max_history else limit) + + +# Use capital letters to avoid python native lib conflicts +async def Low(context, symbol=None, time_frame=None, limit=-1, max_history=False): + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + if isinstance(candles_manager, octobot_trading.exchange_data.PreloadedCandlesManager) and max_history: + return candles_manager.low_candles + return candles_manager.get_symbol_low_candles(-1 if max_history else limit) + + +# Use capital letters to avoid python native lib conflicts +async def Close(context, symbol=None, time_frame=None, limit=-1, max_history=False): + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + if isinstance(candles_manager, octobot_trading.exchange_data.PreloadedCandlesManager) and max_history: + return candles_manager.close_candles + return candles_manager.get_symbol_close_candles(-1 if max_history else limit) + + +async def hl2(context, symbol=None, time_frame=None, limit=-1, max_history=False): + try: + from tentacles.Evaluator.Util.candles_util import CandlesUtil + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + return CandlesUtil.HL2( + candles_manager.get_symbol_high_candles(-1 if max_history else limit), + candles_manager.get_symbol_low_candles(-1 if max_history else limit) + ) + except ImportError: + raise RuntimeError("CandlesUtil tentacle is required to use HL2") + + +async def hlc3(context, symbol=None, time_frame=None, limit=-1, max_history=False): + try: + from tentacles.Evaluator.Util.candles_util import CandlesUtil + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + return CandlesUtil.HLC3( + candles_manager.get_symbol_high_candles(-1 if max_history else limit), + candles_manager.get_symbol_low_candles(-1 if max_history else limit), + candles_manager.get_symbol_close_candles(-1 if max_history else limit) + ) + except ImportError: + raise RuntimeError("CandlesUtil tentacle is required to use HLC3") + + +async def ohlc4(context, symbol=None, time_frame=None, limit=-1, max_history=False): + try: + from tentacles.Evaluator.Util.candles_util import CandlesUtil + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + return CandlesUtil.OHLC4( + candles_manager.get_symbol_open_candles(-1 if max_history else limit), + candles_manager.get_symbol_high_candles(-1 if max_history else limit), + candles_manager.get_symbol_low_candles(-1 if max_history else limit), + candles_manager.get_symbol_close_candles(-1 if max_history else limit) + ) + except ImportError: + raise RuntimeError("CandlesUtil tentacle is required to use OHLC4") + + +# Use capital letters to avoid python native lib conflicts +async def Volume(context, symbol=None, time_frame=None, limit=-1, max_history=False): + candles_manager = await _get_candle_manager(context, symbol, time_frame, max_history) + if isinstance(candles_manager, octobot_trading.exchange_data.PreloadedCandlesManager) and max_history: + return candles_manager.close_candles + return candles_manager.get_symbol_volume_candles(-1 if max_history else limit) + + +async def get_candles_from_name(ctx, source_name="low", time_frame=None, symbol=None, limit=-1, max_history=False): + """ + source_name can be: + "open", "high", "low", "close", "hl2", "hlc3", "ohlc4", "volume", + "Heikin Ashi close", "Heikin Ashi open", "Heikin Ashi high", "Heikin Ashi low" + """ + symbol = symbol or ctx.symbol + time_frame = time_frame or ctx.time_frame + if source_name == "close": + return await Close(ctx, symbol, time_frame, limit, max_history) + if source_name == "open": + return await Open(ctx, symbol, time_frame, limit, max_history) + if source_name == "high": + return await High(ctx, symbol, time_frame, limit, max_history) + if source_name == "low": + return await Low(ctx, symbol, time_frame, limit, max_history) + if source_name == "volume": + return await Volume(ctx, symbol, time_frame, limit, max_history) + if source_name == "time": + return await Time(ctx, symbol, time_frame, limit, max_history) + if source_name == "hl2": + return await hl2(ctx, symbol, time_frame, limit, max_history) + if source_name == "hlc3": + return await hlc3(ctx, symbol, time_frame, limit, max_history) + if source_name == "ohlc4": + return await ohlc4(ctx, symbol, time_frame, limit, max_history) + if "Heikin Ashi" in source_name: + haOpen, haHigh, haLow, haClose = CandlesUtil.HeikinAshi(await Open(ctx, symbol, time_frame, limit, max_history), + await High(ctx, symbol, time_frame, limit, max_history), + await Low(ctx, symbol, time_frame, limit, max_history), + await Close(ctx, symbol, time_frame, limit, max_history) + ) + if source_name == "Heikin Ashi close": + return haClose + if source_name == "Heikin Ashi open": + return haOpen + if source_name == "Heikin Ashi high": + return haHigh + if source_name == "Heikin Ashi low": + return haLow + + +async def _local_candles_manager(exchange_manager, symbol, time_frame, start_timestamp, end_timestamp): + # warning: should only be called with an exchange simulator (in backtesting) + ohlcv_data: list = await exchange_manager.exchange.exchange_importers[0].get_ohlcv( + exchange_name=exchange_manager.exchange_name, + symbol=symbol, + time_frame=commons_enums.TimeFrames(time_frame)) + chronological_candles = sorted(ohlcv_data, key=lambda candle: candle[0]) + full_candles_history = [ + ohlcv[-1] + for ohlcv in chronological_candles + if start_timestamp <= ohlcv[0] <= end_timestamp + ] + candles_manager = exchange_data.CandlesManager(max_candles_count=len(full_candles_history)) + await candles_manager.initialize() + candles_manager.replace_all_candles(full_candles_history) + return candles_manager + + +async def _get_candle_manager(context, symbol, time_frame, max_history): + symbol = symbol or context.symbol + time_frame = time_frame or context.time_frame + candle_manager = api.get_symbol_candles_manager( + api.get_symbol_data(context.exchange_manager, symbol, allow_creation=False), time_frame + ) + if max_history and context.exchange_manager.is_backtesting: + if isinstance(candle_manager, octobot_trading.exchange_data.PreloadedCandlesManager): + return candle_manager + start_timestamp = backtesting_api.get_backtesting_starting_time(context.exchange_manager.exchange.backtesting) + end_timestamp = backtesting_api.get_backtesting_ending_time(context.exchange_manager.exchange.backtesting) + _key = symbol + time_frame + str(start_timestamp) + str(end_timestamp) + try: + return run_persistence.get_shared_element(_key) + except KeyError: + run_persistence.set_shared_element( + _key, + await _local_candles_manager( + context.exchange_manager, symbol, time_frame, start_timestamp, end_timestamp + ) + ) + return run_persistence.get_shared_element(_key) + return candle_manager + + +def get_digits_adapted_price(context, price, truncate=True): + symbol_market = context.exchange_manager.exchange.get_market_status(context.symbol, with_fixer=False) + return personal_data.decimal_adapt_price(symbol_market, price, truncate=truncate) + + +def get_digits_adapted_amount(context, amount, truncate=True): + symbol_market = context.exchange_manager.exchange.get_market_status(context.symbol, with_fixer=False) + return personal_data.decimal_adapt_quantity(symbol_market, amount, truncate=truncate) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/reading/metadata_reader.py b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/metadata_reader.py new file mode 100644 index 0000000000..6a4ba5324e --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/metadata_reader.py @@ -0,0 +1,22 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.enums as commons_enums +import octobot_commons.databases as databases + + +class MetadataReader(databases.DBReader): + async def read(self) -> list: + return await self.all(commons_enums.DBTables.METADATA.value) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/reading/trading_settings.py b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/trading_settings.py new file mode 100644 index 0000000000..61f0d66cb0 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/reading/trading_settings.py @@ -0,0 +1,17 @@ +def set_initialized_evaluation(ctx, trading_mode, initialized=True, symbol=None, time_frame=None): + trading_mode.set_initialized_trading_pair_by_bot_id(symbol or ctx.symbol, time_frame or ctx.time_frame, initialized) + + +def get_initialized_evaluation(ctx, trading_mode, symbol=None, time_frame=None): + return trading_mode.get_initialized_trading_pair_by_bot_id(symbol or ctx.symbol, time_frame or ctx.time_frame) + + +def are_all_evaluation_initialized(ctx, trading_mode): + for symbol in ctx.exchange_manager.exchange_config.traded_symbol_pairs: + for time_frame in ctx.exchange_manager.exchange_config.get_relevant_time_frames(): + try: + if not get_initialized_evaluation(ctx, trading_mode, symbol=symbol, time_frame=time_frame.value): + return False + except KeyError: + return False + return True diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/writing/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/data/writing/__init__.py new file mode 100644 index 0000000000..7472c3d82d --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/writing/__init__.py @@ -0,0 +1 @@ +from .plotting import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/data/writing/plotting.py b/packages/tentacles/Meta/Keywords/scripting_library/data/writing/plotting.py new file mode 100644 index 0000000000..202d0593b0 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/data/writing/plotting.py @@ -0,0 +1,207 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import numpy + +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_public_data as exchange_public_data +import octobot_trading.modes.script_keywords as script_keywords +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants + + +async def disable_candles_plot(ctx, time_frame=None): + time_frame = time_frame or ctx.time_frame + if not ctx.symbol_writer.are_data_initialized_by_key.get(time_frame): + await script_keywords.disable_candles_plot(None, ctx.exchange_manager) + + +async def plot(ctx, title, x=None, + y=None, z=None, open=None, high=None, low=None, close=None, volume=None, + text=None, kind="scattergl", mode="lines", line_shape="linear", + condition=None, x_function=exchange_public_data.Time, + x_multiplier=1000, time_frame=None, + chart=commons_enums.PlotCharts.SUB_CHART.value, + cache_value=None, own_yaxis=False, color=None, size=None, shape=None, + shift_to_open_candle_time=True): + time_frame = time_frame or ctx.time_frame + if condition is not None and cache_value is None: + if isinstance(ctx.symbol_writer.get_serializable_value(condition), bool): + if condition: + x = numpy.array(((await x_function(ctx, ctx.symbol, time_frame))[-1],)) + y = numpy.array((y[-1],)) + else: + x = [] + y = [] + else: + candidate_y = [] + candidate_x = [] + x_data = (await x_function(ctx, ctx.symbol, time_frame))[-len(condition):] + y_data = y[-len(condition):] + for index, value in enumerate(condition): + if value: + candidate_y.append(y_data[index]) + candidate_x.append(x_data[index]) + x = numpy.array(candidate_x) + y = numpy.array(candidate_y) + count_query = { + "time_frame": ctx.time_frame, + } + cache_full_path = None + if cache_value is not None: + cache_full_path = ctx.get_cache_path(ctx.tentacle) + count_query["title"] = title + count_query["value"] = cache_full_path + + x_shift = -commons_enums.TimeFramesMinutes[commons_enums.TimeFrames(ctx.time_frame)] * \ + commons_constants.MINUTE_TO_SECONDS if shift_to_open_candle_time else 0 + if not await ctx.symbol_writer.contains_row( + commons_enums.DBTables.CACHE_SOURCE.value if cache_value is not None else title, + count_query + ): + if cache_value is not None: + table = commons_enums.DBTables.CACHE_SOURCE.value + # save x_shift to be applied when displaying and not to change actual cached values + cache_data = { + "title": title, + "text": text, + "time_frame": ctx.time_frame, + "value": cache_full_path, + "cache_value": cache_value, + "kind": kind, + "mode": mode, + "line_shape": line_shape, + "chart": chart, + "own_yaxis": own_yaxis, + "condition": condition, + "color": color, + "size": size, + "shape": shape, + "x_shift": x_shift, + } + update_query = await ctx.symbol_writer.search() + update_query = ((update_query.kind == kind) + & (update_query.mode == mode) + & (update_query.time_frame == ctx.time_frame) + & (update_query.title == title)) + await ctx.symbol_writer.upsert(table, cache_data, update_query) + else: + adapted_x = None + if x is not None: + try: + min_available_data = len(x) + except TypeError: + min_available_data = None + if y is not None: + min_available_data = len(y) + if isinstance(y, list) and not isinstance(x, list): + x = [x] * len(y) + if z is not None: + min_available_data = len(z) if min_available_data is None else min(min_available_data, len(z)) + if isinstance(z, list) and not isinstance(z, list): + x = [x] * len(z) + adapted_x = x[-min_available_data:] if min_available_data != len(x) else x + if adapted_x is None: + raise RuntimeError("No confirmed adapted_x") + adapted_x = [(a_x + x_shift) * x_multiplier for a_x in adapted_x] if isinstance(adapted_x, list) \ + else adapted_x * x_multiplier + await ctx.symbol_writer.log_many( + title, + [ + { + "x": value, + "y": _get_value_from_array(y, index), + "z": _get_value_from_array(z, index), + "open": _get_value_from_array(open, index), + "high": _get_value_from_array(high, index), + "low": _get_value_from_array(low, index), + "close": _get_value_from_array(close, index), + "volume": _get_value_from_array(volume, index), + "time_frame": ctx.time_frame, + "kind": kind, + "mode": mode, + "line_shape": line_shape, + "chart": chart, + "own_yaxis": own_yaxis, + "color": color, + "text": text, + "size": size, + "shape": shape, + } + for index, value in enumerate(adapted_x) + ], + cache=False + ) + elif cache_value is None and x is not None: + if isinstance(y, list) and not isinstance(x, list): + x = [x] * len(y) + elif isinstance(z, list) and not isinstance(x, list): + x = [x] * len(z) + if len(x) and \ + not await ctx.symbol_writer.contains_row(title, + {"x": _get_value_from_array(x, -1) * x_multiplier}): + x_value = (_get_value_from_array(x, -1) + x_shift) * x_multiplier + await ctx.symbol_writer.upsert( + title, + { + "time_frame": ctx.time_frame, + "x": x_value, + "y": _get_value_from_array(y, -1), + "z": _get_value_from_array(z, -1), + "open": _get_value_from_array(open, -1), + "high": _get_value_from_array(high, -1), + "low": _get_value_from_array(low, -1), + "close": _get_value_from_array(close, -1), + "volume": _get_value_from_array(volume, -1), + "kind": kind, + "mode": mode, + "line_shape": line_shape, + "chart": chart, + "own_yaxis": own_yaxis, + "color": color, + "text": text, + "size": size, + "shape": shape, + }, + None, + cache_query={"x": x_value} + ) + + +async def plot_shape(ctx, title, value, y_value, + chart=commons_enums.PlotCharts.SUB_CHART.value, + kind="scattergl", mode="markers", line_shape="linear", x_multiplier=1000): + if not await ctx.symbol_writer.contains_row(title, { + "x": ctx.x, + "time_frame": ctx.time_frame + }): + await ctx.symbol_writer.log( + title, + { + "time_frame": ctx.time_frame, + "x": (await exchange_public_data.current_candle_time(ctx)) * x_multiplier, + "y": y_value, + "value": ctx.symbol_writer.get_serializable_value(value), + "kind": kind, + "mode": mode, + "line_shape": line_shape, + "chart": chart, + } + ) + + +def _get_value_from_array(array, index, multiplier=1): + if array is None: + return None + return array[index] * multiplier diff --git a/packages/tentacles/Meta/Keywords/scripting_library/errors.py b/packages/tentacles/Meta/Keywords/scripting_library/errors.py new file mode 100644 index 0000000000..e67870e872 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/errors.py @@ -0,0 +1,18 @@ +class ScriptedLibraryError(Exception): + pass + + +class InvalidBacktestingDataError(ScriptedLibraryError): + pass + + +class MissingReadOnlyExchangeCredentialsError(ScriptedLibraryError): + pass + + +class InvalidProfileError(ScriptedLibraryError): + pass + + +class InvalidTentacleProfileError(InvalidProfileError): + pass diff --git a/packages/tentacles/Meta/Keywords/scripting_library/exchanges/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/exchanges/__init__.py new file mode 100644 index 0000000000..91d5dcebc1 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/exchanges/__init__.py @@ -0,0 +1 @@ +from tentacles.Meta.Keywords.scripting_library.exchanges.local_exchange import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/exchanges/local_exchange.py b/packages/tentacles/Meta/Keywords/scripting_library/exchanges/local_exchange.py new file mode 100644 index 0000000000..76e411b7ed --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/exchanges/local_exchange.py @@ -0,0 +1,29 @@ +import contextlib +import typing + +import octobot_trading.exchanges as exchanges +import octobot_trading.exchanges.util.exchange_data as exchange_data_import +import octobot_trading.util.config_util as config_util + + +@contextlib.asynccontextmanager +async def local_ccxt_exchange_manager( + exchange_data: exchange_data_import.ExchangeData, + tentacles_setup_config, + exchange_config_by_exchange: typing.Optional[dict[str, dict]] = None, +): + exchange_config = config_util.get_exchange_config( + exchange_data, tentacles_setup_config, exchange_config_by_exchange, False + ) + ignore_config = not exchanges.is_auth_required_exchanges( + exchange_data, tentacles_setup_config, exchange_config_by_exchange + ) + async with exchanges.get_local_exchange_manager( + exchange_data.exchange_details.name, exchange_config, tentacles_setup_config, + exchange_data.auth_details.sandboxed, ignore_config=ignore_config, + use_cached_markets=True, + is_broker_enabled=exchange_data.auth_details.broker_enabled, + exchange_config_by_exchange=exchange_config_by_exchange, + disable_unauth_retry=True, # unauth fallback is never required here, if auth fails, this should fail + ) as exchange_manager: + yield exchange_manager diff --git a/packages/tentacles/Meta/Keywords/scripting_library/metadata.json b/packages/tentacles/Meta/Keywords/scripting_library/metadata.json new file mode 100644 index 0000000000..319240c835 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/metadata.json @@ -0,0 +1,6 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": [], + "tentacles-requirements": [] +} \ No newline at end of file diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/__init__.py new file mode 100644 index 0000000000..8424e63c46 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/__init__.py @@ -0,0 +1,27 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from .order_types import * +from .position_size import * +from .order_tags import * +from .grouping import * +from .cancelling import * +from .editing import * +from .chaining import * +from .open_orders import * +from .waiting import * +from .mocks import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/cancelling.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/cancelling.py new file mode 100644 index 0000000000..7f6d9c0fd2 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/cancelling.py @@ -0,0 +1,53 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.enums as enums +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords +import tentacles.Meta.Keywords.scripting_library.orders.order_tags as order_tags + + +async def cancel_orders( + ctx, which="all", symbol=None, symbols=None, + cancel_loaded_orders=True, since: int or float = -1, + until: int or float = -1, +) -> bool: + symbols = symbols or [symbol] if symbol or symbols else [ctx.symbol] + orders = None + orders_canceled = False + side = None + if which == "all": + side = None + elif which == "sell": + side = enums.TradeOrderSide.SELL + elif which == "buy": + side = enums.TradeOrderSide.BUY + else: # tagged order + orders = order_tags.get_tagged_orders( + ctx, which, symbol=symbol, since=since, until=until) + if orders is not None: + for order in orders: + if await ctx.trader.cancel_order(order): + orders_canceled = True + if basic_keywords.is_emitting_trading_signals(ctx): + ctx.get_signal_builder().add_cancelled_order(order, ctx.trader.exchange_manager) + else: + for symbol in symbols: + orders_canceled, orders = await ctx.trader.cancel_open_orders( + symbol, cancel_loaded_orders=cancel_loaded_orders, + side=side, since=since, until=until) + if basic_keywords.is_emitting_trading_signals(ctx): + for order in orders: + ctx.get_signal_builder().add_cancelled_order(order, ctx.trader.exchange_manager) + return orders_canceled diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/chaining.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/chaining.py new file mode 100644 index 0000000000..513ec39be3 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/chaining.py @@ -0,0 +1,34 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.personal_data as personal_data + + +async def chain_order(base_order, chained_orders, update_with_triggering_order_fees=False) -> list: + # order creation return a list by default, handle it here + orders = [] + if isinstance(base_order, list): + if not base_order: + return orders + base_order = base_order[0] + if not isinstance(chained_orders, list): + chained_orders = [chained_orders] + for order in chained_orders: + await order.set_as_chained_order(base_order, False, {}, update_with_triggering_order_fees) + base_order.add_chained_order(order) + if base_order.is_filled() and order.should_be_created(): + await personal_data.create_as_chained_order(order) + orders.append(order) + return orders diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/editing.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/editing.py new file mode 100644 index 0000000000..02ea2df7ac --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/editing.py @@ -0,0 +1,47 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import decimal + +import octobot_trading.constants as trading_constants +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords + + +async def edit_order(ctx, order, + edited_quantity: decimal.Decimal = None, + edited_price: decimal.Decimal = None, + edited_stop_price: decimal.Decimal = None, + edited_current_price: decimal.Decimal = None, + params: dict = None) -> bool: + if not ctx.enable_trading: + return False + changed = await ctx.trader.edit_order( + order, + edited_quantity=edited_quantity, + edited_price=edited_price, + edited_stop_price=edited_stop_price, + edited_current_price=edited_current_price, + params=params, + ) + if basic_keywords.is_emitting_trading_signals(ctx): + ctx.get_signal_builder().add_edited_order( + order, + ctx.trader.exchange_manager, + updated_quantity=trading_constants.ZERO if edited_quantity is None else edited_quantity, + updated_limit_price=trading_constants.ZERO if edited_price is None else edited_price, + updated_stop_price=trading_constants.ZERO if edited_stop_price is None else edited_stop_price, + updated_current_price=trading_constants.ZERO if edited_current_price is None else edited_current_price + ) + return changed diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/grouping.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/grouping.py new file mode 100644 index 0000000000..5ee82de8b7 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/grouping.py @@ -0,0 +1,107 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords + + +def create_one_cancels_the_other_group(context, group_identifier=None, orders=None) \ + -> trading_personal_data.OneCancelsTheOtherOrderGroup: + """ + Should be used to create temporary groups binding localized orders, where this group can be + created once and directly associated to each order + """ + return _create_order_group(context, trading_personal_data.OneCancelsTheOtherOrderGroup, group_identifier, orders) + + +def get_or_create_one_cancels_the_other_group( + context, orders=None, include_chained_orders=True, + group_identifier=None) -> trading_personal_data.OneCancelsTheOtherOrderGroup: + """ + Should be used to manage long lasting groups that are meant to be re-used + First: looks for groups in orders + Second: looks for groups named as group_identifier + Third: creates a group named as group_identifier + """ + if group := get_group_from_orders(orders, include_chained_orders=include_chained_orders): + return group + return _get_or_create_order_group(context, trading_personal_data.OneCancelsTheOtherOrderGroup, group_identifier) + + +def create_balanced_take_profit_and_stop_group(context, group_identifier=None, orders=None) \ + -> trading_personal_data.BalancedTakeProfitAndStopOrderGroup: + """ + Should be used to create temporary groups binding localized orders, where this group can be + created once and directly associated to each order + """ + return _create_order_group(context, trading_personal_data.BalancedTakeProfitAndStopOrderGroup, + group_identifier, orders) + + +def get_or_create_balanced_take_profit_and_stop_group( + context, orders=None, include_chained_orders=True, + group_identifier=None) -> trading_personal_data.BalancedTakeProfitAndStopOrderGroup: + """ + Should be used to manage long lasting groups that are meant to be re-used + First: looks for groups in orders + Second: looks for groups named as group_identifier + Third: creates a group named as group_identifier + """ + if group := get_group_from_orders(orders, include_chained_orders=include_chained_orders): + return group + return _get_or_create_order_group(context, trading_personal_data.BalancedTakeProfitAndStopOrderGroup, + group_identifier) + + +def add_orders_to_group(ctx, order_group, orders): + orders = orders if isinstance(orders, list) else [orders] + for order in orders: + order.add_to_order_group(order_group) + if basic_keywords.is_emitting_trading_signals(ctx): + ctx.get_signal_builder().add_order_to_group(order, ctx.exchange_manager) + + +def get_group_from_orders(orders, include_chained_orders=True): + if orders is None: + return None + orders = orders if isinstance(orders, list) else [orders] + for order in orders: + if order.order_group is not None: + return order.order_group + if include_chained_orders: + if group := get_group_from_orders(order.chained_orders): + return group + return None + + +def get_open_orders_from_group(order_group): + return order_group.get_group_open_orders() + + +async def enable_group(order_group, enabled): + await order_group.enable(enabled) + + +def _create_order_group(context, group_type, group_identifier, orders) -> trading_personal_data.OrderGroup: + group = context.exchange_manager.exchange_personal_data.orders_manager.create_group(group_type, group_identifier) + if orders is not None: + add_orders_to_group(context, group, orders) + return group + + +def _get_or_create_order_group(context, group_type, group_identifier) -> trading_personal_data.OrderGroup: + return context.exchange_manager.exchange_personal_data.orders_manager.get_or_create_group(group_type, + group_identifier) + diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/mocks.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/mocks.py new file mode 100644 index 0000000000..f8dcd893ba --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/mocks.py @@ -0,0 +1,138 @@ +import decimal + +import octobot_trading.personal_data as personal_data + + +def minimal_order_amount(symbol): + return BYBIT_SYMBOLS_LIMIT_MIN_AMOUNT_EXTRACT[symbol] + + +def max_digits(symbol): + return BYBIT_SYMBOLS_AMOUNT_MAX_DIGITS_EXTRACT[symbol] + + +def adapt_digits(symbol, value): + if value is not None: + return personal_data.decimal_trunc_with_n_decimal_digits( + decimal.Decimal(str(value)), + decimal.Decimal(str(max_digits(symbol))), + truncate=True + ) + return value + + +# todo remove when symbol market status in backtesting data files +# extract from nov 2 2022 +BYBIT_SYMBOLS_LIMIT_MIN_AMOUNT_EXTRACT = { + "1INCH/USDT:USDT": 0.1, "AAVE/USDT:USDT": 0.01, "ACH/USDT:USDT": 10.0, "ADA/USD:ADA": 1.0, "ADA/USDT:USDT": 1.0, + "AGLD/USDT:USDT": 0.1, "AKRO/USDT:USDT": 100.0, "ALGO/USDT:USDT": 0.1, "ALICE/USDT:USDT": 0.1, + "ALPHA/USDT:USDT": 1.0, + "ANKR/USDT:USDT": 1.0, "ANT/USDT:USDT": 0.1, "APE/USDT:USDT": 0.1, "API3/USDT:USDT": 0.1, "APT/USDT:USDT": 0.01, + "ARPA/USDT:USDT": 10.0, "AR/USDT:USDT": 0.1, "ASTR/USDT:USDT": 1.0, "ATOM/USDT:USDT": 0.1, "AUDIO/USDT:USDT": 0.1, + "AVAX/USDT:USDT": 0.1, "AXS/USDT:USDT": 0.1, "BAKE/USDT:USDT": 0.1, "BAL/USDT:USDT": 0.01, "BAND/USDT:USDT": 0.1, + "BAT/USDT:USDT": 0.1, "BCH/USDT:USDT": 0.01, "BEL/USDT:USDT": 1.0, "BICO/USDT:USDT": 0.1, "BIT/USD:BIT": 1.0, + "BIT/USDT:USDT": 0.1, "BLZ/USDT:USDT": 1.0, "BNB/USDT:USDT": 0.01, "BNX/USDT:USDT": 0.01, "BOBA/USDT:USDT": 0.1, + "BSV/USDT:USDT": 0.01, "BSW/USDT:USDT": 1.0, "BTC/USD:BTC": 1.0, "BTC/USDT:USDT": 0.001, "C98/USDT:USDT": 0.1, + "CEEK/USDT:USDT": 1.0, "CELO/USDT:USDT": 0.1, "CELR/USDT:USDT": 1.0, "CHR/USDT:USDT": 0.1, "CHZ/USDT:USDT": 1.0, + "CKB/USDT:USDT": 10.0, "COMP/USDT:USDT": 0.01, "COTI/USDT:USDT": 1.0, "CREAM/USDT:USDT": 0.01, "CRO/USDT:USDT": 1.0, + "CRV/USDT:USDT": 0.1, "CTC/USDT:USDT": 1.0, "CTK/USDT:USDT": 0.1, "CTSI/USDT:USDT": 1.0, "CVC/USDT:USDT": 1.0, + "CVX/USDT:USDT": 0.01, "DAR/USDT:USDT": 0.1, "DASH/USDT:USDT": 0.01, "DENT/USDT:USDT": 100.0, "DGB/USDT:USDT": 10.0, + "DODO/USDT:USDT": 1.0, "DOGE/USDT:USDT": 1.0, "DOT/USD:DOT": 1.0, "DOT/USDT:USDT": 0.1, "DUSK/USDT:USDT": 1.0, + "DYDX/USDT:USDT": 0.1, "EGLD/USDT:USDT": 0.01, "ENJ/USDT:USDT": 0.1, "ENS/USDT:USDT": 0.1, "EOS/USD:EOS": 1.0, + "EOS/USDT:USDT": 0.1, "ETC/USDT:USDT": 0.1, "ETH/USD:ETH": 1.0, "ETH/USDT:USDT": 0.01, "ETHW/USDT:USDT": 0.01, + "FIL/USDT:USDT": 0.1, "FITFI/USDT:USDT": 1.0, "FLM/USDT:USDT": 1.0, "FLOW/USDT:USDT": 0.1, "FTM/USDT:USDT": 1.0, + "FTT/USDT:USDT": 0.1, "FXS/USDT:USDT": 0.01, "GALA/USDT:USDT": 1.0, "GAL/USDT:USDT": 0.01, "GLMR/USDT:USDT": 0.1, + "GMT/USDT:USDT": 1.0, "GMX/USDT:USDT": 0.01, "GRT/USDT:USDT": 0.1, "GTC/USDT:USDT": 0.1, "HBAR/USDT:USDT": 1.0, + "HNT/USDT:USDT": 0.01, "HOT/USDT:USDT": 100.0, "ICP/USDT:USDT": 0.1, "ICX/USDT:USDT": 1.0, "ILV/USDT:USDT": 0.01, + "IMX/USDT:USDT": 0.1, "INJ/USDT:USDT": 0.1, "IOST/USDT:USDT": 1.0, "IOTA/USDT:USDT": 0.1, "IOTX/USDT:USDT": 1.0, + "JASMY/USDT:USDT": 1.0, "JST/USDT:USDT": 10.0, "KAVA/USDT:USDT": 0.1, "KDA/USDT:USDT": 0.1, "KLAY/USDT:USDT": 0.1, + "KNC/USDT:USDT": 0.1, "KSM/USDT:USDT": 0.01, "LDO/USDT:USDT": 0.1, "LINA/USDT:USDT": 10.0, "LINK/USDT:USDT": 0.1, + "LIT/USDT:USDT": 0.1, "LOOKS/USDT:USDT": 0.1, "LPT/USDT:USDT": 0.1, "LRC/USDT:USDT": 0.1, "LTC/USD:LTC": 1.0, + "LTC/USDT:USDT": 0.1, "LUNA2/USDT:USDT": 0.1, "MANA/USD:MANA": 1.0, "MANA/USDT:USDT": 0.1, "MASK/USDT:USDT": 0.1, + "MATIC/USDT:USDT": 1.0, "MINA/USDT:USDT": 0.1, "MKR/USDT:USDT": 0.001, "MTL/USDT:USDT": 0.1, "NEAR/USDT:USDT": 0.1, + "NEO/USDT:USDT": 0.01, "OCEAN/USDT:USDT": 1.0, "OGN/USDT:USDT": 1.0, "OMG/USDT:USDT": 0.1, "ONE/USDT:USDT": 1.0, + "ONT/USDT:USDT": 1.0, "OP/USDT:USDT": 0.1, "PAXG/USDT:USDT": 0.001, "PEOPLE/USDT:USDT": 1.0, "QTUM/USDT:USDT": 0.1, + "RAY/USDT:USDT": 0.1, "REEF/USDT:USDT": 10.0, "REN/USDT:USDT": 0.1, "REQ/USDT:USDT": 1.0, "RNDR/USDT:USDT": 0.1, + "ROSE/USDT:USDT": 1.0, "RSR/USDT:USDT": 10.0, "RSS3/USDT:USDT": 1.0, "RUNE/USDT:USDT": 0.1, "RVN/USDT:USDT": 1.0, + "SAND/USDT:USDT": 1.0, "SCRT/USDT:USDT": 0.1, "SC/USDT:USDT": 10.0, "SFP/USDT:USDT": 0.1, "SKL/USDT:USDT": 1.0, + "SLP/USDT:USDT": 10.0, "SNX/USDT:USDT": 0.1, "SOL/USD:SOL": 1.0, "SOL/USDT:USDT": 0.1, "SPELL/USDT:USDT": 10.0, + "SRM/USDT:USDT": 0.1, "STG/USDT:USDT": 0.1, "STMX/USDT:USDT": 10.0, "STORJ/USDT:USDT": 0.1, "STX/USDT:USDT": 0.1, + "SUN/USDT:USDT": 10.0, "SUSHI/USDT:USDT": 0.1, "SXP/USDT:USDT": 0.1, "THETA/USDT:USDT": 0.1, "TLM/USDT:USDT": 1.0, + "TOMO/USDT:USDT": 0.1, "TRB/USDT:USDT": 0.01, "TRX/USDT:USDT": 1.0, "UNFI/USDT:USDT": 0.1, "UNI/USDT:USDT": 0.1, + "USDC/USDT:USDT": 0.1, "VET/USDT:USDT": 1.0, "WAVES/USDT:USDT": 0.1, "WOO/USDT:USDT": 0.1, "XCN/USDT:USDT": 10.0, + "XEM/USDT:USDT": 1.0, "XLM/USDT:USDT": 1.0, "XMR/USDT:USDT": 0.01, "XNO/USDT:USDT": 1.0, "XRP/USD:XRP": 1.0, + "XRP/USDT:USDT": 1.0, "XTZ/USDT:USDT": 0.1, "YFI/USDT:USDT": 0.0001, "YGG/USDT:USDT": 0.1, "ZEC/USDT:USDT": 0.01, + "ZEN/USDT:USDT": 0.1, "ZIL/USDT:USDT": 10.0, "ZRX/USDT:USDT": 1.0, "BTC/USD:USDC": 0.001, "ETC/USD:USDC": 0.1, + "MATIC/USD:USDC": 1.0, "OP/USD:USDC": 1.0, "ETH/USD:USDC": 0.01, "GMT/USD:USDC": 1.0, "ADA/USD:USDC": 1.0, + "AVAX/USD:USDC": 0.01, "SOL/USD:USDC": 0.1, "XRP/USD:USDC": 1.0, "SAND/USD:USDC": 1.0, "APE/USD:USDC": 0.1, + "SWEAT/USD:USDC": 100.0, "ATOM/USD:USDC": 0.1, "EOS/USD:USDC": 0.1, "CHZ/USD:USDC": 1.0, "NEAR/USD:USDC": 0.1, + "BNB/USD:USDC": 0.01, "LDO/USD:USDC": 0.1, "LUNA/USD:USDC": 0.1, "APT/USD:USDC": 0.01} + +BYBIT_SYMBOLS_AMOUNT_MAX_DIGITS_EXTRACT = { + "1INCH/USDT:USDT": 1, "AAVE/USDT:USDT": 2, "ACH/USDT:USDT": 1, + "ADA/USD:ADA": 0, "ADA/USDT:USDT": 0, "AGLD/USDT:USDT": 1, + "AKRO/USDT:USDT": 2, "ALGO/USDT:USDT": 1, "ALICE/USDT:USDT": 1, + "ALPHA/USDT:USDT": 0, "ANKR/USDT:USDT": 0, "ANT/USDT:USDT": 1, + "APE/USDT:USDT": 1, "API3/USDT:USDT": 1, "APT/USDT:USDT": 2, + "ARPA/USDT:USDT": 1, "AR/USDT:USDT": 1, "ASTR/USDT:USDT": 0, + "ATOM/USDT:USDT": 1, "AUDIO/USDT:USDT": 1, "AVAX/USDT:USDT": 1, + "AXS/USDT:USDT": 1, "BAKE/USDT:USDT": 1, "BAL/USDT:USDT": 2, + "BAND/USDT:USDT": 1, "BAT/USDT:USDT": 1, "BCH/USDT:USDT": 2, + "BEL/USDT:USDT": 0, "BICO/USDT:USDT": 1, "BIT/USD:BIT": 0, + "BIT/USDT:USDT": 1, "BLZ/USDT:USDT": 0, "BNB/USDT:USDT": 2, + "BNX/USDT:USDT": 2, "BOBA/USDT:USDT": 1, "BSV/USDT:USDT": 2, + "BSW/USDT:USDT": 0, "BTC/USD:BTC": 0, "BTC/USDT:USDT": 3, "C98/USDT:USDT": 1, + "CEEK/USDT:USDT": 0, "CELO/USDT:USDT": 1, "CELR/USDT:USDT": 0, + "CHR/USDT:USDT": 1, "CHZ/USDT:USDT": 0, "CKB/USDT:USDT": 1, + "COMP/USDT:USDT": 2, "COTI/USDT:USDT": 0, "CREAM/USDT:USDT": 2, + "CRO/USDT:USDT": 0, "CRV/USDT:USDT": 1, "CTC/USDT:USDT": 0, + "CTK/USDT:USDT": 1, "CTSI/USDT:USDT": 0, "CVC/USDT:USDT": 0, + "CVX/USDT:USDT": 2, "DAR/USDT:USDT": 1, "DASH/USDT:USDT": 2, + "DENT/USDT:USDT": 2, "DGB/USDT:USDT": 1, "DODO/USDT:USDT": 0, + "DOGE/USDT:USDT": 0, "DOT/USD:DOT": 0, "DOT/USDT:USDT": 1, + "DUSK/USDT:USDT": 0, "DYDX/USDT:USDT": 1, "EGLD/USDT:USDT": 2, + "ENJ/USDT:USDT": 1, "ENS/USDT:USDT": 1, "EOS/USD:EOS": 0, "EOS/USDT:USDT": 1, + "ETC/USDT:USDT": 1, "ETH/USD:ETH": 0, "ETH/USDT:USDT": 2, + "ETHW/USDT:USDT": 2, "FIL/USDT:USDT": 1, "FITFI/USDT:USDT": 0, + "FLM/USDT:USDT": 0, "FLOW/USDT:USDT": 1, "FTM/USDT:USDT": 0, + "FTT/USDT:USDT": 1, "FXS/USDT:USDT": 2, "GALA/USDT:USDT": 0, + "GAL/USDT:USDT": 2, "GLMR/USDT:USDT": 1, "GMT/USDT:USDT": 0, + "GMX/USDT:USDT": 2, "GRT/USDT:USDT": 1, "GTC/USDT:USDT": 1, + "HBAR/USDT:USDT": 0, "HNT/USDT:USDT": 2, "HOT/USDT:USDT": 2, + "ICP/USDT:USDT": 1, "ICX/USDT:USDT": 0, "ILV/USDT:USDT": 2, + "IMX/USDT:USDT": 1, "INJ/USDT:USDT": 1, "IOST/USDT:USDT": 0, + "IOTA/USDT:USDT": 1, "IOTX/USDT:USDT": 0, "JASMY/USDT:USDT": 0, + "JST/USDT:USDT": 1, "KAVA/USDT:USDT": 1, "KDA/USDT:USDT": 1, + "KLAY/USDT:USDT": 1, "KNC/USDT:USDT": 1, "KSM/USDT:USDT": 2, + "LDO/USDT:USDT": 1, "LINA/USDT:USDT": 1, "LINK/USDT:USDT": 1, + "LIT/USDT:USDT": 1, "LOOKS/USDT:USDT": 1, "LPT/USDT:USDT": 1, + "LRC/USDT:USDT": 1, "LTC/USD:LTC": 0, "LTC/USDT:USDT": 1, + "LUNA2/USDT:USDT": 1, "MANA/USD:MANA": 0, "MANA/USDT:USDT": 1, + "MASK/USDT:USDT": 1, "MATIC/USDT:USDT": 0, "MINA/USDT:USDT": 1, + "MKR/USDT:USDT": 3, "MTL/USDT:USDT": 1, "NEAR/USDT:USDT": 1, + "NEO/USDT:USDT": 2, "OCEAN/USDT:USDT": 0, "OGN/USDT:USDT": 0, + "OMG/USDT:USDT": 1, "ONE/USDT:USDT": 0, "ONT/USDT:USDT": 0, + "OP/USDT:USDT": 1, "PAXG/USDT:USDT": 3, "PEOPLE/USDT:USDT": 0, + "QTUM/USDT:USDT": 1, "RAY/USDT:USDT": 1, "REEF/USDT:USDT": 1, + "REN/USDT:USDT": 1, "REQ/USDT:USDT": 0, "RNDR/USDT:USDT": 1, + "ROSE/USDT:USDT": 0, "RSR/USDT:USDT": 1, "RSS3/USDT:USDT": 0, + "RUNE/USDT:USDT": 1, "RVN/USDT:USDT": 0, "SAND/USDT:USDT": 0, + "SCRT/USDT:USDT": 1, "SC/USDT:USDT": 1, "SFP/USDT:USDT": 1, + "SKL/USDT:USDT": 0, "SLP/USDT:USDT": 1, "SNX/USDT:USDT": 1, "SOL/USD:SOL": 0, + "SOL/USDT:USDT": 1, "SPELL/USDT:USDT": 1, "SRM/USDT:USDT": 1, + "STG/USDT:USDT": 1, "STMX/USDT:USDT": 1, "STORJ/USDT:USDT": 1, + "STX/USDT:USDT": 1, "SUN/USDT:USDT": 1, "SUSHI/USDT:USDT": 1, + "SXP/USDT:USDT": 1, "THETA/USDT:USDT": 1, "TLM/USDT:USDT": 0, + "TOMO/USDT:USDT": 1, "TRB/USDT:USDT": 2, "TRX/USDT:USDT": 0, + "UNFI/USDT:USDT": 1, "UNI/USDT:USDT": 1, "USDC/USDT:USDT": 1, + "VET/USDT:USDT": 0, "WAVES/USDT:USDT": 1, "WOO/USDT:USDT": 1, + "XCN/USDT:USDT": 1, "XEM/USDT:USDT": 0, "XLM/USDT:USDT": 0, + "XMR/USDT:USDT": 2, "XNO/USDT:USDT": 0, "XRP/USD:XRP": 0, "XRP/USDT:USDT": 0, + "XTZ/USDT:USDT": 1, "YFI/USDT:USDT": 4, "YGG/USDT:USDT": 1, + "ZEC/USDT:USDT": 2, "ZEN/USDT:USDT": 1, "ZIL/USDT:USDT": 1, + "ZRX/USDT:USDT": 0, "BTC/USD:USDC": 3, "ETC/USD:USDC": 1, + "MATIC/USD:USDC": 0, "OP/USD:USDC": 0, "ETH/USD:USDC": 2, "GMT/USD:USDC": 0, + "ADA/USD:USDC": 0, "AVAX/USD:USDC": 2, "SOL/USD:USDC": 1, "XRP/USD:USDC": 0, + "SAND/USD:USDC": 0, "APE/USD:USDC": 1, "SWEAT/USD:USDC": 2, + "ATOM/USD:USDC": 1, "EOS/USD:USDC": 1, "CHZ/USD:USDC": 0, "NEAR/USD:USDC": 1, + "BNB/USD:USDC": 2, "LDO/USD:USDC": 1, "LUNA/USD:USDC": 1, "APT/USD:USDC": 2} diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/open_orders.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/open_orders.py new file mode 100644 index 0000000000..63fc9b1916 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/open_orders.py @@ -0,0 +1,19 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +def get_open_orders(context): + return context.exchange_manager.exchange_personal_data.orders_manager.get_open_orders(symbol=context.symbol) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_tags.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_tags.py new file mode 100644 index 0000000000..fda2a0265e --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_tags.py @@ -0,0 +1,23 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +def get_tagged_orders( + ctx, tag, symbol=None, since: int or float = -1, until: int or float = -1 +): + return ctx.exchange_manager.exchange_personal_data.orders_manager.get_open_orders( + symbol=symbol, tag=tag, since=since, until=until + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/__init__.py new file mode 100644 index 0000000000..b40894eb55 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/__init__.py @@ -0,0 +1,24 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from .limit_order import * +from .market_order import * +from .stop_loss_order import * +from .trailing_market_order import * +from .trailing_limit_order import * +from .trailing_stop_loss_order import * +from .scaled_order import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/create_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/create_order.py new file mode 100644 index 0000000000..4f567912cd --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/create_order.py @@ -0,0 +1,450 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio + +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.constants as trading_constants +import octobot_trading.enums as trading_enums +import octobot_trading.errors as trading_errors +import octobot_trading.modes.script_keywords.basic_keywords as basic_keywords +import octobot_trading.modes.script_keywords as script_keywords +import tentacles.Meta.Keywords.scripting_library.settings as settings +import tentacles.Meta.Keywords.scripting_library.orders.position_size as position_size +import tentacles.Meta.Keywords.scripting_library.orders.chaining as chaining +import tentacles.Meta.Keywords.scripting_library.orders.grouping as grouping +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_private_data as exchange_private_data + + +async def create_order_instance( + context, + side=None, + symbol=None, + + order_amount=None, + order_target_position=None, + + stop_loss_offset=None, + stop_loss_tag=None, + stop_loss_type=None, + stop_loss_group=False, + take_profit_offset=None, + take_profit_tag=None, + take_profit_type=None, + take_profit_group=False, + + order_type_name=None, + + order_offset=None, + order_min_offset=None, + order_max_offset=None, + order_limit_offset=None, # todo + + slippage_limit=None, + time_limit=None, + + reduce_only=False, + post_only=False, # Todo + tag=None, + + group=None, + wait_for=None +): + if not context.enable_trading or _paired_order_is_closed(context, group): + return [] + async with context.exchange_manager.exchange_personal_data.portfolio_manager.portfolio.lock: + # ensure proper trader allow_artificial_orders value + settings.set_allow_artificial_orders(context, context.allow_artificial_orders) + unknown_portfolio_on_creation = wait_for is not None and any(o.is_open() for o in wait_for) + input_side = side + order_quantity, side = await _get_order_quantity_and_side(context, order_amount, order_target_position, + order_type_name, input_side, reduce_only, + unknown_portfolio_on_creation) + + order_type, order_price, final_side, reduce_only, trailing_method, \ + min_offset_val, max_offset_val, order_limit_offset, limit_offset_val = \ + await _get_order_details(context, order_type_name, side, order_offset, reduce_only, order_limit_offset) + + stop_loss_price = None if stop_loss_offset is None else await script_keywords.get_price_with_offset( + context, stop_loss_offset + ) + take_profit_price = None if take_profit_offset is None else await script_keywords.get_price_with_offset( + context, take_profit_offset + ) + # round down when not reduce only and up when reduce only to avoid letting small positions open + truncate = not reduce_only + return await _create_order(context=context, symbol=symbol, order_quantity=order_quantity, + order_price=order_price, tag=tag, order_type_name=order_type_name, + input_side=input_side, side=side, final_side=final_side, + order_type=order_type, order_min_offset=order_min_offset, + max_offset_val=max_offset_val, reduce_only=reduce_only, group=group, + stop_loss_price=stop_loss_price, stop_loss_tag=stop_loss_tag, + stop_loss_type=stop_loss_type, stop_loss_group=stop_loss_group, + take_profit_price=take_profit_price, take_profit_tag=take_profit_tag, + take_profit_type=take_profit_type, take_profit_group=take_profit_group, + wait_for=wait_for, truncate=truncate, order_amount=order_amount, + order_target_position=order_target_position) + + +async def _get_order_percents(context, order_amount, order_target_position, input_side, symbol): + order_pf_percent = None + if order_amount is not None: + quantity_type, quantity = script_keywords.parse_quantity(order_amount) + if quantity_type in (script_keywords.QuantityType.PERCENT, script_keywords.QuantityType.AVAILABLE_PERCENT): + order_pf_percent = order_amount + elif quantity_type in (script_keywords.QuantityType.DELTA, script_keywords.QuantityType.DELTA_BASE): + percent = await script_keywords.get_order_size_portfolio_percent( + context, quantity, input_side, symbol + ) + order_pf_percent = f"{float(percent)}{script_keywords.QuantityType.PERCENT.value}" + else: + raise trading_errors.InvalidArgumentError(f"Unsupported quantity for trading signals: {order_amount}") + order_position_percent = None + if order_target_position is not None: + quantity_type, quantity = script_keywords.parse_quantity(order_target_position) + if quantity_type in (script_keywords.QuantityType.PERCENT, + script_keywords.QuantityType.AVAILABLE_PERCENT): + # position out of pf % here + order_pf_percent = order_target_position + elif quantity_type is script_keywords.QuantityType.POSITION_PERCENT: + order_position_percent = order_target_position + elif quantity_type is script_keywords.QuantityType.DELTA: + percent = order_target_position * exchange_private_data.open_position_size(context) \ + * trading_constants.ONE_HUNDRED + order_position_percent = f"{float(percent)}{script_keywords.QuantityType.POSITION_PERCENT.value}" + return order_pf_percent, order_position_percent + + +def _paired_order_is_closed(context, group): + grouped_orders = [] if group is None else group.get_group_open_orders() + if group is not None and grouped_orders and all(order.is_closed() for order in grouped_orders): + return True + for order in context.just_created_orders: + if order is not None: + if isinstance(order.order_group, trading_personal_data.OneCancelsTheOtherOrderGroup)\ + and order.order_group == group and order.is_closed(): + return True + return False + + +def _use_total_holding(order_type_name): + return _is_stop_order(order_type_name) + + +def _is_stop_order(order_type_name): + return "stop" in order_type_name + + +async def _get_order_quantity_and_side(context, order_amount, order_target_position, + order_type_name, side, reduce_only, unknown_portfolio_on_creation): + if order_amount is not None and order_target_position is not None: + raise trading_errors.InvalidArgumentError("order_amount and order_target_position can't be " + "both given as parameter") + + use_total_holding = _use_total_holding(order_type_name) + is_stop_order = _is_stop_order(order_type_name) + # size based on amount + if side is not None and order_amount is not None: + # side + if side != trading_enums.TradeOrderSide.BUY.value and side != trading_enums.TradeOrderSide.SELL.value: + # we should skip that cause of performance + raise trading_errors.InvalidArgumentError( + f"Side parameter needs to be {trading_enums.TradeOrderSide.BUY.value} " + f"or {trading_enums.TradeOrderSide.SELL.value} for your {order_type_name}.") + return await position_size.get_amount(context, order_amount, side, reduce_only, is_stop_order, + use_total_holding=use_total_holding, + unknown_portfolio_on_creation=unknown_portfolio_on_creation), side + + # size and side based on target position + if order_target_position is not None: + return await position_size.get_target_position(context, order_target_position, reduce_only, is_stop_order, + use_total_holding=use_total_holding, + unknown_portfolio_on_creation=unknown_portfolio_on_creation) + + raise trading_errors.InvalidArgumentError("Either use side with amount or target_position.") + + +async def _get_order_details(context, order_type_name, side, order_offset, reduce_only, order_limit_offset): + # order types + order_type = None + final_side = side + order_price = None + min_offset_val = None + max_offset_val = None + limit_offset_val = None + trailing_method = None + + # normal order + if order_type_name == "market": + order_type = trading_enums.TraderOrderType.SELL_MARKET if side == trading_enums.TradeOrderSide.SELL.value \ + else trading_enums.TraderOrderType.BUY_MARKET + order_price = await script_keywords.get_price_with_offset(context, "0") + final_side = None # needs to be None + + elif order_type_name == "limit": + order_type = trading_enums.TraderOrderType.SELL_LIMIT if side == trading_enums.TradeOrderSide.SELL.value \ + else trading_enums.TraderOrderType.BUY_LIMIT + order_price = await script_keywords.get_price_with_offset(context, order_offset) + final_side = None # needs to be None + # todo post only + + # conditional orders + # should be a real SL on the exchange short and long + elif order_type_name == "stop_loss": + order_type = trading_enums.TraderOrderType.STOP_LOSS + final_side = trading_enums.TradeOrderSide.SELL if side == trading_enums.TradeOrderSide.SELL.value \ + else trading_enums.TradeOrderSide.BUY + order_price = await script_keywords.get_price_with_offset(context, order_offset) + reduce_only = True + + # should be conditional order on the exchange + elif order_type_name == "stop_market": + order_type = None # todo + order_price = await script_keywords.get_price_with_offset(context, order_offset) + + # has a trigger price and a offset where the limit gets placed when triggered - + # conditional order on exchange possible? + elif order_type_name == "stop_limit": + order_type = None # todo + order_price = await script_keywords.get_price_with_offset(context, order_offset) + order_limit_offset = await script_keywords.get_price_with_offset(context, order_offset) + # todo post only + + # trailling orders + # should be a real trailing stop loss on the exchange - short and long + elif order_type_name == "trailing_stop_loss": + order_price = await script_keywords.get_price_with_offset(context, order_offset) + order_type = None # todo + reduce_only = True + trailing_method = "continuous" + # todo make sure order gets replaced by market if price jumped below price before order creation + + # todo should use trailing on exchange if available or replace order on exchange + elif order_type_name == "trailing_market": + order_price = await script_keywords.get_price_with_offset(context, order_offset) + trailing_method = "continuous" + order_type = trading_enums.TraderOrderType.TRAILING_STOP + final_side = trading_enums.TradeOrderSide.SELL if side == trading_enums.TradeOrderSide.SELL.value \ + else trading_enums.TradeOrderSide.BUY + + # todo should use trailing on exchange if available or replace order on exchange + elif order_type_name == "trailing_limit": + order_type = trading_enums.TraderOrderType.TRAILING_STOP_LIMIT + final_side = trading_enums.TradeOrderSide.SELL if side == trading_enums.TradeOrderSide.SELL.value \ + else trading_enums.TradeOrderSide.BUY + trailing_method = "continuous" + min_offset_val = await script_keywords.get_price_with_offset(context, order_offset) + # todo If the price changes such that the order becomes more than maxOffset away from the + # price, then the order will be moved to minOffset away again. + max_offset_val = await script_keywords.get_price_with_offset(context, order_offset) + # todo post only + + return order_type, order_price, final_side, reduce_only, trailing_method, \ + min_offset_val, max_offset_val, order_limit_offset, limit_offset_val + + +async def _create_order(context, symbol, order_quantity, order_price, tag, order_type_name, input_side, side, + final_side, + order_type, order_min_offset, max_offset_val, reduce_only, group, + stop_loss_price, stop_loss_tag, stop_loss_type, stop_loss_group, + take_profit_price, take_profit_tag, take_profit_type, take_profit_group, + wait_for, truncate, order_amount, order_target_position): + # todo handle offsets, reduce_only, post_only, + orders = [] + error_message = "" + chained_orders_group = _get_group_or_default(context, group, stop_loss_price, take_profit_price) + order_pf_percent = order_position_percent = None + if basic_keywords.is_emitting_trading_signals(context): + order_pf_percent, order_position_percent = await _get_order_percents(context, order_amount, + order_target_position, input_side, symbol) + try: + fees_currency_side = None + if context.exchange_manager.is_future: + fees_currency_side = context.exchange_manager.exchange.get_pair_contract(symbol).\ + get_fees_currency_side() + _, _, _, current_price, symbol_market = \ + await trading_personal_data.get_pre_order_data(context.exchange_manager, + symbol=symbol, + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT) + group_adapted_quantity = _get_group_adapted_quantity(context, group, order_type, order_quantity) + for final_order_quantity, final_order_price in \ + trading_personal_data.decimal_check_and_adapt_order_details_if_necessary( + group_adapted_quantity, + order_price, + symbol_market, + truncate=truncate + ): + if not truncate: + # ensure enough money to trade (because of upper rounding) + available_acc_bal = await script_keywords.available_account_balance( + context, side, use_total_holding=_use_total_holding(order_type_name), + is_stop_order=_is_stop_order(order_type_name), reduce_only=reduce_only) + if final_order_quantity > available_acc_bal: + final_order_quantity = trading_personal_data.decimal_adapt_quantity( + symbol_market, available_acc_bal, truncate=True + ) + created_order = trading_personal_data.create_order_instance( + trader=context.trader, + order_type=order_type, + symbol=symbol, + current_price=current_price, + quantity=final_order_quantity, + price=final_order_price, + side=final_side, + tag=tag, + group=group, + reduce_only=reduce_only, + fees_currency_side=fees_currency_side + ) + if order_min_offset is not None: + await created_order.set_trailing_percent(order_min_offset) + if wait_for: + chained_orders = await chaining.chain_order(wait_for, created_order) + else: + stop_loss_take_profit_quantity = final_order_quantity + fees = created_order.get_computed_fee() + if fees[trading_enums.FeePropertyColumns.CURRENCY.value] == created_order.quantity_currency: + stop_loss_take_profit_quantity = final_order_quantity - \ + fees[trading_enums.FeePropertyColumns.COST.value] + stop_loss_take_profit_quantity = trading_personal_data.decimal_adapt_quantity( + symbol_market, stop_loss_take_profit_quantity, truncate=True + ) + params = await _bundle_stop_loss_and_take_profit( + context, symbol_market, fees_currency_side, created_order, stop_loss_take_profit_quantity, + chained_orders_group, + stop_loss_tag, stop_loss_type, stop_loss_price, stop_loss_group, + take_profit_tag, take_profit_type, take_profit_price, take_profit_group, + order_pf_percent, order_position_percent) + chained_orders = created_order.chained_orders + created_order = await context.trader.create_order(created_order, params=params) + if basic_keywords.is_emitting_trading_signals(context): + context.get_signal_builder().add_created_order(created_order, context.trader.exchange_manager, + order_pf_percent, order_position_percent) + created_chained_orders = [order + for order in chained_orders + if order.is_created()] + # add chained order if any + context.just_created_orders += created_chained_orders + if wait_for: + # base order to be created are actually the chained orders, return them if created + orders += created_chained_orders + else: + # add create base order + orders.append(created_order) + context.just_created_orders.append(created_order) + except (trading_errors.MissingFunds, trading_errors.MissingMinimalExchangeTradeVolume): + error_message = "missing minimal funds" + except asyncio.TimeoutError as e: + error_message = f"{e} and is necessary to compute the order details" + except Exception as e: + error_message = f"failed to create order : {e}." + context.logger.exception(e, True, f"Failed to create order : {e}.") + if not orders: + error_message = f"not enough funds" + if error_message: + context.logger.warning(f"No order created when asking for {symbol} {order_type.name} " + f"with a volume of {order_quantity} on {context.exchange_manager.exchange_name}: " + f"{error_message}.") + return orders + + +def _get_group_adapted_quantity(context, group, order_type, order_quantity): + if isinstance(group, trading_personal_data.BalancedTakeProfitAndStopOrderGroup) and context.just_created_orders: + all_take_profit = all_stop = True + is_creating_stop_order = trading_personal_data.is_stop_order(order_type) + + for order in context.just_created_orders: + if order.order_group == group: + if trading_personal_data.is_stop_order(order.order_type): + all_take_profit = False + else: + all_stop = False + if (is_creating_stop_order and all_stop) or (not is_creating_stop_order and all_take_profit): + # we are only creating stop / take profit orders, no need to balance + return order_quantity + # we are now adding the order side of the orders, we need to balance + if group.can_create_order(order_type, order_quantity): + return order_quantity + return group.get_max_order_quantity(order_type) + return order_quantity + + +def _get_group_or_default(context, group, stop_loss_price, take_profit_price): + if stop_loss_price is not None or take_profit_price is not None: + # orders have to be bundled together, group them + if group is None: + # use balanced group by default + return grouping.create_balanced_take_profit_and_stop_group(context) + else: + return group + return group + + +async def _bundle_stop_loss_and_take_profit( + context, symbol_market, fees_currency_side, order, quantity, default_group, + stop_loss_tag, stop_loss_type, stop_loss_price, stop_loss_group, + take_profit_tag, take_profit_type, take_profit_price, take_profit_group, + order_pf_percent, order_position_percent) -> dict: + params = {} + side = trading_enums.TradeOrderSide.SELL if order.side is trading_enums.TradeOrderSide.BUY \ + else trading_enums.TradeOrderSide.BUY + order_kwargs = { + "fees_currency_side": fees_currency_side, + "reduce_only": True + } + if stop_loss_price is not None: + order_type = stop_loss_type if stop_loss_type else trading_enums.TraderOrderType.STOP_LOSS + params.update( + await _bundle_chained_order(context, symbol_market, order, quantity, default_group, side, order_kwargs, + stop_loss_tag, order_type, stop_loss_price, stop_loss_group, + order_pf_percent, order_position_percent) + ) + if take_profit_price is not None: + if take_profit_type: + order_type = take_profit_type + else: + order_type = trading_enums.TraderOrderType.BUY_LIMIT if side is trading_enums.TradeOrderSide.BUY \ + else trading_enums.TraderOrderType.SELL_LIMIT + params.update( + await _bundle_chained_order(context, symbol_market, order, quantity, default_group, None, order_kwargs, + take_profit_tag, order_type, take_profit_price, take_profit_group, + order_pf_percent, order_position_percent) + ) + return params + + +async def _bundle_chained_order(context, symbol_market, order, quantity, default_group, side, order_kwargs, + tag, order_type, price, group, order_pf_percent, order_position_percent) -> dict: + adapted_price = trading_personal_data.decimal_adapt_price(symbol_market, price) + group = default_group if group is None else group + chained_order = trading_personal_data.create_order_instance( + trader=context.trader, + order_type=order_type, + symbol=order.symbol, + current_price=order.created_last_price, + quantity=quantity, + price=adapted_price, + side=side, + tag=tag, + group=group, + **order_kwargs + ) + params = await context.trader.bundle_chained_order_with_uncreated_order( + order, chained_order, chained_order.update_with_triggering_order_fees + ) + if basic_keywords.is_emitting_trading_signals(context): + context.get_signal_builder().add_created_order(chained_order, context.trader.exchange_manager, + order_pf_percent, order_position_percent) + return params diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/limit_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/limit_order.py new file mode 100644 index 0000000000..21806d7cb5 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/limit_order.py @@ -0,0 +1,78 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order + + +async def limit( + context, + side=None, + symbol=None, + + amount=None, + target_position=None, + + offset=None, + + stop_loss_offset=None, + stop_loss_tag=None, + stop_loss_type=None, + stop_loss_group=None, + take_profit_offset=None, + take_profit_tag=None, + take_profit_type=None, + take_profit_group=None, + + slippage_limit=None, + time_limit=None, + + reduce_only=False, + post_only=False, + tag=None, + + group=None, + wait_for=None +): + return await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount, + order_target_position=target_position, + + stop_loss_offset=stop_loss_offset, + stop_loss_tag=stop_loss_tag, + stop_loss_type=stop_loss_type, + stop_loss_group=stop_loss_group, + take_profit_offset=take_profit_offset, + take_profit_tag=take_profit_tag, + take_profit_type=take_profit_type, + take_profit_group=take_profit_group, + + order_type_name="limit", + order_offset=offset, + + slippage_limit=slippage_limit, + time_limit=time_limit, + reduce_only=reduce_only, + post_only=post_only, + + tag=tag, + group=group, + wait_for=wait_for + ) + diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/market_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/market_order.py new file mode 100644 index 0000000000..9c8231d0b4 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/market_order.py @@ -0,0 +1,68 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order + + +async def market( + context, + side=None, + symbol=None, + + amount=None, + target_position=None, + + stop_loss_offset=None, + stop_loss_tag=None, + stop_loss_type=None, + stop_loss_group=None, + take_profit_offset=None, + take_profit_tag=None, + take_profit_type=None, + take_profit_group=None, + + reduce_only=False, + + tag=None, + + group=None, + wait_for=None +): + return await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount, + order_target_position=target_position, + + stop_loss_offset=stop_loss_offset, + stop_loss_tag=stop_loss_tag, + stop_loss_type=stop_loss_type, + stop_loss_group=stop_loss_group, + take_profit_offset=take_profit_offset, + take_profit_tag=take_profit_tag, + take_profit_type=take_profit_type, + take_profit_group=take_profit_group, + + order_type_name="market", + + reduce_only=reduce_only, + + tag=tag, + group=group, + wait_for=wait_for + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/scaled_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/scaled_order.py new file mode 100644 index 0000000000..f021a4b949 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/scaled_order.py @@ -0,0 +1,171 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.position_size as position_size +import octobot_trading.modes.script_keywords as script_keywords + +async def scaled_limit( + context, + side=None, + symbol=None, + + order_type_name="limit", + + scale_from=None, + scale_to=None, + order_count=10, + distribution="linear", + + amount=None, + target_position=None, + + stop_loss_offset=None, + stop_loss_tag=None, + stop_loss_type=None, + stop_loss_group=None, + take_profit_offset=None, + take_profit_tag=None, + take_profit_type=None, + take_profit_group=None, + + slippage_limit=None, + time_limit=None, + + reduce_only=False, + post_only=False, + + tag=None, + + group=None, + wait_for=None +): + amount_per_order = None + unknown_portfolio_on_creation = wait_for is not None + if target_position is None and amount is not None: + amount_per_order = await position_size. \ + get_amount(context, amount, side=side, use_total_holding=True, + unknown_portfolio_on_creation=unknown_portfolio_on_creation) / order_count + + elif target_position is not None and amount is None and side is None: + total_amount, side = await position_size.get_target_position( + context, target_position, reduce_only=reduce_only, + unknown_portfolio_on_creation=unknown_portfolio_on_creation) + amount_per_order = total_amount / order_count + else: + raise RuntimeError("Either use side with amount or target_position for scaled orders.") + + scale_from_price = await script_keywords.get_price_with_offset(context, scale_from, side=side) + scale_to_price = await script_keywords.get_price_with_offset(context, scale_to, side=side) + order_prices = [] + if distribution == "linear": + if scale_from_price >= scale_to_price: + price_difference = scale_from_price - scale_to_price + step_size = price_difference / (order_count - 1) + for i in range(0, order_count): + order_prices.append(scale_from_price - (step_size * i)) + elif scale_to_price > scale_from_price: + price_difference = scale_to_price - scale_from_price + step_size = price_difference / (order_count - 1) + for i in range(0, order_count): + order_prices.append(scale_from_price + (step_size * i)) + + else: + raise RuntimeError("scaled order: unsupported distribution type. check the documentation for more informations") + created_orders = [] + for order_price in order_prices: + new_created_order = await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount_per_order, + + order_type_name="limit", + order_offset=f"@{order_price}", + + stop_loss_offset=stop_loss_offset, + stop_loss_tag=stop_loss_tag, + stop_loss_type=stop_loss_type, + stop_loss_group=stop_loss_group, + take_profit_offset=take_profit_offset, + take_profit_tag=take_profit_tag, + take_profit_type=take_profit_type, + take_profit_group=take_profit_group, + + slippage_limit=slippage_limit, + time_limit=time_limit, + + reduce_only=reduce_only, + post_only=post_only, + group=group, + tag=tag, + + wait_for=wait_for + ) + try: + created_orders.append(new_created_order[0]) + except IndexError: + pass + # raise RuntimeError(f"scaled {side} order not created") + return created_orders + + +async def scaled_stop_loss( + context, + side=None, + symbol=None, + + scale_from=None, + scale_to=None, + order_count=10, + distribution="linear", + + amount=None, + target_position=None, + + slippage_limit=None, + time_limit=None, + + tag=None, + + group=None, + wait_for=None +): + await scaled_limit(context, + side=side, + symbol=symbol, + + order_type_name="stop_loss", + + scale_from=scale_from, + scale_to=scale_to, + order_count=order_count, + distribution=distribution, + + amount=amount, + target_position=target_position, + + slippage_limit=slippage_limit, + time_limit=time_limit, + + reduce_only=True, + + tag=tag, + + group=group, + wait_for=wait_for + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/stop_loss_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/stop_loss_order.py new file mode 100644 index 0000000000..d360a8a28d --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/stop_loss_order.py @@ -0,0 +1,51 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order + + +async def stop_loss( + context, + side=None, + symbol=None, + + offset=None, + + amount=None, + target_position=None, + + tag=None, + + group=None, + wait_for=None +): + return await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount, + order_target_position=target_position, + + order_type_name="stop_loss", + order_offset=offset, + + reduce_only=True, + tag=tag, + + group=group, + wait_for=wait_for + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_limit_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_limit_order.py new file mode 100644 index 0000000000..e530b4d24b --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_limit_order.py @@ -0,0 +1,67 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order + + +async def trailing_limit( + context, + side=None, + symbol=None, + + amount=None, + target_position=None, + + offset=None, + min_offset=None, + max_offset=None, + + slippage_limit=None, + time_limit=None, + + reduce_only=False, + post_only=False, + + tag=None, + + group=None, + wait_for=None +): + return await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount, + order_target_position=target_position, + + order_type_name="trailing_limit", + + order_min_offset=min_offset, + order_max_offset=max_offset, + order_offset=offset, + + slippage_limit=slippage_limit, + time_limit=time_limit, + reduce_only=reduce_only, + post_only=post_only, + group=group, + + tag=tag, + + + wait_for=wait_for + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_market_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_market_order.py new file mode 100644 index 0000000000..3d3e31486a --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_market_order.py @@ -0,0 +1,54 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order + + +async def trailing_market( + context, + side=None, + symbol=None, + + amount=None, + target_position=None, + + offset=None, + + reduce_only=False, + + tag=None, + + group=None, + wait_for=None +): + return await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount, + order_target_position=target_position, + + order_type_name="trailing_market", + + order_offset=offset, + + reduce_only=reduce_only, + + tag=tag, + group=group, + wait_for=wait_for + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_stop_loss_order.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_stop_loss_order.py new file mode 100644 index 0000000000..5a3483da25 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/order_types/trailing_stop_loss_order.py @@ -0,0 +1,54 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order + + +async def trailing_stop_loss( + context, + side=None, + symbol=None, + + amount=None, + target_position=None, + + offset=None, + + reduce_only=True, + tag=None, + + group=None, + wait_for=None +) -> list: + return await create_order.create_order_instance( + context, + side=side, + symbol=symbol or context.symbol, + + order_amount=amount, + order_target_position=target_position, + + order_type_name="trailing_stop_loss", + + order_offset=offset, + + reduce_only=reduce_only, + + tag=tag, + group=group, + + wait_for=wait_for + ) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/__init__.py new file mode 100644 index 0000000000..90573d46ae --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/__init__.py @@ -0,0 +1,19 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from .amount import * +from .target_position import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/amount.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/amount.py new file mode 100644 index 0000000000..8e57112848 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/amount.py @@ -0,0 +1,46 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_trading.modes.script_keywords as script_keywords +import octobot_trading.errors as trading_errors +import octobot_trading.enums as trading_enums +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_private_data as exchange_private_data +import octobot_commons.constants as commons_constants + + +async def get_amount( + context=None, + input_amount=None, + side=trading_enums.TradeOrderSide.BUY.value, + reduce_only=True, + is_stop_order=False, + use_total_holding=False, + unknown_portfolio_on_creation=False, + target_price=None +): + amount_value = await script_keywords.get_amount_from_input_amount( + context=context, + input_amount=input_amount, + side=side, + reduce_only=reduce_only, + is_stop_order=is_stop_order, + use_total_holding=use_total_holding, + target_price=target_price + ) + if unknown_portfolio_on_creation: + # no way to check if the amount is valid when creating order + _, amount_value = script_keywords.parse_quantity(input_amount) + return amount_value diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/target_position.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/target_position.py new file mode 100644 index 0000000000..b27fe946e4 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/position_size/target_position.py @@ -0,0 +1,78 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import octobot_trading.enums as trading_enums +import octobot_trading.constants as trading_constants +import octobot_trading.errors as trading_errors +import octobot_trading.modes.script_keywords as script_keywords +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_private_data as exchange_private_data + + +# todo handle negative open position for shorts +async def get_target_position( + context=None, + target=None, + reduce_only=True, + is_stop_order=False, + use_total_holding=False, + unknown_portfolio_on_creation=False, + target_price=None +): + target_position_type, target_position_value = script_keywords.parse_quantity(target) + + if target_position_type is script_keywords.QuantityType.POSITION_PERCENT: + open_position_size_val = exchange_private_data.open_position_size(context) + target_size = open_position_size_val * target_position_value / 100 + order_size = target_size - open_position_size_val + + elif target_position_type is script_keywords.QuantityType.PERCENT: + total_acc_bal = await script_keywords.total_account_balance(context) + target_size = total_acc_bal * target_position_value / 100 + order_size = target_size - exchange_private_data.open_position_size(context) + + # in target position, we always provide the position size we want to end up with + elif target_position_type in (script_keywords.QuantityType.DELTA, script_keywords.QuantityType.DELTA_BASE) \ + or target_position_type is script_keywords.QuantityType.FLAT: + order_size = target_position_value - exchange_private_data.open_position_size(context) + if target == order_size: + # no order to create + return trading_constants.ZERO, trading_enums.TradeOrderSide.BUY.value + + elif target_position_type is script_keywords.QuantityType.AVAILABLE_PERCENT: + available_account_balance_val = await script_keywords.available_account_balance(context, + reduce_only=reduce_only) + order_size = available_account_balance_val * target_position_value / 100 + + else: + raise trading_errors.InvalidArgumentError("make sure to use a supported syntax for position") + + side = get_target_position_side(order_size) + if side == trading_enums.TradeOrderSide.SELL.value: + order_size = order_size * -1 + if not unknown_portfolio_on_creation: + order_size = await script_keywords.adapt_amount_to_holdings(context, order_size, side, + use_total_holding, reduce_only, is_stop_order, + target_price=target_price) + return order_size, side + + +def get_target_position_side(order_size): + if order_size < 0: + return trading_enums.TradeOrderSide.SELL.value + elif order_size > 0: + return trading_enums.TradeOrderSide.BUY.value + # order_size == 0 + raise RuntimeError("Computed position size is 0") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/orders/waiting.py b/packages/tentacles/Meta/Keywords/scripting_library/orders/waiting.py new file mode 100644 index 0000000000..cad601f117 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/orders/waiting.py @@ -0,0 +1,88 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import time + +import tentacles.Meta.Keywords.scripting_library.orders.open_orders as open_orders +import octobot_trading.personal_data as personal_data +import octobot_commons.logging as logging + + +async def wait_for_orders_close(ctx, orders, timeout=None): + if not isinstance(orders, list): + orders = [orders] + t0 = time.time() + refresh_interval = 0.01 + # wait for orders to be filled or cancelled + # also wait for associated chained orders to be opened + try: # order.is_closed() fails when order got filled meanwhile + while not all(order.is_closed() for order in orders) or \ + not are_all_chained_orders_created(ctx, orders): + if timeout is None or time.time() - t0 < timeout: + if ctx.exchange_manager.is_backtesting: + raise asyncio.TimeoutError("Can't wait for orders in backtesting") + await asyncio.sleep(refresh_interval) + else: + raise asyncio.TimeoutError("Order wasnt not filled in time") + except AttributeError as e: + logging.get_logger("Waiting").exception(e, True, "AttributeError on checking orders (should not happen)") + pass # continue try to create take profit in case of connection issues + + +def are_all_chained_orders_created(ctx, orders): + for order in orders: + for chained_order in order.chained_orders: + if not chained_order.is_created(): + return False + if chained_order.is_closed(): + continue + found_order = False + # ensure that chained orders are open or got closed + for open_order in open_orders.get_open_orders(ctx): + if personal_data.is_associated_pending_order(open_order, chained_order): + found_order = True + break + if not found_order: + return False + return True + + +async def wait_for_stop_loss_open(ctx, order_tag=None, order_group=None, timeout=60): + """ + waits for and finds a stop order based on order tag or order group + :param ctx: + :param order_tag: + :param order_group: + :param timeout: in seconds + :return: the stop loss order + """ + t0 = time.time() + refresh_interval = 0.01 + orders = ctx.exchange_manager.exchange_personal_data.orders_manager.orders + + stop_found = False + while not stop_found: + for order in orders: + stop_found = orders[order].tag == order_tag or orders[order].order_group == order_group + if stop_found: + return orders[order] + if timeout is None or time.time() - t0 < timeout: + if ctx.exchange_manager.is_backtesting: + raise asyncio.TimeoutError("Can't wait for orders in backtesting") + await asyncio.sleep(refresh_interval) + else: + ctx.logger.error("Stop Loss Order was not found: was not placed in time or got already triggered") + return None diff --git a/packages/tentacles/Meta/Keywords/scripting_library/settings/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/settings/__init__.py new file mode 100644 index 0000000000..d2555f8d32 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/settings/__init__.py @@ -0,0 +1,18 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + + +from .script_settings import * diff --git a/packages/tentacles/Meta/Keywords/scripting_library/settings/script_settings.py b/packages/tentacles/Meta/Keywords/scripting_library/settings/script_settings.py new file mode 100644 index 0000000000..a1bd5d3f40 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/settings/script_settings.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_trading.api as trading_api +import octobot_commons.errors as errors + + +def set_minimum_candles(context, candles_count): + available_candles = 0 + try: + available_candles = trading_api.get_symbol_candles_count( + trading_api.get_symbol_data(context.exchange_manager, context.symbol, allow_creation=False), + context.time_frame + ) + if available_candles >= candles_count: + return + except KeyError: + pass + raise errors.MissingDataError(f"Missing candles: available: {available_candles}, required: {candles_count}") + + +def do_not_initialize(): + raise errors.MissingDataError("Script should not be considered initialized (do_not_initialize call)") + + +def set_allow_artificial_orders(context, allow_artificial_orders): + context.allow_artificial_orders = allow_artificial_orders + context.exchange_manager.trader.allow_artificial_orders = context.allow_artificial_orders diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/__init__.py new file mode 100644 index 0000000000..7ffb2ad077 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/__init__.py @@ -0,0 +1,151 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import pytest_asyncio +import mock +import decimal +import sys +import asyncio + +import octobot_commons.asyncio_tools as asyncio_tools +import octobot_trading.modes.script_keywords.context_management as context_management +import octobot_trading.exchanges as trading_exchanges +import octobot_trading.enums as enums + + +@pytest.fixture +def null_context(): + context = context_management.Context( + None, + None, + None, + None, + None, + None, + None, + None, + None, + None, + None, + None, + None, + None, + None, + ) + yield context + + +@pytest_asyncio.fixture +async def mock_context(backtesting_trader): + _, exchange_manager, trader_inst = backtesting_trader + context = context_management.Context( + mock.Mock(), + exchange_manager, + trader_inst, + mock.Mock(), + "BTC/USDT", + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + mock.Mock(), + ) + context.signal_builder = mock.Mock() + context.is_trading_signal_emitter = mock.Mock(return_value=False) + context.orders_writer = mock.Mock(log_many=mock.AsyncMock()) + portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager + # init portfolio with 0.5 BTC, 20 ETH and 30000 USDT and only 0.1 available BTC + portfolio_manager.portfolio.update_portfolio_from_balance({ + 'BTC': {'available': decimal.Decimal("0.1"), 'total': decimal.Decimal("0.5")}, + 'ETH': {'available': decimal.Decimal("20"), 'total': decimal.Decimal("20")}, + 'USDT': {'available': decimal.Decimal("30000"), 'total': decimal.Decimal("30000")} + }, True) + exchange_manager.client_symbols.append("BTC/USDT") + exchange_manager.client_symbols.append("ETH/USDT") + exchange_manager.client_symbols.append("ETH/BTC") + # init prices with BTC/USDT = 40000, ETH/BTC = 0.1 and ETH/USDT = 4000 + portfolio_manager.portfolio_value_holder.value_converter.last_prices_by_trading_pair["BTC/USDT"] = \ + decimal.Decimal("40000") + portfolio_manager.portfolio_value_holder.value_converter.last_prices_by_trading_pair["ETH/USDT"] = \ + decimal.Decimal("4000") + portfolio_manager.portfolio_value_holder.value_converter.last_prices_by_trading_pair["ETH/BTC"] = \ + decimal.Decimal("0.1") + portfolio_manager.handle_balance_updated() + yield context + + +@pytest.fixture +def symbol_market(): + return { + enums.ExchangeConstantsMarketStatusColumns.LIMITS.value: { + enums.ExchangeConstantsMarketStatusColumns.LIMITS_AMOUNT.value: { + enums.ExchangeConstantsMarketStatusColumns.LIMITS_AMOUNT_MIN.value: 0.5, + enums.ExchangeConstantsMarketStatusColumns.LIMITS_AMOUNT_MAX.value: 100, + }, + enums.ExchangeConstantsMarketStatusColumns.LIMITS_COST.value: { + enums.ExchangeConstantsMarketStatusColumns.LIMITS_COST_MIN.value: 1, + enums.ExchangeConstantsMarketStatusColumns.LIMITS_COST_MAX.value: 200 + }, + enums.ExchangeConstantsMarketStatusColumns.LIMITS_PRICE.value: { + enums.ExchangeConstantsMarketStatusColumns.LIMITS_PRICE_MIN.value: 0.5, + enums.ExchangeConstantsMarketStatusColumns.LIMITS_PRICE_MAX.value: 50 + }, + }, + enums.ExchangeConstantsMarketStatusColumns.PRECISION.value: { + enums.ExchangeConstantsMarketStatusColumns.PRECISION_PRICE.value: 8, + enums.ExchangeConstantsMarketStatusColumns.PRECISION_AMOUNT.value: 8 + } + } + + +@pytest.fixture +def event_loop(): + # re-configure async loop each time this fixture is called + _configure_async_test_loop() + loop = asyncio.new_event_loop() + # use ErrorContainer to catch otherwise hidden exceptions occurring in async scheduled tasks + error_container = asyncio_tools.ErrorContainer() + loop.set_exception_handler(error_container.exception_handler) + yield loop + # will fail if exceptions have been silently raised + loop.run_until_complete(error_container.check()) + loop.close() + + +@pytest.fixture +def skip_if_octobot_trading_mocking_disabled(request): + try: + with mock.patch.object(trading_exchanges.Trader, "cancel_order", mock.AsyncMock()): + pass + # mocking is available + except TypeError: + pytest.skip(reason=f"Disabled {request.node.name} [OctoBot-Trading mocks not allowed]") + + +def _configure_async_test_loop(): + if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith('win'): + # use WindowsSelectorEventLoopPolicy to avoid aiohttp connexion close warnings + # https://github.com/encode/httpx/issues/914#issuecomment-622586610 + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + + +# set default values for async loop +_configure_async_test_loop() diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/data_store.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/data_store.py new file mode 100644 index 0000000000..cb4dd9a624 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/data_store.py @@ -0,0 +1,127 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_trading.enums as trading_enums +import octobot_commons.enums as commons_enums + + +@pytest.fixture +def default_price_data(): + # imported from real backtesting data + return { + "BTC/USDT": [[1606780800000.0, 19695.87, 19720.0, 19479.8, 19565.47, 4570.361518], [1606784400000.0, 19565.47, 19639.99, 19433.15, 19605.75, 2702.459235], [1606788000000.0, 19605.75, 19704.93, 19548.57, 19680.95, 2408.229978], [1606791600000.0, 19680.96, 19682.77, 19340.0, 19419.74, 2889.848604], [1606795200000.0, 19419.73, 19527.02, 19344.92, 19354.31, 3400.857941], [1606798800000.0, 19352.64, 19502.54, 19281.38, 19483.73, 2620.883792], [1606802400000.0, 19483.73, 19517.94, 19309.87, 19338.34, 3129.776329], [1606806000000.0, 19338.33, 19546.81, 19300.0, 19515.63, 3009.225182], [1606809600000.0, 19515.62, 19567.0, 19441.19, 19466.99, 3143.172961], [1606813200000.0, 19467.0, 19570.0, 19426.96, 19565.0, 2824.268695], [1606816800000.0, 19564.99, 19800.0, 19558.77, 19739.51, 7640.260767], [1606820400000.0, 19739.51, 19888.0, 18886.0, 19425.0, 14556.657151], [1606824000000.0, 19425.4, 19482.01, 18399.99, 18551.35, 17554.24707], [1606827600000.0, 18550.25, 18844.15, 18001.12, 18759.73, 14772.777639], [1606831200000.0, 18759.74, 19364.82, 18651.0, 19285.31, 7821.047189], [1606834800000.0, 19285.3, 19489.3, 19147.6, 19263.37, 6838.162713], [1606838400000.0, 19263.36, 19325.83, 18938.22, 19058.8, 5690.017039], [1606842000000.0, 19058.8, 19086.95, 18611.88, 19058.4, 6526.028983], [1606845600000.0, 19058.4, 19074.64, 18693.37, 18738.82, 3316.505911], [1606849200000.0, 18738.83, 19135.19, 18720.48, 19069.79, 2954.651375], [1606852800000.0, 19069.79, 19150.0, 18862.0, 19024.32, 2451.419624], [1606856400000.0, 19024.33, 19211.0, 18936.61, 19038.39, 2275.27857], [1606860000000.0, 19038.39, 19156.72, 18830.18, 18895.0, 1719.522796], [1606863600000.0, 18895.01, 18943.26, 18725.0, 18764.96, 2883.10159], [1606867200000.0, 18764.96, 18877.92, 18433.0, 18836.51, 4372.162317], [1606870800000.0, 18836.5, 18972.12, 18703.0, 18854.01, 2504.979019], [1606874400000.0, 18854.01, 18863.73, 18639.86, 18676.4, 1647.545452], [1606878000000.0, 18676.39, 18788.32, 18506.16, 18618.25, 3284.666149], [1606881600000.0, 18618.0, 18700.0, 18330.0, 18550.96, 4330.094724], [1606885200000.0, 18550.95, 18660.0, 18465.42, 18654.41, 2661.915712], [1606888800000.0, 18654.27, 18939.97, 18630.63, 18926.66, 4068.001777], [1606892400000.0, 18924.13, 19135.0, 18833.0, 19124.48, 4325.750236], [1606896000000.0, 19124.49, 19342.0, 19059.09, 19195.19, 4980.517871], [1606899600000.0, 19195.52, 19319.67, 18977.03, 19035.99, 3695.078977], [1606903200000.0, 19036.29, 19196.63, 18991.43, 19103.39, 3344.073936], [1606906800000.0, 19103.38, 19199.0, 19022.47, 19127.31, 2688.864019], [1606910400000.0, 19127.31, 19129.13, 18792.31, 18940.98, 4128.997824], [1606914000000.0, 18940.97, 19250.7, 18919.31, 19124.51, 3955.239884], [1606917600000.0, 19126.75, 19175.0, 18850.0, 18910.21, 4219.856604], [1606921200000.0, 18910.21, 18989.0, 18728.38, 18891.57, 3966.239422], [1606924800000.0, 18891.57, 19015.7, 18770.0, 18856.25, 3304.383039], [1606928400000.0, 18856.25, 18999.0, 18810.27, 18976.33, 2378.363036], [1606932000000.0, 18976.33, 19068.0, 18894.8, 19011.99, 2505.872597], [1606935600000.0, 19011.97, 19139.06, 18964.08, 19101.1, 2128.498473], [1606939200000.0, 19101.1, 19150.0, 19044.85, 19083.4, 2075.778464], [1606942800000.0, 19083.77, 19168.91, 19046.3, 19145.01, 1486.371573], [1606946400000.0, 19145.0, 19235.0, 19099.0, 19111.13, 1845.354603], [1606950000000.0, 19111.13, 19260.0, 19089.5, 19204.09, 2012.40777], [1606953600000.0, 19204.08, 19299.0, 19150.76, 19180.0, 2131.560886], [1606957200000.0, 19180.0, 19184.54, 18940.0, 19016.91, 2664.789937], [1606960800000.0, 19016.92, 19099.05, 18945.0, 19041.73, 2011.154103], [1606964400000.0, 19041.73, 19178.87, 19013.17, 19087.0, 1658.093318], [1606968000000.0, 19087.01, 19124.03, 19022.22, 19041.21, 1398.143328], [1606971600000.0, 19041.21, 19110.0, 18867.2, 18922.83, 2207.439489], [1606975200000.0, 18922.83, 19015.42, 18880.51, 18970.24, 1915.073941], [1606978800000.0, 18970.24, 19250.0, 18911.0, 19208.11, 3187.186407], [1606982400000.0, 19208.11, 19450.0, 19161.85, 19378.79, 6548.804707], [1606986000000.0, 19378.79, 19420.96, 19097.6, 19363.99, 4473.862826], [1606989600000.0, 19363.98, 19422.9, 19244.75, 19353.69, 3404.982735], [1606993200000.0, 19353.69, 19444.4, 19290.0, 19396.5, 3752.006358], [1606996800000.0, 19396.49, 19425.0, 19219.78, 19321.26, 3073.255484], [1607000400000.0, 19321.26, 19375.16, 19252.2, 19281.0, 2397.631645], [1607004000000.0, 19281.51, 19397.0, 19274.0, 19351.32, 2196.785603], [1607007600000.0, 19351.32, 19536.2, 19300.0, 19535.0, 4242.672379], [1607011200000.0, 19535.0, 19598.0, 19251.76, 19354.78, 5994.113495], [1607014800000.0, 19354.78, 19384.23, 19194.06, 19299.23, 2612.551421], [1607018400000.0, 19299.24, 19398.9, 19299.24, 19371.46, 2030.270253], [1607022000000.0, 19371.46, 19422.9, 19328.51, 19414.29, 1747.888777], [1607025600000.0, 19414.29, 19435.29, 19306.27, 19369.44, 1970.71359], [1607029200000.0, 19369.44, 19462.24, 19328.92, 19438.86, 1585.05878], [1607032800000.0, 19438.86, 19479.71, 19402.0, 19464.11, 1224.310923], [1607036400000.0, 19464.12, 19540.0, 19402.11, 19421.9, 2261.040894], [1607040000000.0, 19422.34, 19527.0, 19378.92, 19460.65, 2083.900225], [1607043600000.0, 19462.49, 19489.84, 19319.39, 19321.42, 1937.056634], [1607047200000.0, 19323.31, 19375.0, 19250.0, 19251.92, 2016.847648], [1607050800000.0, 19251.92, 19323.31, 19122.74, 19162.62, 2645.78391], [1607054400000.0, 19162.62, 19318.83, 19122.48, 19286.78, 2332.879073], [1607058000000.0, 19286.78, 19312.79, 19190.52, 19200.07, 1607.201706], [1607061600000.0, 19200.01, 19367.05, 19192.89, 19317.13, 1791.575063], [1607065200000.0, 19317.13, 19335.83, 19238.31, 19283.94, 2191.960974], [1607068800000.0, 19283.94, 19447.14, 19281.23, 19388.89, 3152.638117], [1607072400000.0, 19388.9, 19410.49, 19316.11, 19354.23, 1738.767103], [1607076000000.0, 19354.23, 19360.73, 18900.0, 18978.35, 9630.663093], [1607079600000.0, 18978.35, 19077.16, 18700.0, 18833.25, 5226.950061], [1607083200000.0, 18834.48, 19029.56, 18686.38, 19026.49, 6651.625618], [1607086800000.0, 19026.49, 19027.0, 18914.42, 19005.34, 2476.86035], [1607090400000.0, 19005.34, 19146.22, 18917.84, 19046.11, 3301.095255], [1607094000000.0, 19046.11, 19073.46, 18938.25, 18943.35, 2288.005452], [1607097600000.0, 18944.06, 18991.7, 18817.0, 18981.28, 3493.465697], [1607101200000.0, 18981.28, 19029.2, 18932.23, 18968.93, 1898.7485], [1607104800000.0, 18968.82, 19078.68, 18929.16, 19056.45, 1718.020232], [1607108400000.0, 19056.45, 19065.0, 19000.0, 19038.73, 1689.617236], [1607112000000.0, 19038.73, 19045.34, 18880.0, 18962.53, 2390.289129], [1607115600000.0, 18962.52, 18988.75, 18725.6, 18806.41, 3265.941089], [1607119200000.0, 18807.09, 18875.27, 18565.31, 18665.3, 2805.203431], [1607122800000.0, 18665.31, 18841.0, 18601.5, 18650.52, 2948.572604], [1607126400000.0, 18650.51, 18791.53, 18500.0, 18764.23, 4398.592542], [1607130000000.0, 18764.23, 18819.83, 18634.5, 18644.89, 2253.931869], [1607133600000.0, 18644.88, 18848.62, 18641.1, 18789.66, 2388.321713], [1607137200000.0, 18789.65, 18880.0, 18738.34, 18818.85, 2317.888684], [1607140800000.0, 18818.05, 18932.0, 18800.0, 18863.9, 1551.894666], [1607144400000.0, 18863.9, 18970.0, 18840.69, 18954.42, 1647.778313], [1607148000000.0, 18955.83, 18990.08, 18906.16, 18963.03, 1590.750531], [1607151600000.0, 18963.03, 18986.61, 18900.0, 18911.31, 1514.204824], [1607155200000.0, 18911.3, 19177.0, 18911.3, 19149.9, 3390.549236], [1607158800000.0, 19149.9, 19165.0, 19053.05, 19110.42, 1671.420619], [1607162400000.0, 19110.42, 19137.28, 19037.5, 19114.48, 1404.535637], [1607166000000.0, 19114.48, 19127.46, 18959.03, 19013.43, 2031.724234], [1607169600000.0, 19013.43, 19080.01, 18965.45, 19060.01, 1505.479033], [1607173200000.0, 19060.27, 19069.9, 18941.74, 18983.13, 1663.18758], [1607176800000.0, 18982.57, 19092.03, 18963.83, 19080.01, 2031.09956], [1607180400000.0, 19080.01, 19150.0, 19051.0, 19110.78, 1878.881403], [1607184000000.0, 19111.13, 19149.92, 19044.28, 19099.0, 1712.332099], [1607187600000.0, 19098.99, 19125.76, 19026.0, 19125.76, 1130.566015], [1607191200000.0, 19125.77, 19140.0, 19085.0, 19116.3, 1113.481955], [1607194800000.0, 19116.65, 19172.37, 19075.3, 19119.19, 1273.081414], [1607198400000.0, 19119.19, 19140.0, 18993.92, 19054.9, 1291.846074], [1607202000000.0, 19055.24, 19110.0, 19000.0, 19007.04, 1050.880547], [1607205600000.0, 19007.04, 19052.17, 18977.16, 19020.5, 772.952693], [1607209200000.0, 19020.59, 19157.52, 19020.08, 19147.66, 1337.367332], [1607212800000.0, 19147.66, 19277.0, 19131.02, 19268.81, 2729.198654], [1607216400000.0, 19268.81, 19342.0, 19228.0, 19261.52, 2377.439344], [1607220000000.0, 19261.51, 19288.06, 19206.03, 19233.43, 1450.02811], [1607223600000.0, 19233.44, 19245.91, 19137.18, 19183.39, 1223.127577], [1607227200000.0, 19183.39, 19231.08, 19155.04, 19185.92, 881.834286], [1607230800000.0, 19185.92, 19222.54, 19105.0, 19151.97, 1054.988231], [1607234400000.0, 19152.0, 19224.66, 19133.5, 19183.94, 845.302981], [1607238000000.0, 19184.23, 19260.0, 19172.3, 19230.99, 921.805973], [1607241600000.0, 19230.99, 19251.0, 19003.64, 19017.56, 2003.080539], [1607245200000.0, 19017.54, 19083.98, 18950.87, 19042.22, 2087.157592], [1607248800000.0, 19042.23, 19061.53, 18957.04, 19041.31, 1124.016156], [1607252400000.0, 19041.32, 19100.0, 18959.73, 19089.16, 1115.628806], [1607256000000.0, 19089.15, 19089.16, 18960.55, 18982.47, 1111.226328], [1607259600000.0, 18982.87, 19021.0, 18857.0, 18953.07, 2100.601588], [1607263200000.0, 18953.07, 19131.53, 18930.98, 19120.0, 1809.57926], [1607266800000.0, 19120.27, 19152.38, 19094.0, 19125.83, 1350.286034], [1607270400000.0, 19125.84, 19213.09, 19018.0, 19106.8, 1927.444027], [1607274000000.0, 19106.46, 19177.16, 19065.91, 19128.02, 920.596848], [1607277600000.0, 19127.86, 19174.36, 19093.78, 19105.21, 870.086461], [1607281200000.0, 19105.2, 19156.96, 19076.47, 19132.62, 870.441944], [1607284800000.0, 19132.62, 19184.62, 19123.41, 19168.73, 868.814714], [1607288400000.0, 19168.73, 19244.0, 19114.01, 19238.08, 1606.570085], [1607292000000.0, 19238.08, 19249.0, 19045.0, 19125.44, 1481.428442], [1607295600000.0, 19125.55, 19420.0, 19125.55, 19359.4, 4312.407881], [1607299200000.0, 19358.67, 19420.91, 19288.23, 19318.56, 1983.516535], [1607302800000.0, 19318.56, 19349.55, 19219.99, 19293.08, 1527.856348], [1607306400000.0, 19293.09, 19307.49, 19169.35, 19200.0, 1548.269166], [1607310000000.0, 19200.01, 19288.23, 19157.55, 19282.68, 1339.849597], [1607313600000.0, 19282.68, 19306.11, 19233.36, 19285.97, 1193.985926], [1607317200000.0, 19285.97, 19299.0, 19240.0, 19246.26, 1023.882776], [1607320800000.0, 19246.25, 19356.3, 19237.37, 19306.36, 1454.812033], [1607324400000.0, 19306.36, 19399.0, 19293.93, 19371.3, 1503.212608], [1607328000000.0, 19371.24, 19386.15, 19200.0, 19250.84, 1808.057972], [1607331600000.0, 19250.85, 19256.62, 19177.99, 19213.34, 1678.482081], [1607335200000.0, 19213.34, 19254.06, 19147.05, 19183.41, 1839.598649], [1607338800000.0, 19183.41, 19231.22, 19090.0, 19103.58, 1914.160878], [1607342400000.0, 19103.79, 19239.0, 19097.5, 19238.8, 2301.613075], [1607346000000.0, 19239.0, 19253.09, 19180.7, 19187.67, 1522.342358], [1607349600000.0, 19187.9, 19234.61, 19095.72, 19195.84, 2077.875894], [1607353200000.0, 19195.84, 19257.03, 19168.88, 19213.9, 1660.035444], [1607356800000.0, 19213.9, 19241.39, 19166.79, 19188.36, 1587.897438], [1607360400000.0, 19188.29, 19210.09, 19139.99, 19160.01, 1440.248622], [1607364000000.0, 19160.01, 19188.16, 18902.88, 18939.7, 4582.186702], [1607367600000.0, 18938.51, 19039.02, 18912.31, 18967.01, 1898.119135], [1607371200000.0, 18967.01, 19059.8, 18935.06, 19050.63, 1533.444692], [1607374800000.0, 19050.63, 19115.28, 19015.36, 19075.41, 1561.729683], [1607378400000.0, 19075.45, 19115.78, 19057.86, 19114.48, 788.101718], [1607382000000.0, 19114.49, 19217.64, 19072.8, 19166.9, 1603.016963], [1607385600000.0, 19166.9, 19229.99, 19132.67, 19228.4, 1539.700738], [1607389200000.0, 19228.41, 19235.6, 19150.25, 19210.62, 1465.000464], [1607392800000.0, 19210.63, 19215.34, 19160.0, 19177.61, 1061.470891], [1607396400000.0, 19177.61, 19196.0, 19132.78, 19154.73, 1086.763535], [1607400000000.0, 19154.73, 19210.0, 19151.96, 19181.21, 1423.218666], [1607403600000.0, 19181.21, 19293.76, 19181.21, 19293.76, 1362.402648], [1607407200000.0, 19293.76, 19294.84, 19091.0, 19160.49, 1931.913758], [1607410800000.0, 19160.49, 19199.62, 19135.0, 19143.1, 1877.753232], [1607414400000.0, 19143.1, 19168.88, 19010.0, 19069.96, 2558.836053], [1607418000000.0, 19069.95, 19101.59, 18700.0, 18791.77, 5751.663474], [1607421600000.0, 18791.78, 18879.0, 18700.04, 18816.0, 3764.950593], [1607425200000.0, 18816.0, 18869.23, 18730.0, 18762.96, 2445.010049], [1607428800000.0, 18762.95, 18864.06, 18610.0, 18726.93, 4536.598736], [1607432400000.0, 18726.92, 18942.3, 18726.92, 18922.84, 3398.709741], [1607436000000.0, 18923.42, 18974.83, 18861.47, 18865.01, 2255.911886], [1607439600000.0, 18865.0, 18913.14, 18780.0, 18809.91, 2017.421275], [1607443200000.0, 18809.91, 18910.0, 18745.31, 18832.03, 2390.26467], [1607446800000.0, 18832.02, 18937.81, 18818.0, 18927.41, 1365.998541], [1607450400000.0, 18927.41, 18930.84, 18826.08, 18831.0, 1791.746261], [1607454000000.0, 18831.0, 18895.0, 18787.87, 18795.74, 1381.006558], [1607457600000.0, 18795.74, 18848.0, 18664.51, 18742.63, 3177.566538], [1607461200000.0, 18742.64, 18827.34, 18687.8, 18777.86, 1763.504325], [1607464800000.0, 18778.02, 18837.45, 18320.0, 18340.01, 4197.202626], [1607468400000.0, 18336.5, 18500.0, 18200.0, 18324.11, 7082.332356], [1607472000000.0, 18324.11, 18380.0, 18120.0, 18180.01, 4284.974779], [1607475600000.0, 18180.0, 18329.65, 18032.0, 18215.49, 4099.081784], [1607479200000.0, 18215.5, 18350.36, 18151.6, 18281.0, 2790.501477], [1607482800000.0, 18281.26, 18310.0, 18220.88, 18300.01, 1888.927345], [1607486400000.0, 18300.0, 18308.39, 18125.0, 18159.58, 2057.228798], [1607490000000.0, 18159.57, 18242.32, 18058.0, 18205.5, 2983.491849], [1607493600000.0, 18206.57, 18280.0, 18153.36, 18214.09, 2142.323158], [1607497200000.0, 18214.08, 18244.26, 17830.0, 17924.07, 5288.683222], [1607500800000.0, 17924.06, 18048.12, 17650.0, 17955.78, 8243.276243], [1607504400000.0, 17955.78, 18108.64, 17919.93, 18036.34, 4587.527362], [1607508000000.0, 18040.98, 18249.0, 17978.16, 18234.65, 4049.214998], [1607511600000.0, 18234.66, 18379.87, 18180.72, 18244.95, 4005.127067], [1607515200000.0, 18244.94, 18365.0, 18210.05, 18234.74, 2551.592987], [1607518800000.0, 18234.74, 18498.0, 18169.1, 18483.5, 3956.733743], [1607522400000.0, 18483.24, 18523.99, 18425.17, 18451.01, 3858.941662], [1607526000000.0, 18451.01, 18470.22, 18290.49, 18349.5, 3458.085019], [1607529600000.0, 18349.5, 18392.0, 18252.15, 18389.74, 2491.702939], [1607533200000.0, 18389.73, 18435.0, 18297.32, 18304.06, 2386.467822], [1607536800000.0, 18304.06, 18352.22, 18165.0, 18196.4, 2456.281166], [1607540400000.0, 18196.41, 18289.06, 18196.11, 18260.03, 1802.917152], [1607544000000.0, 18260.02, 18372.0, 18211.78, 18341.02, 2640.593524], [1607547600000.0, 18341.01, 18550.0, 18326.04, 18519.37, 3203.221946], [1607551200000.0, 18519.32, 18613.99, 18485.71, 18556.11, 2659.087548], [1607554800000.0, 18556.12, 18639.57, 18531.29, 18541.28, 1699.570211], [1607558400000.0, 18541.29, 18557.32, 18416.17, 18432.01, 1824.411371], [1607562000000.0, 18432.01, 18501.96, 18303.28, 18431.25, 1984.13813], [1607565600000.0, 18431.25, 18485.71, 18353.29, 18386.02, 1684.784578], [1607569200000.0, 18386.02, 18500.0, 18350.0, 18409.77, 1492.900435], [1607572800000.0, 18409.78, 18493.28, 18389.0, 18418.15, 1491.721766], [1607576400000.0, 18418.15, 18420.8, 18305.4, 18372.37, 1571.720153], [1607580000000.0, 18372.36, 18435.74, 18278.44, 18298.22, 2065.773007], [1607583600000.0, 18298.23, 18386.16, 18287.78, 18335.91, 1225.145125], [1607587200000.0, 18335.91, 18471.18, 18319.74, 18433.3, 1918.034657], [1607590800000.0, 18433.31, 18480.0, 18224.01, 18237.52, 2921.513279], [1607594400000.0, 18237.51, 18281.01, 18070.0, 18145.0, 3256.288961], [1607598000000.0, 18145.0, 18254.48, 18117.32, 18192.49, 2447.637312], [1607601600000.0, 18192.5, 18290.63, 18165.89, 18209.96, 1783.460883], [1607605200000.0, 18209.72, 18246.8, 18045.31, 18233.65, 3702.028832], [1607608800000.0, 18233.65, 18252.55, 18040.01, 18079.99, 3043.013527], [1607612400000.0, 18080.0, 18151.0, 17911.12, 18134.08, 4777.60176], [1607616000000.0, 18132.11, 18225.31, 18096.42, 18176.99, 2836.097029], [1607619600000.0, 18176.99, 18217.95, 18076.07, 18213.86, 1822.381199], [1607623200000.0, 18213.87, 18316.76, 18195.01, 18274.75, 2327.712761], [1607626800000.0, 18274.74, 18403.28, 18225.31, 18403.1, 2437.619642], [1607630400000.0, 18403.11, 18435.74, 18342.56, 18365.78, 2400.941605], [1607634000000.0, 18365.79, 18405.62, 18305.74, 18349.0, 1405.94011], [1607637600000.0, 18348.99, 18397.77, 18297.76, 18326.36, 1007.941783], [1607641200000.0, 18326.36, 18380.41, 18220.0, 18254.63, 1461.867189], [1607644800000.0, 18254.81, 18292.73, 17950.0, 18018.32, 3824.693799], [1607648400000.0, 18018.31, 18074.8, 17804.0, 17901.45, 5573.67197], [1607652000000.0, 17901.44, 18040.81, 17801.37, 17804.97, 3702.977958], [1607655600000.0, 17804.97, 17996.14, 17715.6, 17990.88, 3471.134571], [1607659200000.0, 17990.69, 18048.6, 17929.81, 17959.72, 2129.211271], [1607662800000.0, 17959.73, 18021.42, 17833.88, 17899.44, 2208.244252], [1607666400000.0, 17899.43, 17968.0, 17814.73, 17923.6, 1844.101172], [1607670000000.0, 17923.6, 17944.87, 17700.51, 17811.95, 3651.861489], [1607673600000.0, 17811.96, 18001.15, 17810.5, 17898.79, 3799.966897], [1607677200000.0, 17899.46, 17908.0, 17728.25, 17802.6, 2599.671677], [1607680800000.0, 17802.6, 17874.01, 17572.33, 17758.45, 5006.234384], [1607684400000.0, 17758.46, 17761.1, 17600.0, 17647.71, 3575.265625], [1607688000000.0, 17647.71, 17916.5, 17617.0, 17864.73, 4028.592436], [1607691600000.0, 17864.73, 17987.98, 17827.68, 17972.01, 2900.77125], [1607695200000.0, 17972.0, 18068.0, 17901.07, 18058.76, 3657.438567], [1607698800000.0, 18058.75, 18132.0, 18028.0, 18104.74, 3191.423603], [1607702400000.0, 18104.74, 18111.55, 17942.85, 17978.14, 2468.647851], [1607706000000.0, 17978.15, 18007.0, 17852.0, 17995.53, 2771.296577], [1607709600000.0, 17995.5, 18046.83, 17943.29, 18044.26, 1971.882831], [1607713200000.0, 18044.26, 18093.95, 17959.22, 17977.0, 2055.802959], [1607716800000.0, 17975.08, 18060.0, 17925.37, 17982.89, 2012.499396], [1607720400000.0, 17983.47, 18125.18, 17952.15, 18100.01, 2363.583895], [1607724000000.0, 18100.01, 18184.0, 18067.72, 18127.81, 1977.130093], [1607727600000.0, 18127.81, 18149.75, 18012.69, 18036.53, 1824.619736], [1607731200000.0, 18036.53, 18370.0, 18020.7, 18342.06, 4583.783306], [1607734800000.0, 18342.05, 18375.0, 18271.1, 18283.84, 1922.630745], [1607738400000.0, 18283.84, 18350.0, 18278.14, 18319.99, 1370.498539], [1607742000000.0, 18319.99, 18336.82, 18268.34, 18282.02, 1178.519836], [1607745600000.0, 18282.01, 18390.0, 18261.32, 18370.28, 1520.77657], [1607749200000.0, 18370.28, 18398.19, 18310.01, 18313.36, 1600.542043], [1607752800000.0, 18313.35, 18366.24, 18278.91, 18315.76, 1342.470282], [1607756400000.0, 18315.76, 18400.0, 18300.67, 18380.85, 1514.440818], [1607760000000.0, 18380.85, 18450.0, 18318.98, 18377.64, 2153.284652], [1607763600000.0, 18377.35, 18478.0, 18345.14, 18433.47, 1957.433154], [1607767200000.0, 18433.47, 18459.42, 18370.0, 18382.1, 1482.558626], [1607770800000.0, 18382.11, 18513.66, 18366.68, 18506.1, 1892.703046], [1607774400000.0, 18506.1, 18525.33, 18427.01, 18445.15, 1831.731773], [1607778000000.0, 18445.14, 18451.35, 18388.88, 18400.21, 1443.035834], [1607781600000.0, 18400.21, 18475.63, 18308.82, 18372.97, 2537.663833], [1607785200000.0, 18372.98, 18434.62, 18317.67, 18399.01, 1786.916682], [1607788800000.0, 18399.01, 18450.46, 18362.42, 18397.88, 1754.689938], [1607792400000.0, 18397.87, 18527.93, 18374.07, 18483.17, 1642.530837], [1607796000000.0, 18482.83, 18746.33, 18482.47, 18687.62, 4917.028007], [1607799600000.0, 18687.63, 18850.0, 18687.63, 18805.29, 3255.899793], [1607803200000.0, 18805.29, 18840.4, 18745.01, 18770.68, 2031.985016], [1607806800000.0, 18770.67, 18840.4, 18729.09, 18787.72, 1855.293184], [1607810400000.0, 18787.72, 18948.66, 18754.37, 18870.51, 2213.09286], [1607814000000.0, 18872.07, 18880.37, 18768.77, 18808.69, 1730.469058], [1607817600000.0, 18808.69, 18875.0, 18711.12, 18750.0, 2078.489661], [1607821200000.0, 18750.0, 18808.98, 18711.57, 18795.31, 1142.36527], [1607824800000.0, 18795.12, 18830.0, 18750.02, 18812.61, 1530.614138], [1607828400000.0, 18812.61, 18831.36, 18760.77, 18784.42, 932.07127], [1607832000000.0, 18784.56, 18884.5, 18768.19, 18852.51, 1252.673306], [1607835600000.0, 18852.51, 18938.55, 18815.0, 18850.0, 2197.616883], [1607839200000.0, 18849.99, 18984.55, 18844.95, 18973.93, 1930.019028], [1607842800000.0, 18973.93, 19306.27, 18973.93, 19247.68, 7062.527311], [1607846400000.0, 19249.21, 19322.24, 19179.5, 19245.91, 3622.141403], [1607850000000.0, 19245.25, 19301.14, 19185.74, 19263.25, 2399.866077], [1607853600000.0, 19263.26, 19370.0, 19255.0, 19278.99, 3202.08969], [1607857200000.0, 19278.99, 19349.0, 19200.0, 19316.81, 2247.859112], [1607860800000.0, 19316.8, 19400.0, 19265.91, 19348.97, 3516.700147], [1607864400000.0, 19348.97, 19411.0, 19290.01, 19321.13, 2820.186035], [1607868000000.0, 19321.13, 19364.0, 19275.41, 19292.13, 1662.41871], [1607871600000.0, 19292.13, 19334.08, 19080.0, 19211.34, 3876.890837], [1607875200000.0, 19211.34, 19385.0, 19208.7, 19328.12, 3401.557937], [1607878800000.0, 19328.11, 19334.85, 19197.36, 19273.6, 1835.01721], [1607882400000.0, 19273.59, 19273.6, 19172.33, 19185.33, 1636.424148], [1607886000000.0, 19185.33, 19233.0, 19155.0, 19193.78, 1307.778726], [1607889600000.0, 19193.28, 19193.29, 19089.63, 19154.57, 1908.19497], [1607893200000.0, 19154.56, 19225.63, 19150.0, 19190.01, 1153.642827], [1607896800000.0, 19190.0, 19194.33, 18971.0, 19112.41, 2215.95021], [1607900400000.0, 19112.41, 19225.0, 19090.0, 19174.99, 1627.726838], [1607904000000.0, 19174.99, 19174.99, 19000.0, 19057.19, 1888.751084], [1607907600000.0, 19056.23, 19112.74, 19019.65, 19085.64, 1246.655384], [1607911200000.0, 19085.64, 19307.09, 19052.62, 19297.7, 2765.118326], [1607914800000.0, 19297.9, 19347.0, 19227.01, 19260.68, 2413.537572], [1607918400000.0, 19260.68, 19282.11, 19055.51, 19126.94, 2675.257429], [1607922000000.0, 19126.93, 19130.0, 19033.84, 19085.53, 1556.626987], [1607925600000.0, 19085.53, 19207.93, 19080.4, 19206.23, 1741.878147], [1607929200000.0, 19206.24, 19210.0, 19100.43, 19155.8, 1326.684257], [1607932800000.0, 19155.8, 19244.69, 19140.0, 19222.0, 2065.456886], [1607936400000.0, 19222.0, 19258.96, 19120.0, 19184.61, 1986.246842], [1607940000000.0, 19184.6, 19215.19, 19052.72, 19088.94, 2101.350737], [1607943600000.0, 19088.94, 19144.14, 19051.11, 19106.44, 1864.319235], [1607947200000.0, 19106.43, 19147.0, 19028.97, 19114.66, 2118.835216], [1607950800000.0, 19114.66, 19199.86, 19063.31, 19184.19, 2220.236947], [1607954400000.0, 19184.2, 19227.66, 19088.88, 19221.74, 2439.250888], [1607958000000.0, 19221.75, 19300.0, 19158.57, 19200.07, 3589.350164], [1607961600000.0, 19200.0, 19237.79, 19114.58, 19152.37, 1896.167202], [1607965200000.0, 19152.59, 19170.51, 19113.08, 19143.09, 1255.884608], [1607968800000.0, 19143.38, 19236.76, 19143.38, 19236.75, 1817.186019], [1607972400000.0, 19236.75, 19236.76, 19165.0, 19175.88, 1343.180358], [1607976000000.0, 19175.88, 19216.57, 19130.85, 19208.07, 1386.346738], [1607979600000.0, 19208.08, 19220.85, 19150.85, 19197.83, 1359.138901], [1607983200000.0, 19197.82, 19296.0, 19188.76, 19291.9, 1317.369348], [1607986800000.0, 19291.99, 19349.0, 19200.85, 19273.14, 2882.372019], [1607990400000.0, 19273.69, 19395.0, 19243.64, 19394.94, 2998.531387], [1607994000000.0, 19394.94, 19470.0, 19320.85, 19455.06, 2941.282431], [1607997600000.0, 19455.06, 19570.0, 19444.25, 19458.97, 5055.6835], [1608001200000.0, 19458.98, 19509.57, 19416.35, 19479.7, 1934.600997], [1608004800000.0, 19479.69, 19497.36, 19161.03, 19180.71, 3179.726241], [1608008400000.0, 19180.7, 19259.78, 19050.0, 19132.4, 3984.043664], [1608012000000.0, 19133.29, 19211.76, 19101.0, 19186.12, 2257.168193], [1608015600000.0, 19186.12, 19226.95, 19146.95, 19197.6, 1730.803117], [1608019200000.0, 19197.59, 19219.96, 19106.95, 19137.24, 1854.09426], [1608022800000.0, 19136.96, 19209.99, 19074.0, 19187.51, 2498.768], [1608026400000.0, 19187.51, 19350.0, 19117.9, 19310.0, 3596.157208], [1608030000000.0, 19309.99, 19334.9, 19241.07, 19293.54, 1837.391665], [1608033600000.0, 19293.54, 19383.98, 19256.33, 19291.4, 2177.678422], [1608037200000.0, 19291.4, 19349.0, 19263.5, 19349.0, 1506.492391], [1608040800000.0, 19349.16, 19433.0, 19256.27, 19337.46, 3320.865118], [1608044400000.0, 19337.46, 19429.27, 19337.46, 19403.31, 2771.86623], [1608048000000.0, 19403.31, 19425.87, 19328.06, 19364.1, 2346.463058], [1608051600000.0, 19364.09, 19422.92, 19352.48, 19399.53, 1851.189273], [1608055200000.0, 19399.53, 19543.0, 19390.0, 19530.0, 3565.309308], [1608058800000.0, 19529.99, 19545.0, 19465.0, 19530.38, 2470.61552], [1608062400000.0, 19530.38, 19547.0, 19461.55, 19491.87, 1595.053706], [1608066000000.0, 19491.87, 19511.99, 19276.0, 19419.92, 3770.651415], [1608069600000.0, 19419.93, 19489.09, 19379.19, 19461.38, 1176.976643], [1608073200000.0, 19461.37, 19471.18, 19345.51, 19426.43, 1412.954264], [1608076800000.0, 19426.43, 19454.97, 19278.6, 19365.29, 2363.836441], [1608080400000.0, 19365.28, 19420.0, 19317.01, 19389.37, 1791.407177], [1608084000000.0, 19389.37, 19488.02, 19389.37, 19442.08, 1919.597241], [1608087600000.0, 19442.08, 19454.0, 19325.0, 19346.52, 2010.880432], [1608091200000.0, 19346.51, 19403.07, 19300.3, 19358.68, 1417.796057], [1608094800000.0, 19358.31, 19421.8, 19339.35, 19373.81, 1444.892753], [1608098400000.0, 19373.82, 19454.93, 19341.4, 19429.89, 1512.570737], [1608102000000.0, 19429.9, 19451.03, 19399.0, 19423.96, 1290.055946], [1608105600000.0, 19423.95, 19487.17, 19370.62, 19482.57, 1854.4269], [1608109200000.0, 19482.57, 19525.0, 19420.84, 19516.69, 2118.952672], [1608112800000.0, 19516.22, 19800.0, 19498.01, 19798.17, 8207.25114], [1608116400000.0, 19798.18, 19860.0, 19645.9, 19739.78, 5884.326771], [1608120000000.0, 19739.78, 19889.99, 19680.0, 19762.81, 6075.101618], [1608123600000.0, 19762.8, 20450.0, 19762.8, 20319.51, 11510.059772], [1608127200000.0, 20320.85, 20799.0, 20206.16, 20649.0, 14801.122043], [1608130800000.0, 20650.01, 20733.0, 20539.0, 20661.37, 7170.614379], [1608134400000.0, 20661.37, 20865.43, 20620.0, 20854.56, 7391.879642], [1608138000000.0, 20854.56, 20855.0, 20573.82, 20639.82, 5243.123474], [1608141600000.0, 20639.82, 20737.44, 20550.0, 20585.79, 3487.839577], [1608145200000.0, 20585.79, 20766.39, 20550.0, 20736.87, 2693.660582], [1608148800000.0, 20736.87, 20839.0, 20727.3, 20802.82, 3210.854218], [1608152400000.0, 20802.82, 21288.0, 20711.0, 21192.78, 7677.808354], [1608156000000.0, 21191.53, 21444.44, 21172.79, 21366.42, 6275.220393], [1608159600000.0, 21366.02, 21560.0, 21200.0, 21335.52, 6953.057251], [1608163200000.0, 21335.52, 21400.0, 21230.0, 21389.25, 4427.884694], [1608166800000.0, 21389.26, 21860.05, 21389.26, 21719.22, 6860.927779], [1608170400000.0, 21719.77, 21994.0, 21642.13, 21913.9, 5864.760533], [1608174000000.0, 21913.91, 22166.0, 21703.67, 21753.26, 7372.388063], [1608177600000.0, 21752.65, 21900.0, 21735.09, 21785.88, 3477.941858], [1608181200000.0, 21785.87, 22311.38, 21781.99, 22280.0, 5326.441496], [1608184800000.0, 22280.0, 22400.0, 22053.0, 22172.72, 6338.231327], [1608188400000.0, 22172.72, 22488.0, 22102.0, 22478.76, 5356.558094], [1608192000000.0, 22478.75, 22990.0, 22400.0, 22904.7, 12107.706886], [1608195600000.0, 22904.7, 23800.0, 21801.0, 22650.0, 23832.91859], [1608199200000.0, 22648.86, 22934.0, 22380.79, 22618.11, 11643.529767], [1608202800000.0, 22617.73, 22808.56, 22528.73, 22752.16, 5168.285261], [1608206400000.0, 22752.15, 23199.0, 22600.0, 23149.99, 6628.225882], [1608210000000.0, 23150.0, 23348.0, 22647.51, 22831.84, 7791.389716], [1608213600000.0, 22832.73, 23257.9, 22715.38, 23086.01, 7463.214961], [1608217200000.0, 23086.0, 23369.0, 22900.0, 23333.8, 6953.463504], [1608220800000.0, 23333.81, 23650.0, 23200.0, 23591.23, 10032.336464], [1608224400000.0, 23592.2, 23699.7, 23000.0, 23023.98, 6924.834078], [1608228000000.0, 23023.98, 23280.1, 22500.0, 23250.27, 11618.263635], [1608231600000.0, 23251.16, 23497.0, 22804.22, 22899.08, 7447.203472], [1608235200000.0, 22898.47, 23106.34, 22311.1, 22791.55, 8469.168153], [1608238800000.0, 22791.78, 22848.39, 22382.7, 22791.96, 5412.104163], [1608242400000.0, 22785.93, 23080.0, 22757.52, 22963.04, 3385.2089], [1608246000000.0, 22963.05, 23000.0, 22570.59, 22797.16, 4979.489472], [1608249600000.0, 22797.15, 22842.76, 22470.35, 22764.77, 3923.98594], [1608253200000.0, 22764.77, 23146.95, 22634.11, 22988.21, 4155.082395], [1608256800000.0, 22988.21, 23248.99, 22937.9, 23003.31, 3844.945472], [1608260400000.0, 23003.31, 23047.88, 22762.05, 22811.59, 3180.188409], [1608264000000.0, 22811.58, 22973.92, 22772.05, 22870.93, 2602.419348], [1608267600000.0, 22870.92, 23028.13, 22835.02, 22955.51, 2184.781421], [1608271200000.0, 22955.51, 23168.59, 22874.69, 22994.49, 3086.571333], [1608274800000.0, 22992.06, 23069.44, 22838.19, 23055.98, 3071.638245], [1608278400000.0, 23055.98, 23285.18, 22938.87, 23114.84, 4699.90906], [1608282000000.0, 23114.84, 23215.0, 23017.97, 23212.82, 2878.472119], [1608285600000.0, 23212.82, 23220.0, 22933.07, 22964.67, 3730.390769], [1608289200000.0, 22964.12, 22967.29, 22691.61, 22886.17, 4048.00235], [1608292800000.0, 22886.18, 23073.25, 22727.0, 22939.42, 3427.454017], [1608296400000.0, 22939.41, 22948.64, 22548.99, 22571.78, 4037.637495], [1608300000000.0, 22571.79, 22758.44, 22400.0, 22610.65, 5664.078883], [1608303600000.0, 22610.65, 22636.27, 22350.0, 22549.0, 4804.643291], [1608307200000.0, 22549.0, 22752.77, 22463.59, 22719.28, 3562.521096], [1608310800000.0, 22719.29, 22818.0, 22670.22, 22749.32, 2476.345002], [1608314400000.0, 22749.32, 22829.04, 22694.22, 22781.44, 2092.86434], [1608318000000.0, 22781.44, 22783.42, 22626.81, 22762.28, 1880.94478], [1608321600000.0, 22762.28, 22795.11, 22650.29, 22755.66, 1760.425011], [1608325200000.0, 22755.25, 23078.47, 22751.1, 22880.07, 3998.59848], [1608328800000.0, 22880.07, 23034.33, 22800.0, 23011.38, 1992.223703], [1608332400000.0, 23011.38, 23143.56, 22908.45, 23107.39, 2542.011356], [1608336000000.0, 23107.39, 23168.28, 22940.0, 22954.02, 2050.965871], [1608339600000.0, 22954.02, 23099.0, 22902.1, 23046.76, 2149.001638], [1608343200000.0, 23046.75, 23220.0, 23034.75, 23220.0, 2910.223586], [1608346800000.0, 23219.51, 23225.0, 23093.93, 23098.04, 2329.278504], [1608350400000.0, 23098.05, 23138.0, 23032.0, 23043.4, 1859.427393], [1608354000000.0, 23043.41, 23075.36, 22924.79, 22958.0, 2023.000091], [1608357600000.0, 22958.48, 23010.6, 22821.0, 22853.5, 1813.015202], [1608361200000.0, 22853.51, 23038.0, 22832.0, 22853.75, 1791.212957], [1608364800000.0, 22853.75, 22990.0, 22750.0, 22983.77, 3048.665524], [1608368400000.0, 22983.77, 23045.49, 22928.05, 22973.06, 2229.925469], [1608372000000.0, 22973.06, 23080.45, 22950.0, 23019.99, 2331.997056], [1608375600000.0, 23020.0, 23063.49, 22875.01, 22888.54, 1942.204736], [1608379200000.0, 22888.53, 23127.72, 22886.79, 23041.53, 3218.568336], [1608382800000.0, 23041.53, 23189.1, 22992.23, 23172.74, 3234.586672], [1608386400000.0, 23172.75, 23627.99, 23052.0, 23296.96, 10267.116714], [1608390000000.0, 23296.96, 23650.0, 23296.95, 23552.01, 6178.384265], [1608393600000.0, 23552.0, 24171.47, 23456.55, 23966.48, 13881.907694], [1608397200000.0, 23966.48, 24100.0, 23680.0, 23886.44, 6294.413079], [1608400800000.0, 23886.71, 23906.66, 23556.76, 23822.66, 4060.081676], [1608404400000.0, 23822.66, 23883.79, 23651.0, 23791.91, 3011.088133], [1608408000000.0, 23791.82, 23937.0, 23782.91, 23902.17, 2634.921207], [1608411600000.0, 23902.19, 24065.41, 23780.21, 23974.71, 3253.897865], [1608415200000.0, 23974.7, 23999.0, 23825.95, 23905.73, 1543.403326], [1608418800000.0, 23905.73, 23915.26, 23719.25, 23821.61, 1987.777683], [1608422400000.0, 23821.6, 23836.48, 23230.0, 23481.41, 5981.312918], [1608426000000.0, 23483.11, 23548.72, 23390.0, 23485.56, 2118.503927], [1608429600000.0, 23486.42, 23542.99, 23300.0, 23429.92, 2007.609104], [1608433200000.0, 23429.92, 23429.92, 23180.88, 23346.48, 2578.315434], [1608436800000.0, 23346.25, 23452.63, 23060.0, 23426.54, 2789.303747], [1608440400000.0, 23426.15, 23588.88, 23397.58, 23481.38, 2113.937874], [1608444000000.0, 23481.38, 23614.84, 23459.98, 23506.67, 1518.982809], [1608447600000.0, 23506.67, 23646.31, 23410.62, 23628.88, 2081.734038], [1608451200000.0, 23628.89, 23791.0, 23532.0, 23698.49, 2953.113346], [1608454800000.0, 23698.49, 23748.4, 23503.0, 23592.92, 2928.141217], [1608458400000.0, 23592.93, 23648.92, 23358.78, 23394.76, 3238.112754], [1608462000000.0, 23394.77, 23625.0, 23393.0, 23553.02, 1945.517865], [1608465600000.0, 23553.02, 23588.71, 23333.57, 23472.44, 2466.127241], [1608469200000.0, 23472.45, 23590.9, 23296.0, 23561.36, 3044.961002], [1608472800000.0, 23561.36, 23682.0, 23500.19, 23537.7, 2538.180355], [1608476400000.0, 23537.7, 23910.0, 23527.48, 23868.09, 4012.70827], [1608480000000.0, 23868.08, 23901.01, 23628.31, 23630.1, 3492.542838], [1608483600000.0, 23630.1, 23800.0, 23625.41, 23722.62, 2050.917554], [1608487200000.0, 23722.62, 23877.0, 23655.26, 23866.69, 2433.334581], [1608490800000.0, 23866.68, 23995.97, 23780.83, 23930.0, 2727.20596], [1608494400000.0, 23928.8, 24295.0, 23850.18, 24172.25, 6396.944648], [1608498000000.0, 24172.99, 24208.62, 23350.0, 23373.05, 7510.998452], [1608501600000.0, 23376.94, 23614.36, 23090.0, 23507.79, 5563.847281], [1608505200000.0, 23507.03, 23594.73, 23450.0, 23455.52, 2197.79247], [1608508800000.0, 23455.54, 23709.31, 23287.94, 23679.55, 3148.360382], [1608512400000.0, 23679.55, 23744.86, 23587.85, 23663.48, 1823.646184], [1608516000000.0, 23663.49, 23887.0, 23652.48, 23856.38, 2136.222374], [1608519600000.0, 23856.39, 24016.93, 23657.5, 23945.29, 3842.134228], [1608523200000.0, 23945.3, 24102.77, 23790.0, 23895.73, 3537.794056], [1608526800000.0, 23895.02, 23975.0, 23841.99, 23909.83, 2315.212127], [1608530400000.0, 23909.83, 23926.8, 23700.0, 23921.73, 2593.611244], [1608534000000.0, 23921.74, 24028.15, 23888.15, 23980.0, 3110.573389], [1608537600000.0, 23979.99, 24075.94, 23635.08, 23659.59, 4670.537852], [1608541200000.0, 23659.59, 23739.18, 23328.0, 23461.35, 6435.521197], [1608544800000.0, 23460.93, 23495.91, 22441.01, 22445.99, 13511.910357], [1608548400000.0, 22446.39, 22833.01, 22350.0, 22645.85, 7582.69498], [1608552000000.0, 22646.7, 22663.0, 21815.0, 22307.5, 11239.201922], [1608555600000.0, 22307.5, 22665.35, 22251.23, 22646.53, 2830.345587], [1608559200000.0, 22646.53, 22646.53, 22646.53, 22646.53, 0.0], [1608573600000.0, 22693.65, 22988.6, 22621.44, 22813.66, 4796.306546], [1608577200000.0, 22813.65, 22940.3, 22681.32, 22816.62, 2874.578236], [1608580800000.0, 22816.63, 22930.0, 22732.78, 22829.79, 1900.463944], [1608584400000.0, 22829.79, 23162.26, 22765.0, 23127.37, 3028.124973], [1608588000000.0, 23127.38, 23254.33, 23021.17, 23170.89, 2084.951696], [1608591600000.0, 23169.88, 23228.35, 22699.99, 22719.71, 3712.997151], [1608595200000.0, 22719.88, 22926.14, 22500.0, 22558.42, 4435.40638], [1608598800000.0, 22558.41, 22875.61, 22428.86, 22753.99, 3850.003384], [1608602400000.0, 22753.99, 22970.0, 22730.0, 22957.57, 2524.453641], [1608606000000.0, 22957.58, 22969.0, 22737.9, 22851.2, 2438.400079], [1608609600000.0, 22851.19, 23076.77, 22851.19, 22951.3, 2681.871362], [1608613200000.0, 22951.3, 22970.0, 22610.19, 22681.51, 3600.425979], [1608616800000.0, 22681.5, 22870.02, 22551.02, 22750.96, 3044.631907], [1608620400000.0, 22750.85, 22826.78, 22500.0, 22655.83, 3555.118357], [1608624000000.0, 22655.82, 22750.0, 22353.4, 22689.86, 4786.238036], [1608627600000.0, 22689.87, 22822.83, 22600.0, 22783.85, 3441.122021], [1608631200000.0, 22782.92, 22819.76, 22602.12, 22701.2, 2835.326798], [1608634800000.0, 22701.2, 23155.0, 22701.2, 23119.48, 4983.540952], [1608638400000.0, 23119.47, 23300.0, 23062.01, 23175.57, 4777.70466], [1608642000000.0, 23175.57, 23536.96, 23103.98, 23487.2, 4972.84068], [1608645600000.0, 23487.2, 23628.89, 23335.83, 23439.99, 6009.583132], [1608649200000.0, 23439.99, 23600.0, 23300.42, 23342.58, 4452.75789], [1608652800000.0, 23342.68, 23456.0, 23237.0, 23348.95, 4298.902555], [1608656400000.0, 23348.43, 23442.0, 23224.4, 23342.54, 2756.859302], [1608660000000.0, 23342.54, 23520.0, 23342.51, 23435.27, 2784.592049]] + } + + +@pytest.fixture +def default_trades_data(): + # imported from real backtesting data + return { + "BTC/USDT": [ + {commons_enums.PlotAttributes.X.value: 1607986800000, + commons_enums.PlotAttributes.VOLUME.value: 0.00617086, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", + commons_enums.PlotAttributes.Y.value: 19291.9, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.BUY.value, + commons_enums.DBRows.FEES_AMOUNT.value: 6.17e-06, + commons_enums.DBRows.FEES_CURRENCY.value: 'BTC'}, + {commons_enums.PlotAttributes.X.value: 1608040800000, + commons_enums.PlotAttributes.VOLUME.value: 0.00614347, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", + commons_enums.PlotAttributes.Y.value: 19349.0, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.BUY.value, + commons_enums.DBRows.FEES_AMOUNT.value: 6.14e-06, + commons_enums.DBRows.FEES_CURRENCY.value: 'BTC'}, + {commons_enums.PlotAttributes.X.value: 1608134400000, + commons_enums.PlotAttributes.VOLUME.value: 0.00616469, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", + commons_enums.PlotAttributes.Y.value: 20835.252, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.SELL.value, + commons_enums.DBRows.FEES_AMOUNT.value: 0.12834515, + commons_enums.DBRows.FEES_CURRENCY.value: 'USDT'}, + {commons_enums.PlotAttributes.X.value: 1608152400000, + commons_enums.PlotAttributes.VOLUME.value: 0.00613733, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", + commons_enums.PlotAttributes.Y.value: 20896.92, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.SELL.value, + commons_enums.DBRows.FEES_AMOUNT.value: 0.12830709, + commons_enums.DBRows.FEES_CURRENCY.value: 'USDT'}, + {commons_enums.PlotAttributes.X.value: 1608343200000, + commons_enums.PlotAttributes.VOLUME.value: 0.00526114, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", + commons_enums.PlotAttributes.Y.value: 23046.76, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.BUY.value, + commons_enums.DBRows.FEES_AMOUNT.value: 5.26e-06, + commons_enums.DBRows.FEES_CURRENCY.value: 'BTC'}, + {commons_enums.PlotAttributes.X.value: 1608390000000, + commons_enums.PlotAttributes.VOLUME.value: 0.0051656, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", + commons_enums.PlotAttributes.Y.value: 23296.96, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.BUY.value, + commons_enums.DBRows.FEES_AMOUNT.value: 5.17e-06, + commons_enums.DBRows.FEES_CURRENCY.value: 'BTC'}, + {commons_enums.PlotAttributes.X.value: 1608548400000, + commons_enums.PlotAttributes.VOLUME.value: 0.00516043, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", commons_enums.PlotAttributes.Y.value: 22365.0816, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.SELL.value, + commons_enums.DBRows.FEES_AMOUNT.value: 0.11540382, commons_enums.DBRows.FEES_CURRENCY.value: 'USDT'}, + {commons_enums.PlotAttributes.X.value: 1608552000000, + commons_enums.PlotAttributes.VOLUME.value: 0.00525588, + commons_enums.DBRows.SYMBOL.value: "BTC/USDT", commons_enums.PlotAttributes.Y.value: 22124.8896, + commons_enums.PlotAttributes.SIDE.value: trading_enums.TradeOrderSide.SELL.value, + commons_enums.DBRows.FEES_AMOUNT.value: 0.11637692, commons_enums.DBRows.FEES_CURRENCY.value: 'USDT'}, + + ] + } + + +@pytest.fixture +def default_portfolio_historical_value(): + # imported from real backtesting data, verified values + return [1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 999.8815237991, 999.7687099720999, 1000.5161786346, 1000.8867997973999, 1000.9109653822, 1001.0386361121, 999.195455449, 998.9031874960999, 999.2288680688, 999.2995770631, 998.9258119084, 999.2374369879, 999.9924882191, 999.8910790686, 999.877886632, 1000.1161382391999, 999.9722046052, 1000.7822926222, 1000.2998073977999, 1000.7357909865999, 1002.3407125158, 1002.3455103035999, 1001.8717595133999, 1000.9867521946, 1001.4965479033999, 1001.0667153246, 1000.3144468016, 1000.6108024634, 1001.2592419376, 1000.0835378861999, 1000.2287017222, 1000.4195060523999, 1001.1094033339999, 1001.036206315, 1001.7573507274, 1002.1713137003999, 1005.6399912595999, 1004.9215532915999, 1005.2047457919999, 1012.0698880529999, 1016.1192209561999, 1017.2025553799799, 1018.38822616268, 1017.0702959184799, 1016.7386959785799, 1017.6659237949799, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.5198963714798, 1018.3986178550799, 1019.3066236838799, 1018.6682444990798, 1018.3810632158799, 1017.9346813274799, 1017.3829716038799, 1017.3842330150799, 1018.0676025326799, 1018.0113120578799, 1018.2580230650799, 1017.5670325214799, 1018.3711821614799, 1019.0608587350798, 1019.5932463066798, 1022.2498220090798, 1026.56717417788, 1025.7362651291799, 1025.0691004736798, 1024.74786147328, 1025.8975096079798, 1026.6527962460798, 1025.93438334538, 1025.0580591850799, 1021.5322424131798, 1021.5667203992798, 1020.9781988842799, 1020.1066662265798, 1020.9389293955799, 1021.5142221968799, 1021.7776506767798, 1023.0507320849798, 1023.7757072609799, 1022.6761615773798, 1020.6120655877799, 1022.2604466452799, 1021.4212045485799, 1022.3473186706799, 1022.1008687760799, 1025.54220927388, 1023.0633358200798, 1024.0270528212798, 1025.52762643988, 1026.1746876170798, 1028.7182463559798, 1020.4263427804799, 1021.7814005483799, 1021.2450647464799, 1023.5784223495798, 1023.4111364109799, 1025.4204426099798, 1026.3465567320798, 1025.8228246652798, 1025.9770902163798, 1026.1011484684798, 1026.7078985259798, 1023.3705128019799, 1021.3012086573799, 1010.1984553833678, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158, 1008.3923109410158] + + +@pytest.fixture +def default_portfolio_data(): + return {'BTC': 0.0, 'USDT': 1000.0} + + +@pytest.fixture +def default_spot_metadata(): + return { + commons_enums.DBRows.EXCHANGES.value: "binance", + commons_enums.DBRows.FUTURE_CONTRACTS.value: {}, + } + + +@pytest.fixture +def default_pnl_historical_value(): + # imported from real backtesting data, verified values + # add 0 at the end for the end backtesting value + return [0, 9.266910467879995, 9.25298590360002, -3.7521819056716623, -6.375403524792318, 0] + + +@pytest.fixture +def default_funding_fees_data(): + #TODO + return [] + + +@pytest.fixture +def default_realized_pnl_history(): + #TODO + return [] diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_backtesting_data_collector.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_backtesting_data_collector.py new file mode 100644 index 0000000000..f83a896a0b --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_backtesting_data_collector.py @@ -0,0 +1,251 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.enums as common_enums +import octobot_commons.constants as common_constants + +import octobot.constants as constants + +import tentacles.Meta.Keywords.scripting_library.backtesting.backtesting_data_collector as src_backtesting_data_collector +import tentacles.Meta.Keywords.scripting_library.errors as errors + +class DummyLogger: + def __init__(self): + self.infos = [] + self.errors = [] + self.exceptions = [] + def info(self, msg): + self.infos.append(msg) + def error(self, msg): + self.errors.append(msg) + def exception(self, err, *args, **kwargs): + self.exceptions.append((err, args, kwargs)) + +def patch_logger(monkeypatch): + logger = DummyLogger() + monkeypatch.setattr(src_backtesting_data_collector, "_get_logger", lambda: logger) + return logger + +def base_args(): + return dict( + exchange="binance", + symbol="BTC/USDT", + time_frame=common_enums.TimeFrames.ONE_HOUR, + allow_candles_beyond_range=False, + required_from_the_start=True, + required_till_the_end=True, + first_traded_symbols_time=9999999999, # large for test + allow_any_backtesting_start_and_end_time=False, + ) + + +def test_ensure_compatible_candle_time_normal_case(monkeypatch): + logger = patch_logger(monkeypatch) + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 10 * tf_sec + first_candle_time = first_open_time + last_candle_time = last_open_time + result = src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert result is None + assert not logger.errors + assert not logger.infos + +def test_ensure_compatible_candle_time_starts_too_early(): + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 10 * tf_sec + first_candle_time = first_open_time - constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW - 1 + last_candle_time = last_open_time + with pytest.raises(errors.InvalidBacktestingDataError) as exc: + src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert "starts too early" in str(exc.value) + +def test_ensure_compatible_candle_time_starts_too_late_and_required(): + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 10 * tf_sec + first_candle_time = first_open_time + tf_sec * 2 + last_candle_time = last_open_time + args["first_traded_symbols_time"] = first_open_time - constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW # force fail + with pytest.raises(errors.InvalidBacktestingDataError) as exc: + src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert "starts too late" in str(exc.value) + +def test_ensure_compatible_candle_time_starts_too_late_but_adapted_with_test_data(monkeypatch): + logger = patch_logger(monkeypatch) + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 10 * tf_sec + first_candle_time = first_open_time + tf_sec * 2 + last_candle_time = last_open_time + args["first_traded_symbols_time"] = first_open_time + tf_sec * 3 # allow adaptation + result = src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert result == first_candle_time + assert any("acceptable, start time is adapted" in msg for msg in logger.infos) + +def test_ensure_compatible_candle_time_starts_too_late_but_adapted_with_real_data_dca(monkeypatch): + logger = patch_logger(monkeypatch) + args = base_args() + args["time_frame"] = common_enums.TimeFrames.FOUR_HOURS + first_open_time = 1737424774.2265518 # Tuesday, January 21, 2025 9:44:54.459 + last_open_time = 1752990294.4590356 # Sunday, July 20, 2025 5:44:54.459 + first_candle_time = 1737446400 # Tuesday, January 21, 2025 12:00:00 + last_candle_time = 1752955200 # Saturday, July 19, 2025 20:00:00 + args["first_traded_symbols_time"] = 1737465882.5380511 # Tuesday, January 21, 2025 13:24:42.538 + # fails without the kw_constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW allowance over first_traded_symbols_time + result = src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert result == first_candle_time + assert any("acceptable, start time is adapted" in msg for msg in logger.infos) + + first_candle_time = args["first_traded_symbols_time"] + constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW # Thursday, January 23, 2025 13:24:42.538 + with pytest.raises(errors.InvalidBacktestingDataError) as exc: + result = src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert "starts too late" in str(exc.value) + +def test_ensure_compatible_candle_time_starts_too_late_but_adapted_with_real_data_basked(monkeypatch): + logger = patch_logger(monkeypatch) + args = base_args() + args["time_frame"] = common_enums.TimeFrames.FOUR_HOURS + first_open_time = 1737453626.6562696 # Tuesday, January 21, 2025 10:00:26.656 + last_open_time = 1752919226.658268 # Saturday, July 19, 2025 10:00:26.658 + first_candle_time = 1737590400 # Thursday, January 23, 2025 0:00:00 + last_candle_time = 1752883200 # Saturday, July 19, 2025 0:00:00 + args["first_traded_symbols_time"] = 1749325565.048149 # Saturday, June 7, 2025 19:46:05.048 + result = src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert result == first_candle_time + assert any("acceptable, start time is adapted" in msg for msg in logger.infos) + +def test_ensure_compatible_candle_time_ends_too_late(): + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 10 * tf_sec + first_candle_time = first_open_time + last_candle_time = last_open_time + tf_sec * 2 + with pytest.raises(errors.InvalidBacktestingDataError) as exc: + src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert "ends too late" in str(exc.value) + +def test_ensure_compatible_candle_time_ends_too_early_and_required(): + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 10 * tf_sec + first_candle_time = first_open_time + last_candle_time = last_open_time - constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW - 1 + with pytest.raises(errors.InvalidBacktestingDataError) as exc: + src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert "ends too early" in str(exc.value) + +def test_ensure_compatible_candle_time_ends_too_early_but_not_required(monkeypatch): + logger = patch_logger(monkeypatch) + args = base_args() + args["required_till_the_end"] = False + first_open_time = 1000000 + last_open_time = 1000000 + constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW + first_candle_time = first_open_time + last_candle_time = last_open_time - constants.BACKTESTING_DATA_ALLOWED_PRICE_WINDOW - 1 + result = src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert result is None + assert any("acceptable, this symbol is not required till the end" in msg for msg in logger.infos) + +def test_ensure_compatible_candle_time_adapted_start_time_too_short(): + args = base_args() + tf_sec = common_enums.TimeFramesMinutes[args["time_frame"]] * common_constants.MINUTE_TO_SECONDS + first_open_time = 1000000 + last_open_time = 1000000 + 30 * tf_sec + first_candle_time = first_open_time + 25 * tf_sec + last_candle_time = last_open_time + args["first_traded_symbols_time"] = first_open_time + 30 * tf_sec + # This will adapt, but duration will be too short + with pytest.raises(errors.InvalidBacktestingDataError) as exc: + src_backtesting_data_collector.ensure_compatible_candle_time( + **args, + first_open_time=first_open_time, + last_open_time=last_open_time, + first_candle_time=first_candle_time, + last_candle_time=last_candle_time, + ) + assert "adapted backtesting start time starts too late" in str(exc.value) + + + diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_collect_data_and_run_backtesting.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_collect_data_and_run_backtesting.py new file mode 100644 index 0000000000..7b5136b5cd --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_collect_data_and_run_backtesting.py @@ -0,0 +1,121 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest + +import octobot_commons.enums as commons_enums +import octobot_commons.profiles.profile_data as commons_profile_data +import octobot_commons.constants as commons_constants + +import octobot_trading.api +import octobot_trading.exchanges.connectors.ccxt.ccxt_clients_cache as ccxt_clients_cache +import octobot_trading.exchanges.util.exchange_data as exchange_data_import + +import tentacles.Meta.Keywords.scripting_library as scripting_library +import tentacles.Trading.Mode.index_trading_mode.index_distribution as index_distribution +import tentacles.Trading.Mode.index_trading_mode.index_trading as index_trading +import octobot_copy.enums as copy_enums + + +@pytest.fixture +def trading_mode_tentacles_data() -> commons_profile_data.TentaclesData: + distribution = [ + { + copy_enums.DistributionKeys.NAME: "BTC", + copy_enums.DistributionKeys.VALUE: 50.0, + }, + { + copy_enums.DistributionKeys.NAME: "ETH", + copy_enums.DistributionKeys.VALUE: 30.0, + }, + { + copy_enums.DistributionKeys.NAME: "USD", # Will be replaced by reference market + copy_enums.DistributionKeys.VALUE: 20.0, + }, + ] + + # Create test trading mode config + trading_mode_config = { + index_trading.IndexTradingModeProducer.INDEX_CONTENT: distribution, + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_MIN_PERCENT: 5.0, + index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE: "test_profile", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES: [ + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "test_profile", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 5.0, + } + ], + } + return commons_profile_data.TentaclesData( + name=index_trading.IndexTradingMode.get_name(), + config=trading_mode_config + ) + +@pytest.mark.asyncio +async def test_collect_candles_without_backend_and_run_backtesting(trading_mode_tentacles_data): + # 1. init strategy + exchange_data = exchange_data_import.ExchangeData() + # run backtesting for 200 days + days = 200 + profile_data = scripting_library.create_index_config_from_tentacles_config( + tentacles_config=[trading_mode_tentacles_data], + exchange="binanceus", + starting_funds=1000, + backtesting_start_time_delta=days * commons_constants.DAYS_TO_SECONDS + ) + + # 2. collect candles + ccxt_clients_cache._SHARED_MARKETS_EXCHANGE_BY_EXCHANGE.clear() + await scripting_library.init_exchange_market_status_and_populate_backtesting_exchange_data( + exchange_data, profile_data + ) + # cached markets have been updated and now contain this exchange markets + assert len(ccxt_clients_cache._SHARED_MARKETS_EXCHANGE_BY_EXCHANGE) == 1 + # ensure collected datas are correct + assert len(exchange_data.markets) == 2 + assert sorted([market.symbol for market in exchange_data.markets]) == ["BTC/USDT", "ETH/USDT"] + for market in exchange_data.markets: + assert market.time_frame == commons_enums.TimeFrames.ONE_DAY.value + assert days - 1 <= len(market.close) <= days + assert days - 1 <= len(market.open) <= days + assert days - 1 <= len(market.high) <= days + assert days - 1 <= len(market.low) <= days + assert days - 1 <= len(market.volume) <= days + assert days - 1 <= len(market.time) <= days + + starting_portfolio = profile_data.backtesting_context.starting_portfolio + assert starting_portfolio == { + "USDT": 1000, + } + # 3. run backtesting + async with scripting_library.init_and_run_backtesting( + exchange_data, profile_data + ) as independent_backtesting: + # backtesting completed, make sure it executed correctly + for exchange_id in independent_backtesting.octobot_backtesting.exchange_manager_ids: + exchange_manager = octobot_trading.api.get_exchange_manager_from_exchange_id(exchange_id) + ending_portfolio = octobot_trading.api.get_portfolio(exchange_manager, as_decimal=False) + assert ending_portfolio != starting_portfolio + assert "ETH" in ending_portfolio + assert "BTC" in ending_portfolio + assert "USDT" in ending_portfolio + trades = octobot_trading.api.get_trade_history(exchange_manager) + # at least 2 trades are expected, one for each symbol + assert len(trades) >= 2 + # backtesting is not stopped yet + assert independent_backtesting.stopped is False + + # 4. ensure backtesting is stopped + assert independent_backtesting.stopped is True diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_run_data.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_run_data.py new file mode 100644 index 0000000000..f1bdced9fa --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/backtesting/test_run_data.py @@ -0,0 +1,149 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.backtesting.run_data_analysis as run_data_analysis +import octobot_trading.enums as trading_enums +import octobot_commons.enums as commons_enums + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop +from tentacles.Meta.Keywords.scripting_library.tests.backtesting.data_store import default_price_data, \ + default_trades_data, default_portfolio_data, default_portfolio_historical_value, default_pnl_historical_value, \ + default_funding_fees_data, default_realized_pnl_history, default_spot_metadata + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_plot_historical_portfolio_value(default_price_data, default_trades_data, default_portfolio_data, + default_portfolio_historical_value, default_funding_fees_data, + default_spot_metadata): + expected_time_data = [candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + for candle in default_price_data["BTC/USDT"]] + await _test_historical_portfolio_values(default_price_data, default_trades_data, default_portfolio_data, + default_funding_fees_data, expected_time_data, + default_portfolio_historical_value, + "spot", + default_spot_metadata) + + +async def test_get_historical_pnl(default_price_data, default_trades_data, default_pnl_historical_value, + default_realized_pnl_history, default_spot_metadata): + # expected_time_data start at the 1st time data with a default_pnl_historical_value at 0 + expected_time_data = \ + [default_price_data["BTC/USDT"][0][commons_enums.PriceIndexes.IND_PRICE_TIME.value]] + \ + [trade[commons_enums.PlotAttributes.X.value] + for trade in default_trades_data["BTC/USDT"] + if trade[commons_enums.PlotAttributes.SIDE.value] == trading_enums.TradeOrderSide.SELL.value] + \ + [default_price_data["BTC/USDT"][-1][commons_enums.PriceIndexes.IND_PRICE_TIME.value]] + cumulative_pnl_historical_value = [default_pnl_historical_value[0]] + for value in default_pnl_historical_value[1:]: + cumulative_pnl_historical_value.append(cumulative_pnl_historical_value[-1] + value) + await _test_historical_pnl_values_from_trades(default_price_data, default_trades_data, [], False, True, False, + expected_time_data, default_pnl_historical_value, + cumulative_pnl_historical_value, + "spot", default_spot_metadata) + + expected_time_data = [i for i in range(len(cumulative_pnl_historical_value))] + await _test_historical_pnl_values_from_trades(default_price_data, default_trades_data, default_realized_pnl_history, + True, True, True, expected_time_data, default_pnl_historical_value, + cumulative_pnl_historical_value, + "spot", default_spot_metadata) + await _test_historical_pnl_values_from_trades(default_price_data, default_trades_data, default_realized_pnl_history, + False, False, True, expected_time_data, default_pnl_historical_value, + cumulative_pnl_historical_value, + "spot", default_spot_metadata) + + +async def test_total_paid_fees(default_trades_data): + usdt_fees = sum(trade[commons_enums.DBRows.FEES_AMOUNT.value] + for trade in default_trades_data["BTC/USDT"] + if trade[commons_enums.DBRows.FEES_CURRENCY.value] == "USDT") + btc_fees_in_usdt = sum(trade[commons_enums.DBRows.FEES_AMOUNT.value] * trade[commons_enums.PlotAttributes.Y.value] + for trade in default_trades_data["BTC/USDT"] + if trade[commons_enums.DBRows.FEES_CURRENCY.value] == "BTC") + with mock.patch.object(run_data_analysis, "get_transactions", + mock.AsyncMock(return_value=[])) as get_transactions_mock: + assert round(await run_data_analysis.total_paid_fees(None, default_trades_data["BTC/USDT"]), 15) == \ + round(usdt_fees + btc_fees_in_usdt, 15) + get_transactions_mock.assert_called_once() + + +async def _test_historical_portfolio_values(price_data, trades_data, portfolio_data, funding_fees_data, + expected_time_data, expected_value_data, exchange_type, + spot_metadata): + plotted_element = mock.Mock() + with mock.patch.object(run_data_analysis, "load_historical_values", + mock.AsyncMock(return_value=(price_data, trades_data, portfolio_data, exchange_type, + spot_metadata, spot_metadata))) \ + as load_historical_values_mock, \ + mock.patch.object(run_data_analysis, "get_transactions", + mock.AsyncMock(return_value=funding_fees_data)) \ + as get_transactions_mock: + await run_data_analysis.plot_historical_portfolio_value("meta_database", plotted_element, + exchange="exchange", own_yaxis=True) + load_historical_values_mock.assert_called_once_with("meta_database", "exchange") + get_transactions_mock.assert_called_once_with("meta_database", + transaction_type=trading_enums.TransactionType.FUNDING_FEE.value) + plotted_element.plot.assert_called_once_with( + mode="scatter", + x=expected_time_data, + y=expected_value_data, + title="Portfolio value", + own_yaxis=True + ) + + +async def _test_historical_pnl_values_from_trades(price_data, trades_data, pnl_data, include_cumulative, + include_unitary, + x_as_trade_count, expected_time_data, expected_value_data, + expected_cumulative_values, + exchange_type, spot_metadata): + plotted_element = mock.Mock() + with mock.patch.object(run_data_analysis, "load_historical_values", + mock.AsyncMock(return_value=(price_data, trades_data, None, exchange_type, spot_metadata, + spot_metadata))) \ + as load_historical_values_mock, \ + mock.patch.object(run_data_analysis, "get_transactions", + mock.AsyncMock(return_value=pnl_data)) \ + as get_transactions_mock: + await run_data_analysis._get_historical_pnl("meta_database", plotted_element, include_cumulative, + include_unitary, + exchange="exchange", x_as_trade_count=x_as_trade_count, + own_yaxis=True) + load_historical_values_mock.assert_called_once_with("meta_database", "exchange") + get_transactions_mock.assert_called_once_with("meta_database", + transaction_types=( + trading_enums.TransactionType.TRADING_FEE.value, + trading_enums.TransactionType.FUNDING_FEE.value, + trading_enums.TransactionType.REALISED_PNL.value, + trading_enums.TransactionType.CLOSE_REALISED_PNL.value) + ) + if include_cumulative: + assert plotted_element.plot.call_count == 2 + else: + if include_unitary: + plotted_element.plot.assert_called_once_with( + kind="bar", + x=expected_time_data, + y=expected_value_data, + x_type="tick0" if x_as_trade_count else "date", + title="P&L per trade", + own_yaxis=True + ) + else: + plotted_element.assert_not_called() diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/__init__.py new file mode 100644 index 0000000000..5cfd029119 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/__init__.py @@ -0,0 +1,74 @@ +import os +import pathlib + +import pytest +import pytest_asyncio + +import octobot_commons.constants as commons_constants +import octobot_backtesting.backtesting as backtesting +import octobot_backtesting.constants as backtesting_constants +import octobot_backtesting.time as backtesting_time +import octobot_trading.exchanges as exchanges + +from octobot_commons.tests.test_config import load_test_config + +pytestmark = pytest.mark.asyncio + + +DEFAULT_EXCHANGE_NAME = "binance" +TEST_CONFIG_FOLDER = pathlib.Path(os.path.abspath(__file__)).parent.parent + + +@pytest_asyncio.fixture +async def backtesting_config(request): + config = load_test_config(test_folder=TEST_CONFIG_FOLDER) + config[backtesting_constants.CONFIG_BACKTESTING] = {} + config[backtesting_constants.CONFIG_BACKTESTING][commons_constants.CONFIG_ENABLED_OPTION] = True + if hasattr(request, "param"): + ref_market = request.param + config[commons_constants.CONFIG_TRADING][commons_constants.CONFIG_TRADER_REFERENCE_MARKET] = ref_market + return config + + +@pytest_asyncio.fixture +async def fake_backtesting(backtesting_config): + return backtesting.Backtesting( + config=backtesting_config, + exchange_ids=[], + matrix_id="", + backtesting_files=[], + ) + + +@pytest_asyncio.fixture +async def backtesting_exchange_manager(request, backtesting_config, fake_backtesting): + config = None + exchange_name = DEFAULT_EXCHANGE_NAME + is_spot = True + is_margin = False + is_future = False + is_option = False + if hasattr(request, "param"): + config, exchange_name, is_spot, is_margin, is_future = request.param + + if config is None: + config = backtesting_config + exchange_manager_instance = exchanges.ExchangeManager(config, exchange_name) + exchange_manager_instance.is_backtesting = True + exchange_manager_instance.is_spot_only = is_spot + exchange_manager_instance.is_margin = is_margin + exchange_manager_instance.is_future = is_future + exchange_manager_instance.is_option = is_option + exchange_manager_instance.use_cached_markets = False + exchange_manager_instance.backtesting = fake_backtesting + exchange_manager_instance.backtesting.time_manager = backtesting_time.TimeManager(config) + await exchange_manager_instance.initialize(exchange_config_by_exchange=None) + yield exchange_manager_instance + await exchange_manager_instance.stop() + + +@pytest_asyncio.fixture +async def backtesting_trader(backtesting_config, backtesting_exchange_manager): + trader_instance = exchanges.TraderSimulator(backtesting_config, backtesting_exchange_manager) + await trader_instance.initialize() + return backtesting_config, backtesting_exchange_manager, trader_instance diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/test_indexes_configuration.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/test_indexes_configuration.py new file mode 100644 index 0000000000..a876ede83b --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/test_indexes_configuration.py @@ -0,0 +1,216 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_commons.profiles.profile_data as commons_profile_data +import octobot_commons.constants as commons_constants + +import tentacles.Trading.Mode.index_trading_mode.index_trading as index_trading +import octobot_copy.enums as rebalancer_enums +import tentacles.Meta.Keywords.scripting_library.configuration.indexes_configuration as indexes_configuration + + +def test_create_index_config_from_tentacles_config(): + # Create test distribution + distribution = [ + { + rebalancer_enums.DistributionKeys.NAME: "BTC", + rebalancer_enums.DistributionKeys.VALUE: 50.0, + }, + { + rebalancer_enums.DistributionKeys.NAME: "ETH", + rebalancer_enums.DistributionKeys.VALUE: 30.0, + }, + { + rebalancer_enums.DistributionKeys.NAME: "USD", # Should be replaced by reference market + rebalancer_enums.DistributionKeys.VALUE: 20.0, + }, + ] + + # Create test trading mode config + trading_mode_config = { + index_trading.IndexTradingModeProducer.INDEX_CONTENT: distribution, + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_MIN_PERCENT: 5.0, + index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE: "test_profile", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES: [ + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "test_profile", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 5.0, + } + ], + } + + # Create tentacles config + tentacles_config = [ + commons_profile_data.TentaclesData( + name=index_trading.IndexTradingMode.get_name(), + config=trading_mode_config + ) + ] + + # Test parameters + exchange = "binance" + starting_funds = 10000.0 + backtesting_start_time_delta = 86400.0 # 1 day in seconds + + # Call the function + result = indexes_configuration.create_index_config_from_tentacles_config( + tentacles_config, exchange, starting_funds, backtesting_start_time_delta + ) + + # Assertions + assert isinstance(result, commons_profile_data.ProfileData) + assert result.profile_details.name == "serverless" + assert result.trading.reference_market == "USDC" # binance default + assert result.trading.risk == 0.5 + assert len(result.exchanges) == 1 + assert result.exchanges[0].internal_name == exchange + assert result.exchanges[0].exchange_type == commons_constants.CONFIG_EXCHANGE_SPOT + + # Check currencies (BTC and ETH, not USD which should be replaced by reference market) + assert len(result.crypto_currencies) == 2 + trading_pairs = {curr.name: curr.trading_pairs for curr in result.crypto_currencies} + assert ["BTC/USDC"] == trading_pairs["BTC"] + assert ["ETH/USDC"] == trading_pairs["ETH"] + + # Check trader settings + assert result.trader.enabled is True + + # Check tentacles config + assert len(result.tentacles) == 1 + assert result.tentacles[0].name == index_trading.IndexTradingMode.get_name() + assert index_trading.IndexTradingModeProducer.INDEX_CONTENT in result.tentacles[0].config + + # Check that USD was replaced by reference market in distribution + distribution_names = [ + item[rebalancer_enums.DistributionKeys.NAME] + for item in result.tentacles[0].config[index_trading.IndexTradingModeProducer.INDEX_CONTENT] + ] + assert "USD" not in distribution_names + assert "USDC" in distribution_names # binance's reference market + + # Check backtesting config + assert result.backtesting_context is not None + assert [exchange] == result.backtesting_context.exchanges + assert result.backtesting_context.start_time_delta == backtesting_start_time_delta + assert {"USDC": starting_funds} == result.backtesting_context.starting_portfolio + + +def test_generate_index_config(): + # Create test distribution + distribution = [ + { + rebalancer_enums.DistributionKeys.NAME: "BTC", + rebalancer_enums.DistributionKeys.VALUE: 50.0, + }, + { + rebalancer_enums.DistributionKeys.NAME: "ETH", + rebalancer_enums.DistributionKeys.VALUE: 30.0, + }, + { + rebalancer_enums.DistributionKeys.NAME: "USDT", + rebalancer_enums.DistributionKeys.VALUE: 20.0, + }, + ] + + # Test parameters + rebalance_cap = 5.0 + selected_rebalance_trigger_profile = "profile1" + rebalance_trigger_profiles = [ + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "profile1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 5.0, + }, + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "profile2", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 10.0, + }, + ] + reference_market = "USDT" + exchange = "binance" + min_funds = 1000.0 + coins_by_symbol = { + "BTC": "BTC", + "ETH": "ETH", + "USDT": "USDT", + } + disabled_backtesting = False + backtesting_start_time_delta = 172800.0 # 2 days in seconds + + # Call the function + result = indexes_configuration.generate_index_config( + distribution, rebalance_cap, selected_rebalance_trigger_profile, + rebalance_trigger_profiles, reference_market, exchange, min_funds, + coins_by_symbol, disabled_backtesting, backtesting_start_time_delta + ) + + # Assertions - check that result is a dict + assert isinstance(result, dict) + + # Check profile details + assert "profile_details" in result + assert result["profile_details"]["name"] == "serverless" + + # Check trading config + assert "trading" in result + assert result["trading"]["reference_market"] == reference_market + assert result["trading"]["risk"] == 0.5 + + # Check exchanges + assert "exchanges" in result + assert len(result["exchanges"]) == 1 + assert result["exchanges"][0]["internal_name"] == exchange + + # Check crypto currencies (should not include reference market) + assert "crypto_currencies" in result + assert len(result["crypto_currencies"]) == 2 # BTC and ETH, not USDT (reference market) + trading_pairs = {curr["name"]: curr["trading_pairs"] for curr in result["crypto_currencies"]} + assert ["BTC/USDT"] == trading_pairs["BTC"] + assert ["ETH/USDT"] == trading_pairs["ETH"] + + # Check trader + assert "trader" in result + assert result["trader"]["enabled"] is True + + # Check tentacles + assert "tentacles" in result + assert len(result["tentacles"]) == 1 + tentacle_config = result["tentacles"][0] + assert tentacle_config["name"] == index_trading.IndexTradingMode.get_name() + assert "config" in tentacle_config + + # Check index trading config + config = tentacle_config["config"] + assert config[index_trading.IndexTradingModeProducer.INDEX_CONTENT] == distribution + assert config[index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_MIN_PERCENT] == rebalance_cap + assert config[index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE] == selected_rebalance_trigger_profile + assert config[index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES] == rebalance_trigger_profiles + assert config[index_trading.IndexTradingModeProducer.SYNCHRONIZATION_POLICY] == rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE.value + assert config[index_trading.IndexTradingModeProducer.SELL_UNINDEXED_TRADED_COINS] is True + assert config[index_trading.IndexTradingModeProducer.REFRESH_INTERVAL] == 1 + + # Check backtesting config + assert "backtesting_context" in result + backtesting = result["backtesting_context"] + assert backtesting["exchanges"] == [exchange] + assert backtesting["start_time_delta"] == backtesting_start_time_delta + assert {"USDT": min_funds * 10} == backtesting["starting_portfolio"] + + # Test with disabled backtesting + result_no_backtesting = indexes_configuration.generate_index_config( + distribution, rebalance_cap, selected_rebalance_trigger_profile, + rebalance_trigger_profiles, reference_market, exchange, min_funds, + coins_by_symbol, True, backtesting_start_time_delta + ) + assert "exchanges" not in result_no_backtesting["backtesting_context"] diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/test_profile_data_configuration.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/test_profile_data_configuration.py new file mode 100644 index 0000000000..9fde09a39e --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/configuration/test_profile_data_configuration.py @@ -0,0 +1,132 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import tentacles.Trading.Mode.index_trading_mode.index_trading as index_trading +import octobot_copy.enums as rebalancer_enums + +import octobot_commons.constants as commons_constants +import octobot_commons.profiles.profile_data as commons_profile_data +import tentacles.Meta.Keywords.scripting_library as scripting_library + + + +def test_register_historical_configs_adds_traded_pairs(): + # Master has no traded pairs, historical has one + master = scripting_library.minimal_profile_data() + tentacle_name = "TestTentacle" + master.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config={})] + # Historical profile with a traded pair + historical = scripting_library.minimal_profile_data() + historical.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config={})] + scripting_library.add_traded_symbols(historical, ["BTC/USDT"]) + historicals = {1000.0: historical} + assert [] == master.get_traded_symbols() + scripting_library.register_historical_configs(master, historicals, True, False) + # Master should now have the traded pair + assert ["BTC/USDT"] == master.get_traded_symbols() + + +def test_register_historical_configs_registers_historical_tentacle_config(): + # Master and historical have different tentacle config dicts + master = scripting_library.minimal_profile_data() + tentacle_name = "TestTentacle" + master_config = {"foo": 1} + master.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config=master_config)] + historical_1 = scripting_library.minimal_profile_data() + hist_config_1 = {"foo": 2} + historical_1.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config=hist_config_1)] + historical_2 = scripting_library.minimal_profile_data() + hist_config_2 = {"foo": 3} + historical_2.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config=hist_config_2)] + historicals = {1000.0: historical_1, 2000.0: historical_2} + scripting_library.register_historical_configs(master, historicals, False, False) + # Master config should now have a historical config registered + assert commons_constants.CONFIG_HISTORICAL_CONFIGURATION in master_config + assert len(master_config[commons_constants.CONFIG_HISTORICAL_CONFIGURATION]) == 2 + assert master_config[commons_constants.CONFIG_HISTORICAL_CONFIGURATION][0][0] == 2000.0 + assert master_config[commons_constants.CONFIG_HISTORICAL_CONFIGURATION][0][1] == hist_config_2 + assert master_config[commons_constants.CONFIG_HISTORICAL_CONFIGURATION][1][0] == 1000.0 + assert master_config[commons_constants.CONFIG_HISTORICAL_CONFIGURATION][1][1] == hist_config_1 + + +def test_register_historical_configs_applies_master_edits(): + # Master has a config with a special field, historical does not + master = scripting_library.minimal_profile_data() + tentacle_name = "TestTentacle" + special_key = "special" + master_config = { + special_key: 42, + index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE: "plop1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES: [ + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "plop1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 4 + }, + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "plop2", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 20 + } + ], + index_trading.IndexTradingModeProducer.SYNCHRONIZATION_POLICY: rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE.value + } + master.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config=master_config)] + historical_1 = scripting_library.minimal_profile_data() + hist_config_1 = {} + historical_1.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config=hist_config_1)] + historical_2 = scripting_library.minimal_profile_data() + hist_config_2 = {special_key: 1} + historical_2.tentacles = [commons_profile_data.TentaclesData(name=tentacle_name, config=hist_config_2)] + historicals = {1000.0: historical_1, 2000.0: historical_2} + + scripting_library.register_historical_configs(master, historicals, False, True) + # no update as tentacle_name is not configurable tentacles and config keys + assert hist_config_1 == {} + assert hist_config_2 == {special_key: 1} + + # now using IndexTradingMode: a whitelisted tentacle + for profile_data in (master, historical_1, historical_2): + profile_data.tentacles[0].name = index_trading.IndexTradingMode.get_name() + + scripting_library.register_historical_configs(master, historicals, False, True) + # configurable tentacles abd config keys are applied to historical configs + assert hist_config_1 == { + index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE: "plop1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES: [ + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "plop1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 4 + }, + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "plop2", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 20 + } + ], + index_trading.IndexTradingModeProducer.SYNCHRONIZATION_POLICY: rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE.value + } + assert hist_config_2 == { + special_key: 1, + index_trading.IndexTradingModeProducer.SELECTED_REBALANCE_TRIGGER_PROFILE: "plop1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILES: [ + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "plop1", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 4 + }, + { + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_NAME: "plop2", + index_trading.IndexTradingModeProducer.REBALANCE_TRIGGER_PROFILE_MIN_PERCENT: 20 + } + ], + index_trading.IndexTradingModeProducer.SYNCHRONIZATION_POLICY: rebalancer_enums.SynchronizationPolicy.SELL_REMOVED_INDEX_COINS_ON_RATIO_REBALANCE.value + } diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/exchanges/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/exchanges/__init__.py new file mode 100644 index 0000000000..5cfd029119 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/exchanges/__init__.py @@ -0,0 +1,74 @@ +import os +import pathlib + +import pytest +import pytest_asyncio + +import octobot_commons.constants as commons_constants +import octobot_backtesting.backtesting as backtesting +import octobot_backtesting.constants as backtesting_constants +import octobot_backtesting.time as backtesting_time +import octobot_trading.exchanges as exchanges + +from octobot_commons.tests.test_config import load_test_config + +pytestmark = pytest.mark.asyncio + + +DEFAULT_EXCHANGE_NAME = "binance" +TEST_CONFIG_FOLDER = pathlib.Path(os.path.abspath(__file__)).parent.parent + + +@pytest_asyncio.fixture +async def backtesting_config(request): + config = load_test_config(test_folder=TEST_CONFIG_FOLDER) + config[backtesting_constants.CONFIG_BACKTESTING] = {} + config[backtesting_constants.CONFIG_BACKTESTING][commons_constants.CONFIG_ENABLED_OPTION] = True + if hasattr(request, "param"): + ref_market = request.param + config[commons_constants.CONFIG_TRADING][commons_constants.CONFIG_TRADER_REFERENCE_MARKET] = ref_market + return config + + +@pytest_asyncio.fixture +async def fake_backtesting(backtesting_config): + return backtesting.Backtesting( + config=backtesting_config, + exchange_ids=[], + matrix_id="", + backtesting_files=[], + ) + + +@pytest_asyncio.fixture +async def backtesting_exchange_manager(request, backtesting_config, fake_backtesting): + config = None + exchange_name = DEFAULT_EXCHANGE_NAME + is_spot = True + is_margin = False + is_future = False + is_option = False + if hasattr(request, "param"): + config, exchange_name, is_spot, is_margin, is_future = request.param + + if config is None: + config = backtesting_config + exchange_manager_instance = exchanges.ExchangeManager(config, exchange_name) + exchange_manager_instance.is_backtesting = True + exchange_manager_instance.is_spot_only = is_spot + exchange_manager_instance.is_margin = is_margin + exchange_manager_instance.is_future = is_future + exchange_manager_instance.is_option = is_option + exchange_manager_instance.use_cached_markets = False + exchange_manager_instance.backtesting = fake_backtesting + exchange_manager_instance.backtesting.time_manager = backtesting_time.TimeManager(config) + await exchange_manager_instance.initialize(exchange_config_by_exchange=None) + yield exchange_manager_instance + await exchange_manager_instance.stop() + + +@pytest_asyncio.fixture +async def backtesting_trader(backtesting_config, backtesting_exchange_manager): + trader_instance = exchanges.TraderSimulator(backtesting_config, backtesting_exchange_manager) + await trader_instance.initialize() + return backtesting_config, backtesting_exchange_manager, trader_instance diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_create_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_create_order.py new file mode 100644 index 0000000000..c53d3d5a92 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_create_order.py @@ -0,0 +1,434 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock +import decimal +import os + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.position_size as position_size +import tentacles.Meta.Keywords.scripting_library.orders.grouping as grouping +import octobot_trading.enums as trading_enums +import octobot_trading.errors as errors +import octobot_trading.constants as trading_constants +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.modes.script_keywords as script_keywords + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context, mock_context, symbol_market, \ + skip_if_octobot_trading_mocking_disabled +from tentacles.Meta.Keywords.scripting_library.tests.exchanges import backtesting_trader, backtesting_config, \ + backtesting_exchange_manager, fake_backtesting + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_create_order_instance(mock_context): + with mock.patch.object(create_order, "_get_order_quantity_and_side", + mock.AsyncMock(return_value=(decimal.Decimal(1), "sell"))) \ + as _get_order_quantity_and_side_mock, \ + mock.patch.object(create_order, "_get_order_details", + mock.AsyncMock(return_value=(1, 2, 3, 4, 5, 6, 7, 8, 9))) \ + as _get_order_details_mock, \ + mock.patch.object(script_keywords, "get_price_with_offset", mock.AsyncMock(return_value=42)) as get_offset_mock, \ + mock.patch.object(create_order, "_create_order", mock.AsyncMock()) as _create_order_mock: + with mock.patch.object(create_order, "_paired_order_is_closed", mock.Mock(return_value=True)) \ + as _paired_order_is_closed_mock: + order = mock.Mock(is_open=mock.Mock(return_value=False)) + assert [] == await create_order.create_order_instance( + mock_context, "side", "symbol", "order_amount", "order_target_position", + "stop_loss_offset", "stop_loss_tag", "stop_loss_type", "stop_loss_group", + "take_profit_offset", "take_profit_tag", "take_profit_type", "take_profit_group", + "order_type_name", "order_offset", "order_min_offset", "order_max_offset", "order_limit_offset", + "slippage_limit", "time_limit", "reduce_only", "post_only", "tag", "group", [order]) + _paired_order_is_closed_mock.assert_called_once_with(mock_context, "group") + _get_order_quantity_and_side_mock.assert_not_called() + _get_order_details_mock.assert_not_called() + get_offset_mock.assert_not_called() + _create_order_mock.assert_not_called() + with mock.patch.object(create_order, "_paired_order_is_closed", mock.Mock(return_value=False)) \ + as _paired_order_is_closed_mock: + order = mock.Mock(is_open=mock.Mock(return_value=False)) + await create_order.create_order_instance( + mock_context, "side", "symbol", "order_amount", "order_target_position", + "stop_loss_offset", "stop_loss_tag", "stop_loss_type", "stop_loss_group", + "take_profit_offset", "take_profit_tag", "take_profit_type", "take_profit_group", + "order_type_name", "order_offset", "order_min_offset", "order_max_offset", "order_limit_offset", + "slippage_limit", "time_limit", "reduce_only", "post_only", "tag", "group", [order]) + _paired_order_is_closed_mock.assert_called_once_with(mock_context, "group") + _get_order_quantity_and_side_mock.assert_called_once_with(mock_context, "order_amount", + "order_target_position", "order_type_name", + "side", "reduce_only", False) + _get_order_details_mock.assert_called_once_with(mock_context, "order_type_name", "sell", "order_offset", + "reduce_only", "order_limit_offset") + assert get_offset_mock.call_count == 2 + _create_order_mock.assert_called_once_with( + context=mock_context, symbol="symbol", order_quantity=decimal.Decimal(1), order_price=2, tag="tag", + order_type_name="order_type_name", input_side="side", + side="sell", final_side=3, order_type=1, order_min_offset="order_min_offset", max_offset_val=7, + reduce_only=4, group="group", + stop_loss_price=42, stop_loss_tag="stop_loss_tag", stop_loss_type="stop_loss_type", + stop_loss_group="stop_loss_group", + take_profit_price=42, take_profit_tag="take_profit_tag", take_profit_type="take_profit_type", + take_profit_group="take_profit_group", + wait_for=[order], + truncate=False, + order_amount='order_amount', order_target_position='order_target_position') + + +async def test_paired_order_is_closed(mock_context, skip_if_octobot_trading_mocking_disabled): + # skip_if_octobot_trading_mocking_disabled oco_group, "get_group_open_orders" + assert create_order._paired_order_is_closed(mock_context, None) is False + oco_group = grouping.create_one_cancels_the_other_group(mock_context) + assert create_order._paired_order_is_closed(mock_context, oco_group) is False + order = mock.Mock() + order_2 = mock.Mock() + order_2.is_closed = mock.Mock(return_value=True) + if os.getenv('CYTHON_IGNORE'): + return + with mock.patch.object(oco_group, "get_group_open_orders", mock.Mock(return_value=[order, order_2])) as \ + get_group_open_orders_mock: + with mock.patch.object(order, "is_closed", mock.Mock(return_value=True)) as is_closed_mock: + assert create_order._paired_order_is_closed(mock_context, oco_group) is True + is_closed_mock.assert_called_once() + get_group_open_orders_mock.assert_called_once() + get_group_open_orders_mock.reset_mock() + with mock.patch.object(order, "is_closed", mock.Mock(return_value=False)) as is_closed_mock: + assert create_order._paired_order_is_closed(mock_context, oco_group) is False + is_closed_mock.assert_called_once() + get_group_open_orders_mock.assert_called_once() + order.order_group = None + null_context.just_created_orders = [order] + with mock.patch.object(order, "is_closed", mock.Mock(return_value=True)) as is_closed_mock: + assert create_order._paired_order_is_closed(null_context, oco_group) is False + is_closed_mock.assert_not_called() + order.order_group = oco_group + assert create_order._paired_order_is_closed(null_context, oco_group) is True + is_closed_mock.assert_called_once() + order.order_group = mock.Mock() + is_closed_mock.reset_mock() + assert create_order._paired_order_is_closed(null_context, oco_group) is False + is_closed_mock.assert_not_called() + + +async def test_use_total_holding(): + with mock.patch.object(create_order, "_is_stop_order", mock.Mock(return_value=False)) as _is_stop_order_mock: + assert create_order._use_total_holding("type") is False + _is_stop_order_mock.assert_called_once_with("type") + with mock.patch.object(create_order, "_is_stop_order", mock.Mock(return_value=True)) as _is_stop_order_mock: + assert create_order._use_total_holding("type2") is True + _is_stop_order_mock.assert_called_once_with("type2") + + +async def test_is_stop_order(): + assert create_order._is_stop_order("") is False + assert create_order._is_stop_order("market") is False + assert create_order._is_stop_order("limit") is False + assert create_order._is_stop_order("stop_loss") is True + assert create_order._is_stop_order("stop_market") is True + assert create_order._is_stop_order("stop_limit") is True + assert create_order._is_stop_order("trailing_stop_loss") is True + assert create_order._is_stop_order("trailing_market") is False + assert create_order._is_stop_order("trailing_limit") is False + + +async def test_get_order_quantity_and_side(null_context): + # order_amount and order_target_position are both not set + with pytest.raises(errors.InvalidArgumentError): + await create_order._get_order_quantity_and_side(null_context, None, None, "", "", True, False) + + # order_amount and order_target_position are set + with pytest.raises(errors.InvalidArgumentError): + await create_order._get_order_quantity_and_side(null_context, 1, 2, "", "", True, False) + + # order_amount but no side + with pytest.raises(errors.InvalidArgumentError): + await create_order._get_order_quantity_and_side(null_context, 1, None, "", None, True, False) + with pytest.raises(errors.InvalidArgumentError): + await create_order._get_order_quantity_and_side(null_context, 1, None, "", "fsdsfds", True, True), False + + with mock.patch.object(position_size, "get_amount", + mock.AsyncMock(return_value=decimal.Decimal(1))) as get_amount_mock: + with mock.patch.object(create_order, "_use_total_holding", + mock.Mock(return_value=False)) as _use_total_holding_mock, \ + mock.patch.object(create_order, "_is_stop_order", + mock.Mock(return_value=False)) as _is_stop_order_mock: + assert await create_order._get_order_quantity_and_side(null_context, 1, None, "", "sell", True, False) \ + == (decimal.Decimal(1), "sell") + get_amount_mock.assert_called_once_with(null_context, 1, "sell", True, False, use_total_holding=False, + unknown_portfolio_on_creation=False) + get_amount_mock.reset_mock() + _is_stop_order_mock.assert_called_once_with("") + _use_total_holding_mock.assert_called_once_with("") + with mock.patch.object(create_order, "_use_total_holding", + mock.Mock(return_value=True)) as _use_total_holding_mock, \ + mock.patch.object(create_order, "_is_stop_order", + mock.Mock(return_value=True)) as _is_stop_order_mock: + assert await create_order._get_order_quantity_and_side(null_context, 1, None, "order_type", "sell", False, + True) \ + == (decimal.Decimal(1), "sell") + get_amount_mock.assert_called_once_with(null_context, 1, "sell", False, True, use_total_holding=True, + unknown_portfolio_on_creation=True) + get_amount_mock.reset_mock() + _is_stop_order_mock.assert_called_once_with("order_type") + _use_total_holding_mock.assert_called_once_with("order_type") + + with mock.patch.object(position_size, "get_target_position", + mock.AsyncMock(return_value=(decimal.Decimal(10), "buy"))) as get_target_position_mock: + with mock.patch.object(create_order, "_use_total_holding", + mock.Mock(return_value=True)) as _use_total_holding_mock, \ + mock.patch.object(create_order, "_is_stop_order", + mock.Mock(return_value=False)) as _is_stop_order_mock: + assert await create_order._get_order_quantity_and_side(null_context, None, 1, "order_type", None, True, + False) \ + == (decimal.Decimal(10), "buy") + get_target_position_mock.assert_called_once_with(null_context, 1, True, False, use_total_holding=True, + unknown_portfolio_on_creation=False) + get_target_position_mock.reset_mock() + _is_stop_order_mock.assert_called_once_with("order_type") + _use_total_holding_mock.assert_called_once_with("order_type") + with mock.patch.object(create_order, "_use_total_holding", + mock.Mock(return_value=False)) as _use_total_holding_mock, \ + mock.patch.object(create_order, "_is_stop_order", + mock.Mock(return_value=True)) as _is_stop_order_mock: + assert await create_order._get_order_quantity_and_side(null_context, None, 1, "order_type", None, False, + True) \ + == (decimal.Decimal(10), "buy") + get_target_position_mock.assert_called_once_with(null_context, 1, False, True, use_total_holding=False, + unknown_portfolio_on_creation=True) + get_target_position_mock.reset_mock() + _is_stop_order_mock.assert_called_once_with("order_type") + _use_total_holding_mock.assert_called_once_with("order_type") + + +async def test_get_order_details(null_context): + ten = decimal.Decimal(10) + with mock.patch.object(script_keywords, "get_price_with_offset", mock.AsyncMock(return_value=ten)) as get_offset_mock: + + async def _test_market(side, expected_order_type): + order_type, order_price, side, _, _, _, _, _, _ = await create_order._get_order_details( + null_context, "market", side, None, None, None + ) + assert order_type is expected_order_type + assert order_price == ten + assert side is None + get_offset_mock.assert_called_once_with(null_context, "0") + get_offset_mock.reset_mock() + await _test_market(trading_enums.TradeOrderSide.SELL.value, trading_enums.TraderOrderType.SELL_MARKET) + await _test_market(trading_enums.TradeOrderSide.BUY.value, trading_enums.TraderOrderType.BUY_MARKET) + + async def _test_limit(side, expected_order_type): + order_type, order_price, side, _, _, _, _, _, _ = await create_order._get_order_details( + null_context, "limit", side, "25%", None, None + ) + assert order_type is expected_order_type + assert order_price == ten + assert side is None + get_offset_mock.assert_called_once_with(null_context, "25%") + get_offset_mock.reset_mock() + await _test_limit(trading_enums.TradeOrderSide.SELL.value, trading_enums.TraderOrderType.SELL_LIMIT) + await _test_limit(trading_enums.TradeOrderSide.BUY.value, trading_enums.TraderOrderType.BUY_LIMIT) + + async def _test_stop_loss(side, expected_side): + order_type, order_price, side, _, _, _, _, _, _ = await create_order._get_order_details( + null_context, "stop_loss", side, "25%", None, None + ) + assert order_type is trading_enums.TraderOrderType.STOP_LOSS + assert order_price == ten + assert side is expected_side + get_offset_mock.assert_called_once_with(null_context, "25%") + get_offset_mock.reset_mock() + await _test_stop_loss(trading_enums.TradeOrderSide.SELL.value, trading_enums.TradeOrderSide.SELL) + await _test_stop_loss(trading_enums.TradeOrderSide.BUY.value, trading_enums.TradeOrderSide.BUY) + + async def _test_trailing_market(side, expected_side): + order_type, order_price, side, _, trailing_method, _, _, _, _ = await create_order._get_order_details( + null_context, "trailing_market", side, "25%", None, None + ) + assert order_type is trading_enums.TraderOrderType.TRAILING_STOP + assert trailing_method == "continuous" + assert order_price == ten + assert side is expected_side + get_offset_mock.assert_called_once_with(null_context, "25%") + get_offset_mock.reset_mock() + await _test_trailing_market(trading_enums.TradeOrderSide.SELL.value, trading_enums.TradeOrderSide.SELL) + await _test_trailing_market(trading_enums.TradeOrderSide.BUY.value, trading_enums.TradeOrderSide.BUY) + + async def _test_trailing_limit(side, expected_side): + order_type, order_price, side, _, trailing_method, min_offset_val, max_offset_val, _, _ \ + = await create_order._get_order_details( + null_context, "trailing_limit", side, "25%", None, None + ) + assert order_type is trading_enums.TraderOrderType.TRAILING_STOP_LIMIT + assert trailing_method == "continuous" + assert order_price is None + assert side is expected_side + assert min_offset_val == ten + assert max_offset_val == ten + assert get_offset_mock.call_count == 2 + get_offset_mock.reset_mock() + await _test_trailing_limit(trading_enums.TradeOrderSide.SELL.value, trading_enums.TradeOrderSide.SELL) + await _test_trailing_limit(trading_enums.TradeOrderSide.BUY.value, trading_enums.TradeOrderSide.BUY) + + +async def test_create_order(mock_context, symbol_market): + with mock.patch.object(trading_personal_data, "get_pre_order_data", + mock.AsyncMock(return_value=(None, None, decimal.Decimal(5), decimal.Decimal(105), + symbol_market))) \ + as get_pre_order_data_mock, \ + mock.patch.object(create_order, "_get_group_adapted_quantity", mock.Mock(return_value=decimal.Decimal(1))) \ + as _get_group_adapted_quantity_mock: + + # without linked orders + # don't plot orders + mock_context.plot_orders = False + orders = await create_order._create_order( + mock_context, "BTC/USDT", decimal.Decimal(1), decimal.Decimal(100), "tag", + "order_type_name", "input_side", trading_enums.TradeOrderSide.BUY.value, None, + trading_enums.TraderOrderType.BUY_MARKET, None, None, False, None, None, + None, None, None, None, + None, None, None, None, + None, True, None) + assert get_pre_order_data_mock.call_count == 2 + _get_group_adapted_quantity_mock.assert_called_once_with(mock_context, None, + trading_enums.TraderOrderType.BUY_MARKET, + decimal.Decimal(1)) + assert len(orders) == 1 + assert isinstance(orders[0], trading_personal_data.BuyMarketOrder) + assert orders[0].symbol == "BTC/USDT" + assert orders[0].tag == "tag" + assert orders[0].origin_price == decimal.Decimal(105) + assert orders[0].origin_quantity == decimal.Decimal(1) + assert mock_context.just_created_orders == orders + mock_context.just_created_orders = [] + get_pre_order_data_mock.reset_mock() + _get_group_adapted_quantity_mock.reset_mock() + + # with order group + # plot orders + mock_context.plot_orders = True + oco_group = grouping.create_one_cancels_the_other_group(mock_context) + orders = await create_order._create_order( + mock_context, "BTC/USDT", decimal.Decimal(1), decimal.Decimal(100), "tag2", + "order_type_name", "input_side", trading_enums.TradeOrderSide.BUY.value, None, + trading_enums.TraderOrderType.TRAILING_STOP, decimal.Decimal(5), None, False, oco_group, + None, None, None, None, + None, None, None, None, + None, True, None, None) + get_pre_order_data_mock.assert_called_once_with(mock_context.exchange_manager, symbol="BTC/USDT", + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT) + _get_group_adapted_quantity_mock.assert_called_once_with(mock_context, oco_group, + trading_enums.TraderOrderType.TRAILING_STOP, + decimal.Decimal(1)) + assert len(orders) == 1 + assert isinstance(orders[0], trading_personal_data.TrailingStopOrder) + assert orders[0].symbol == "BTC/USDT" + assert orders[0].tag == "tag2" + assert orders[0].origin_price == decimal.Decimal(100) + assert orders[0].origin_quantity == decimal.Decimal(1) + assert orders[0].trader == mock_context.trader + assert orders[0].trailing_percent == decimal.Decimal(5) + assert orders[0].order_group is oco_group + assert mock_context.just_created_orders == orders + mock_context.just_created_orders = [] + get_pre_order_data_mock.reset_mock() + _get_group_adapted_quantity_mock.reset_mock() + + # with same order group as one previously created order: group them together + oco_group = grouping.create_one_cancels_the_other_group(mock_context) + previous_orders = [trading_personal_data.LimitOrder(mock_context.trader), + trading_personal_data.LimitOrder(mock_context.trader)] + previous_orders[0].add_to_order_group(oco_group) + # with mock.patch.object(create_order, "pre_initialize_order_callback", mock.AsyncMock()) \ + # as pre_initialize_order_callback_mock: + mock_context.plot_orders = False + orders = await create_order._create_order( + mock_context, "BTC/USDT", decimal.Decimal(1), decimal.Decimal(100), "tag2", + "order_type_name", "side", trading_enums.TradeOrderSide.BUY.value, trading_enums.TradeOrderSide.BUY, + trading_enums.TraderOrderType.TRAILING_STOP, + decimal.Decimal(5), None, True, oco_group, + None, None, None, None, + None, None, None, None, + None, True, None, None) + get_pre_order_data_mock.assert_called_once_with(mock_context.exchange_manager, symbol="BTC/USDT", + timeout=trading_constants.ORDER_DATA_FETCHING_TIMEOUT) + _get_group_adapted_quantity_mock.assert_called_once_with(mock_context, oco_group, + trading_enums.TraderOrderType.TRAILING_STOP, + decimal.Decimal(1)) + assert len(orders) == 1 + assert isinstance(orders[0], trading_personal_data.TrailingStopOrder) + assert orders[0].symbol == "BTC/USDT" + assert orders[0].tag == "tag2" + assert orders[0].origin_price == decimal.Decimal(100) + assert orders[0].origin_quantity == decimal.Decimal(1) + assert orders[0].trader == mock_context.trader + assert orders[0].trailing_percent == decimal.Decimal(5) + assert orders[0].order_group is oco_group + assert orders[0].side is trading_enums.TradeOrderSide.BUY + assert mock_context.just_created_orders == orders + mock_context.just_created_orders = [] + + grouped_orders = grouping.get_open_orders_from_group(oco_group) + assert len(grouped_orders) == 1 # only order this order got created and therefore is open in group + assert grouped_orders[0] is orders[0] + + +async def test_get_group_adapted_quantity(mock_context, skip_if_octobot_trading_mocking_disabled): + # skip_if_octobot_trading_mocking_disabled btps_group, "can_create_order" + oco_group = grouping.create_one_cancels_the_other_group(mock_context) + # no filter on oco groups + assert create_order._get_group_adapted_quantity(mock_context, oco_group, "whatever", decimal.Decimal(1000000)) \ + == decimal.Decimal(1000000) + + btps_group = grouping.create_balanced_take_profit_and_stop_group(mock_context) + if os.getenv('CYTHON_IGNORE'): + return + with mock.patch.object(btps_group, "can_create_order", mock.Mock(return_value=False)) as can_create_order_mock, \ + mock.patch.object(btps_group, "get_max_order_quantity", mock.Mock(return_value=decimal.Decimal(1))) \ + as get_max_order_quantity_mock: + # no context.just_created_orders: never block 1st orders to create as they can't be balanced + assert create_order._get_group_adapted_quantity(mock_context, btps_group, "whatever", decimal.Decimal(100)) \ + == decimal.Decimal(100) + can_create_order_mock.assert_not_called() + get_max_order_quantity_mock.assert_not_called() + + order_1 = mock.Mock(order_group=oco_group, order_type=trading_enums.TraderOrderType.STOP_LOSS) + mock_context.just_created_orders.append(order_1) + # context.just_created_orders has orders from other groups: consider this one as 1st from the group + assert create_order._get_group_adapted_quantity(mock_context, btps_group, "whatever", decimal.Decimal(100)) \ + == decimal.Decimal(100) + can_create_order_mock.assert_not_called() + get_max_order_quantity_mock.assert_not_called() + + order_2 = mock.Mock(order_group=btps_group, order_type=trading_enums.TraderOrderType.SELL_LIMIT) + mock_context.just_created_orders.append(order_2) + # only take profits being created: allow it + assert create_order._get_group_adapted_quantity(mock_context, btps_group, + trading_enums.TraderOrderType.SELL_LIMIT, + decimal.Decimal(10)) \ + == decimal.Decimal(10) + can_create_order_mock.assert_not_called() + get_max_order_quantity_mock.assert_not_called() + + # imbalanced orders: call can_create_order to figure out if we can create this order + assert create_order._get_group_adapted_quantity(mock_context, btps_group, + trading_enums.TraderOrderType.STOP_LOSS_LIMIT, + decimal.Decimal(10)) == decimal.Decimal(1) + can_create_order_mock.assert_called_once_with(trading_enums.TraderOrderType.STOP_LOSS_LIMIT, + decimal.Decimal(10)) + get_max_order_quantity_mock.assert_called_once_with(trading_enums.TraderOrderType.STOP_LOSS_LIMIT) diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_limit_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_limit_order.py new file mode 100644 index 0000000000..0a59c57591 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_limit_order.py @@ -0,0 +1,43 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.order_types.limit_order as limit_order + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_limit(null_context): + with mock.patch.object(create_order, "create_order_instance", mock.AsyncMock()) as create_order_instance: + await limit_order.limit(null_context, "side", "symbol", "amount", "target_position", "offset", + "stop_loss_offset", "stop_loss_tag", "stop_loss_type", "stop_loss_group", + "take_profit_offset", "take_profit_tag", "take_profit_type", "take_profit_group", + "slippage_limit", "time_limit", "reduce_only", "post_only", "tag", "group", "wait_for") + create_order_instance.assert_called_once_with( + null_context, side="side", symbol="symbol", order_amount="amount", order_target_position="target_position", + stop_loss_offset="stop_loss_offset", stop_loss_tag="stop_loss_tag", stop_loss_type="stop_loss_type", + stop_loss_group="stop_loss_group", + take_profit_offset="take_profit_offset", take_profit_tag="take_profit_tag", + take_profit_type="take_profit_type", take_profit_group="take_profit_group", + order_type_name="limit", order_offset="offset", slippage_limit="slippage_limit", time_limit="time_limit", + reduce_only="reduce_only", post_only="post_only", tag="tag", group="group", wait_for="wait_for") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_market_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_market_order.py new file mode 100644 index 0000000000..7f09f874e2 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_market_order.py @@ -0,0 +1,42 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.order_types.market_order as market_order + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_market(null_context): + with mock.patch.object(create_order, "create_order_instance", mock.AsyncMock()) as create_order_instance: + await market_order.market(null_context, "side", "symbol", "amount", "target_position", + "stop_loss_offset", "stop_loss_tag", "stop_loss_type", "stop_loss_group", + "take_profit_offset", "take_profit_tag", "take_profit_type", "take_profit_group", + "reduce_only", "tag", "group", "wait_for") + create_order_instance.assert_called_once_with( + null_context, side="side", symbol="symbol", order_amount="amount", order_target_position="target_position", + stop_loss_offset="stop_loss_offset", stop_loss_tag="stop_loss_tag", stop_loss_type="stop_loss_type", + stop_loss_group="stop_loss_group", + take_profit_offset="take_profit_offset", take_profit_tag="take_profit_tag", + take_profit_type="take_profit_type", take_profit_group="take_profit_group", + order_type_name="market", reduce_only="reduce_only", tag="tag", group="group", wait_for="wait_for") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_multiple_orders_creation.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_multiple_orders_creation.py new file mode 100644 index 0000000000..6771ea2af3 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_multiple_orders_creation.py @@ -0,0 +1,512 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import asyncio +import pytest +import mock +import decimal +import contextlib +import os + +import octobot_trading.personal_data as trading_personal_data +import octobot_trading.personal_data.orders.order_util as order_util +import octobot_trading.api as api +import octobot_trading.errors as errors +import octobot_trading.enums as trading_enums +import octobot_trading.constants as trading_constants +import tentacles.Meta.Keywords.scripting_library as scripting_library + + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, mock_context, \ + skip_if_octobot_trading_mocking_disabled +from tentacles.Meta.Keywords.scripting_library.tests.exchanges import backtesting_trader, backtesting_config, \ + backtesting_exchange_manager, fake_backtesting +import tentacles.Meta.Keywords.scripting_library.tests.test_utils.order_util as test_order_util + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_orders_with_invalid_values(mock_context, skip_if_octobot_trading_mocking_disabled): + # skip_if_octobot_trading_mocking_disabled mock_context.trader, "create_order" + initial_usdt_holdings, btc_price = await _usdt_trading_context(mock_context) + + if os.getenv('CYTHON_IGNORE'): + return + with mock.patch.object(trading_personal_data, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)), \ + mock.patch.object(order_util, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)), \ + mock.patch.object(mock_context.trader, "create_order", mock.AsyncMock()) as create_order_mock: + + with pytest.raises(errors.InvalidArgumentError): + # no amount + await scripting_library.market( + mock_context, + side="buy" + ) + create_order_mock.assert_not_called() + create_order_mock.reset_mock() + + with pytest.raises(errors.InvalidArgumentError): + # negative amount + await scripting_library.market( + mock_context, + amount="-1", + side="buy" + ) + create_order_mock.assert_not_called() + create_order_mock.reset_mock() + + with pytest.raises(errors.InvalidArgumentError): + # missing offset parameter + await scripting_library.limit( + mock_context, + target_position="20%", + side="buy" + ) + + with pytest.raises(errors.InvalidArgumentError): + # missing side parameter + await scripting_library.market( + mock_context, + amount="1" + ) + + # orders without having enough funds + for amount, side in ((1, "sell"), (0.000000001, "buy")): + await scripting_library.market( + mock_context, + amount=amount, + side=side + ) + create_order_mock.assert_not_called() + create_order_mock.reset_mock() + mock_context.orders_writer.log_many.assert_not_called() + mock_context.orders_writer.log_many.reset_mock() + mock_context.logger.warning.assert_called_once() + mock_context.logger.warning.reset_mock() + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_orders_amount_then_position_sequence(mock_context): + initial_usdt_holdings, btc_price = await _usdt_trading_context(mock_context) + mock_context.exchange_manager.is_future = True + symbol_contract = api.create_default_future_contract( + "BTC/USDT", decimal.Decimal(1), trading_enums.FutureContractType.LINEAR_PERPETUAL, + trading_constants.DEFAULT_SYMBOL_POSITION_MODE + ) + # We have to hardcode the symbol contract as it's not a futures symbol so we can't use load_pair_contract + mock_context.exchange_manager.exchange.pair_contracts[mock_context.symbol] = symbol_contract + + if os.getenv('CYTHON_IGNORE'): + return + with mock.patch.object(trading_personal_data, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)), \ + mock.patch.object(order_util, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)): + + # buy for 10% of the total portfolio value + orders = await scripting_library.market( + mock_context, + amount="10%", + side="buy" + ) + btc_val = decimal.Decimal(10) # 10.00 + usdt_val = decimal.Decimal(45000) # 45000.00 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + # buy for 10% of the portfolio available value + orders = await scripting_library.limit( + mock_context, + amount="10%a", + offset="0", + side="buy" + ) + btc_val = btc_val + decimal.Decimal(str((45000 * decimal.Decimal("0.1")) / 500)) # 19.0 + usdt_val = usdt_val * decimal.Decimal(str(0.9)) # 40500.00 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + # buy for for 10% of the current position value + orders = await scripting_library.market( + mock_context, + amount="10%p", + side="buy" + ) + usdt_val = usdt_val - (btc_val * decimal.Decimal("0.1") * btc_price) # 39550.00 + btc_val = btc_val * decimal.Decimal("1.1") # 20.90 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + # price changes to 1000 + btc_price = 1000 + mock_context.exchange_manager.exchange_personal_data.portfolio_manager.handle_mark_price_update( + "BTC/USDT", btc_price) + with mock.patch.object(trading_personal_data, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)), \ + mock.patch.object(order_util, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)): + + # buy to reach a target position of 25 btc + orders = await scripting_library.market( + mock_context, + target_position=25 + ) + usdt_val = usdt_val - ((25 - btc_val) * btc_price) # 35450.00 + btc_val = decimal.Decimal(25) # 25 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + # buy to reach a target position of 60% of the total portfolio (in BTC) + orders = await scripting_library.limit( + mock_context, + target_position="60%", + offset=0 + ) + previous_btc_val = btc_val + btc_val = (btc_val + (usdt_val / btc_price)) * decimal.Decimal("0.6") # 36.27 + usdt_val = usdt_val - (btc_val - previous_btc_val) * btc_price # 24180.00 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + # buy to reach a target position including an additional 50% of the available USDT in BTC + orders = await scripting_library.market( + mock_context, + target_position="50%a" + ) + btc_val = btc_val + usdt_val / 2 / btc_price # 48.36 + usdt_val = usdt_val / 2 # 12090.00 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + # sell to keep only 10% of the position, sell at 2000 (1000 + 100%) + orders = await scripting_library.limit( + mock_context, + target_position="10%p", + offset="100%" + ) + usdt_val = usdt_val + btc_val * decimal.Decimal("0.9") * (btc_price * 2) # 99138.00 + btc_val = btc_val / 10 # 4.836 + await _fill_and_check(mock_context, btc_val, usdt_val, orders) + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_concurrent_orders(mock_context): + async with _20_percent_position_trading_context(mock_context) as context_data: + btc_val, usdt_val, btc_price = context_data + + # create 3 sell orders (at price = 500 + 10 = 510) + # that would end up selling more than what we have if not executed sequentially + # 1st order is 80% of available btc, second is 80% of the remaining 20% and so on + + orders = [] + async def create_order(amount): + orders.append( + (await scripting_library.limit( + mock_context, + amount=amount, + offset=10, + side="sell" + ))[0] + ) + await asyncio.gather( + *( + create_order("80%a") + for _ in range(3) + ) + ) + + initial_btc_holdings = btc_val + btc_val = initial_btc_holdings * (decimal.Decimal("0.2") ** 3) + usdt_val = usdt_val + (initial_btc_holdings - btc_val) * (btc_price + 10) # 50118.40 + await _fill_and_check(mock_context, btc_val, usdt_val, orders, orders_count=3) + + # create 3 buy orders (at price = 500 + 10 = 510) all of them for a target position of 10% + # first order gets created to have this 10% position, others are also created like this, ending up in a 30% + # position + + # update portfolio current value + mock_context.exchange_manager.exchange_personal_data.portfolio_manager.handle_balance_updated() + + orders = [] + + async def create_order(target_position): + orders.append( + (await scripting_library.limit( + mock_context, + target_position=target_position, + offset=10 + ))[0] + ) + await asyncio.gather( + *( + create_order("10%") + for _ in range(3) + ) + ) + + initial_btc_holdings = btc_val # 0.16 + initial_total_val = initial_btc_holdings * btc_price + usdt_val + initial_position_percent = decimal.Decimal(initial_btc_holdings * btc_price / initial_total_val) + btc_val = initial_btc_holdings + \ + initial_total_val * (decimal.Decimal("0.1") - initial_position_percent) * 3 / btc_price # 29.79904 + usdt_val = usdt_val - (btc_val - initial_btc_holdings) * (btc_price + 10) # 35002.4896 + await _fill_and_check(mock_context, btc_val, usdt_val, orders, orders_count=3) + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_sell_limit_with_stop_loss_orders_single_sell_and_stop_with_oco_group(mock_context): + async with _20_percent_position_trading_context(mock_context) as context_data: + btc_val, usdt_val, btc_price = context_data + + mock_context.allow_artificial_orders = True # make stop loss not lock funds + oco_group = scripting_library.create_one_cancels_the_other_group(mock_context) + sell_limit_orders = await scripting_library.limit( + mock_context, + target_position="0%", + offset=50, + group=oco_group + ) + # add_to_order_group(oco_group, sell_limit_orders) + stop_loss_orders = await scripting_library.stop_loss( + mock_context, + target_position="0%", + offset=-75, + group=oco_group + ) + assert len(sell_limit_orders) == len(stop_loss_orders) == 1 + + # stop order is filled + usdt_val = usdt_val + btc_val * (btc_price - 75) # 48500.00 + btc_val = trading_constants.ZERO # 0.00 + await _fill_and_check(mock_context, btc_val, usdt_val, stop_loss_orders, logged_orders_count=2) + # linked order is cancelled + assert sell_limit_orders[0].is_cancelled() + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_sell_limit_with_stop_loss_orders_two_sells_and_stop_with_oco(mock_context): + async with _20_percent_position_trading_context(mock_context) as context_data: + btc_val, usdt_val, btc_price = context_data + + mock_context.allow_artificial_orders = True # make stop loss not lock funds + oco_group = scripting_library.create_one_cancels_the_other_group(mock_context) + stop_loss_orders = await scripting_library.stop_loss( + mock_context, + target_position="0%", + offset=-50, + side="sell", + group=oco_group, + tag="exitPosition" + ) + take_profit_limit_orders_1 = await scripting_library.limit( + mock_context, + target_position="50%p", + offset=50 + ) + take_profit_limit_orders_2 = await scripting_library.limit( + mock_context, + target_position="0%p", + offset=100, + group=oco_group, + tag="exitPosition" + ) + + # take_profit_limit_orders_1 filled + available_btc_val = trading_constants.ZERO # 10.00 + total_btc_val = btc_val / 2 # 10.00 + usdt_val = usdt_val + btc_val / 2 * (btc_price + 50) # 40000.00 + await _fill_and_check(mock_context, available_btc_val, usdt_val, take_profit_limit_orders_1, + btc_total=total_btc_val) + # linked order is not cancelled + assert stop_loss_orders[0].is_open() + + # take_profit_limit_orders_2 filled + usdt_val = usdt_val + btc_val / 2 * (btc_price + 100) # 40000.00 + btc_val = trading_constants.ZERO # 0.00 + await _fill_and_check(mock_context, btc_val, usdt_val, take_profit_limit_orders_2) + # linked order is cancelled + assert stop_loss_orders[0].is_cancelled() + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_sell_limit_with_multiple_stop_loss_and_sell_orders_in_balanced_take_profit_and_stop_group(mock_context): + async with _20_percent_position_trading_context(mock_context) as context_data: + btc_val, usdt_val, btc_price = context_data + + mock_context.allow_artificial_orders = True # make stop loss not lock funds + btsl_group_1 = scripting_library.create_balanced_take_profit_and_stop_group(mock_context) + g1_stop_1 = await scripting_library.stop_loss( + mock_context, amount="2", offset=-50, side="sell", group=btsl_group_1, tag="exitPosition1" + ) + g1_stop_2 = await scripting_library.stop_loss( + mock_context, amount="3", offset=-100, side="sell", group=btsl_group_1, tag="exitPosition1" + ) + g1_stop_3 = await scripting_library.stop_loss( + mock_context, amount="4", offset=-150, side="sell", group=btsl_group_1, tag="exitPosition1" + ) + g1_tp_1 = await scripting_library.limit( + mock_context, amount="4", offset=50, side="sell", group=btsl_group_1, tag="exitPosition1" + ) + g1_tp_2 = await scripting_library.limit( + mock_context, amount="5", offset=100, side="sell", group=btsl_group_1, tag="exitPosition1" + ) + + btsl_group_2 = scripting_library.create_balanced_take_profit_and_stop_group(mock_context) + g2_stop_1 = await scripting_library.stop_loss( + mock_context, amount="5", offset=-50, side="sell", group=btsl_group_2, tag="exitPosition1" + ) + g2_tp_1 = await scripting_library.limit( + mock_context, amount="3", offset=50, side="sell", group=btsl_group_2, tag="exitPosition1" + ) + g2_tp_2 = await scripting_library.limit( + mock_context, amount="2", offset=100, side="sell", group=btsl_group_2, tag="exitPosition1" + ) + + # g1_tp_1 filled + available_btc_val = decimal.Decimal(6) + sold_btc = decimal.Decimal(4) + total_btc_val = btc_val - sold_btc + usdt_val = usdt_val + sold_btc * (btc_price + 50) + await _fill_and_check(mock_context, available_btc_val, usdt_val, g1_tp_1, btc_total=total_btc_val) + # g1_stop_3 is cancelled (same size), other are untouched + assert g1_stop_3[0].is_cancelled() + assert all(o[0].is_open() for o in [g1_stop_1, g1_stop_2, g1_tp_2, g2_stop_1, g2_tp_1, g2_tp_2]) + + # g1_stop_1 filled + sold_btc = decimal.Decimal(2) + total_btc_val = total_btc_val - sold_btc + usdt_val = usdt_val + sold_btc * (btc_price - 50) + await _fill_and_check(mock_context, available_btc_val, usdt_val, g1_stop_1, btc_total=total_btc_val) + # g1_tp_1 is edited (reduced size), other are untouched + assert g1_tp_2[0].origin_quantity == decimal.Decimal(3) # 5 - 2 + assert all(o[0].is_open() for o in [g1_stop_2, g1_tp_2, g2_stop_1, g2_tp_1, g2_tp_2]) + + # g2_stop_1 filled + sold_btc = decimal.Decimal(5) + total_btc_val = total_btc_val - sold_btc + usdt_val = usdt_val + sold_btc * (btc_price - 50) + await _fill_and_check(mock_context, available_btc_val, usdt_val, g2_stop_1, btc_total=total_btc_val) + # g1_tp_1 is edited (reduced size), other are untouched + assert all(o[0].is_cancelled() for o in [g2_tp_1, g2_tp_2]) + assert all(o[0].is_open() for o in [g1_stop_2, g1_tp_2]) + + # g1_stop_2 cancelled + await mock_context.trader.cancel_order(g1_stop_2[0]) + # g1_tp_2 is cancelled as well + assert all(o[0].is_cancelled() for o in [g1_stop_2, g1_tp_2]) + assert scripting_library.get_open_orders(mock_context) == [] + + +@pytest.mark.parametrize("backtesting_config", ["USDT"], indirect=["backtesting_config"]) +async def test_multiple_sell_limit_with_stop_loss_rounding_issues_in_balanced_take_profit_and_stop_group(mock_context): + async with _20_percent_position_trading_context(mock_context) as context_data: + btc_val, usdt_val, btc_price = context_data + + mock_context.allow_artificial_orders = True # make stop loss not lock funds + btsl_group_1 = scripting_library.create_balanced_take_profit_and_stop_group(mock_context) + # disable to create orders + await btsl_group_1.enable(False) + position_size = decimal.Decimal(20) + added_amount = decimal.Decimal("0.00100001111") + + market_1 = await scripting_library.market(mock_context, amount=added_amount, side="buy") + assert market_1[0].is_filled() + amount = position_size + decimal.Decimal("0.00100001") # ending "111" got truncated + assert api.get_portfolio_currency(mock_context.exchange_manager, "BTC").total == amount + assert api.get_portfolio_currency(mock_context.exchange_manager, "BTC").available == amount + + g1_stop_1 = await scripting_library.stop_loss( + mock_context, amount=amount, offset=-50, side="sell", group=btsl_group_1, tag="exitPosition1" + ) + g1_tp_1 = await scripting_library.limit( + mock_context, amount=amount * decimal.Decimal("0.5"), offset=50, side="sell", group=btsl_group_1, + reduce_only=True + ) + g1_tp_2 = await scripting_library.limit( + mock_context, amount=amount * decimal.Decimal("0.5"), offset=100, side="sell", group=btsl_group_1, + reduce_only=True + ) + + assert g1_stop_1[0].origin_quantity == amount + assert g1_tp_1[0].origin_quantity == decimal.Decimal('10.00050001') + assert g1_tp_2[0].origin_quantity == decimal.Decimal('10.00050000') + + # enable order group: no order edit is triggered as scripting_library took care of the rounding issue of + # 20.00100001 / 2 + await btsl_group_1.enable(False) + + assert g1_stop_1[0].origin_quantity == amount + assert g1_tp_1[0].origin_quantity == decimal.Decimal('10.00050001') + assert g1_tp_2[0].origin_quantity == decimal.Decimal('10.00050000') + + +async def _usdt_trading_context(mock_context): + initial_usdt_holdings = 50000 + mock_context.exchange_manager.exchange_personal_data.portfolio_manager.portfolio.update_portfolio_from_balance({ + 'BTC': {'available': decimal.Decimal(0), 'total': decimal.Decimal(0)}, + 'ETH': {'available': decimal.Decimal(0), 'total': decimal.Decimal(0)}, + 'USDT': {'available': decimal.Decimal(str(initial_usdt_holdings)), + 'total': decimal.Decimal(str(initial_usdt_holdings))} + }, mock_context.exchange_manager) + mock_context.exchange_manager.exchange_personal_data.portfolio_manager.handle_balance_updated() + btc_price = 500 + mock_context.exchange_manager.exchange_personal_data.portfolio_manager.handle_mark_price_update( + "BTC/USDT", btc_price) + return initial_usdt_holdings, btc_price + + +@contextlib.asynccontextmanager +async def _20_percent_position_trading_context(mock_context): + initial_usdt_holdings, btc_price = await _usdt_trading_context(mock_context) + usdt_val = decimal.Decimal(str(initial_usdt_holdings)) + with mock.patch.object(trading_personal_data, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)), \ + mock.patch.object(order_util, "get_up_to_date_price", mock.AsyncMock(return_value=btc_price)): + # initial limit buy order: buy with 20% of portfolio + buy_limit_orders = await scripting_library.limit( + mock_context, + target_position="20%", + offset=0, + side="buy" + ) + btc_val = (usdt_val * decimal.Decimal("0.2")) / btc_price # 20.00 + usdt_val = usdt_val * decimal.Decimal("0.8") # 40000.00 + # position size = 20 BTC + await _fill_and_check(mock_context, btc_val, usdt_val, buy_limit_orders) + yield btc_val, usdt_val, btc_price + + +async def _fill_and_check(mock_context, btc_available, usdt_available, orders, + btc_total=None, usdt_total=None, orders_count=1, logged_orders_count=None): + for order in orders: + if isinstance(order, trading_personal_data.LimitOrder): + await test_order_util.fill_limit_or_stop_order(order) + elif isinstance(order, trading_personal_data.MarketOrder): + await test_order_util.fill_market_order(order) + + _ensure_orders_validity(mock_context, btc_available, usdt_available, orders, + btc_total=btc_total, usdt_total=usdt_total, orders_count=orders_count, + logged_orders_count=logged_orders_count) + + +def _ensure_orders_validity(mock_context, btc_available, usdt_available, orders, + btc_total=None, usdt_total=None, orders_count=1, logged_orders_count=None): + exchange_manager = mock_context.exchange_manager + btc_total = btc_total or btc_available + usdt_total = usdt_total or usdt_available + assert len(orders) == orders_count + assert all(isinstance(order, trading_personal_data.Order) for order in orders) + assert mock_context.orders_writer.log_many.call_count == logged_orders_count or orders_count + mock_context.orders_writer.log_many.reset_mock() + mock_context.logger.warning.assert_not_called() + mock_context.logger.warning.reset_mock() + mock_context.logger.exception.assert_not_called() + mock_context.logger.exception.reset_mock() + assert api.get_portfolio_currency(exchange_manager, "BTC").available == btc_available + assert api.get_portfolio_currency(exchange_manager, "BTC").total == btc_total + assert api.get_portfolio_currency(exchange_manager, "USDT").available == usdt_available + assert api.get_portfolio_currency(exchange_manager, "USDT").total == usdt_total diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_stop_loss_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_stop_loss_order.py new file mode 100644 index 0000000000..6ad0f2aa92 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_stop_loss_order.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.order_types.stop_loss_order as stop_loss_order + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_stop_loss(null_context): + with mock.patch.object(create_order, "create_order_instance", mock.AsyncMock()) as create_order_instance: + await stop_loss_order.stop_loss(null_context, "side", "symbol", "offset", "amount", "target_position", + "tag", "group", "wait_for") + create_order_instance.assert_called_once_with( + null_context, side="side", symbol="symbol", order_amount="amount", order_target_position="target_position", + order_type_name="stop_loss", order_offset="offset", reduce_only=True, + tag="tag", group="group", wait_for="wait_for") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_limit_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_limit_order.py new file mode 100644 index 0000000000..53e5652621 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_limit_order.py @@ -0,0 +1,40 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.order_types.trailing_limit_order as trailing_limit_order + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_trailing_limit(null_context): + with mock.patch.object(create_order, "create_order_instance", mock.AsyncMock()) as create_order_instance: + await trailing_limit_order.trailing_limit(null_context, "side", "symbol", "amount", "target_position", + "offset", "min_offset", "max_offset", "slippage_limit", "time_limit", + "reduce_only", "post_only", + "tag", "group", "wait_for") + create_order_instance.assert_called_once_with( + null_context, side="side", symbol="symbol", order_amount="amount", order_target_position="target_position", + order_type_name="trailing_limit", order_min_offset="min_offset", order_max_offset="max_offset", + order_offset="offset", slippage_limit="slippage_limit", time_limit="time_limit", reduce_only="reduce_only", + post_only="post_only", tag="tag", group="group", wait_for="wait_for") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_market_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_market_order.py new file mode 100644 index 0000000000..37467bb9de --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_market_order.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.order_types.trailing_market_order as trailing_market_order + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_trailing_market(null_context): + with mock.patch.object(create_order, "create_order_instance", mock.AsyncMock()) as create_order_instance: + await trailing_market_order.trailing_market(null_context, "side", "symbol", "amount", "target_position", + "offset", "reduce_only", "tag", "group", "wait_for") + create_order_instance.assert_called_once_with( + null_context, side="side", symbol="symbol", order_amount="amount", order_target_position="target_position", + order_type_name="trailing_market", order_offset="offset", reduce_only="reduce_only", + tag="tag", group="group", wait_for="wait_for") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_stop_loss_order.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_stop_loss_order.py new file mode 100644 index 0000000000..25671ddf60 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/order_types/test_trailing_stop_loss_order.py @@ -0,0 +1,37 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_types.create_order as create_order +import tentacles.Meta.Keywords.scripting_library.orders.order_types.trailing_stop_loss_order as trailing_stop_loss_order + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, null_context + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_trailing_stop_loss(null_context): + with mock.patch.object(create_order, "create_order_instance", mock.AsyncMock()) as create_order_instance: + await trailing_stop_loss_order.trailing_stop_loss(null_context, "side", "symbol", "amount", "target_position", + "offset", "reduce_only", "tag", "group", "wait_for") + create_order_instance.assert_called_once_with( + null_context, side="side", symbol="symbol", order_amount="amount", order_target_position="target_position", + order_type_name="trailing_stop_loss", order_offset="offset", reduce_only="reduce_only", + tag="tag", group="group", wait_for="wait_for") diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/position_size/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/position_size/__init__.py new file mode 100644 index 0000000000..b98f1648fb --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/position_size/__init__.py @@ -0,0 +1 @@ +# Copyright diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/position_size/test_target_position.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/position_size/test_target_position.py new file mode 100644 index 0000000000..58646a7aa2 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/position_size/test_target_position.py @@ -0,0 +1,169 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import pytest +import mock +import decimal + +import octobot_trading.enums as trading_enums +import octobot_trading.errors as errors +import octobot_trading.modes.script_keywords as script_keywords +import tentacles.Meta.Keywords.scripting_library.orders.position_size.target_position as target_position +import tentacles.Meta.Keywords.scripting_library.data.reading.exchange_private_data as exchange_private_data + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, mock_context +from tentacles.Meta.Keywords.scripting_library.tests.exchanges import backtesting_trader, backtesting_config, \ + backtesting_exchange_manager, fake_backtesting + + +def test_get_target_position_side(): + assert target_position.get_target_position_side(1) == trading_enums.TradeOrderSide.BUY.value + assert target_position.get_target_position_side(-1) == trading_enums.TradeOrderSide.SELL.value + with pytest.raises(RuntimeError): + target_position.get_target_position_side(0) + + +@pytest.mark.asyncio +async def test_get_target_position(mock_context): + with pytest.raises(errors.InvalidArgumentError): + await target_position.get_target_position(mock_context, "1sdsqdq") + + # with positive (long) position + with mock.patch.object(script_keywords, "adapt_amount_to_holdings", + mock.AsyncMock(return_value=decimal.Decimal(1))) as adapt_amount_to_holdings_mock, \ + mock.patch.object(exchange_private_data, "open_position_size", + mock.Mock(return_value=decimal.Decimal(10))) as open_position_size_mock: + + with mock.patch.object(script_keywords, "parse_quantity", + mock.Mock(return_value=(script_keywords.QuantityType.POSITION_PERCENT, decimal.Decimal(10)))) \ + as parse_quantity_mock, \ + mock.patch.object(target_position, "get_target_position_side", + mock.Mock(return_value=trading_enums.TradeOrderSide.SELL.value)) \ + as get_target_position_side_mock: + assert await target_position.get_target_position(mock_context, "1", target_price="hello") == \ + (decimal.Decimal(1), trading_enums.TradeOrderSide.SELL.value) + parse_quantity_mock.assert_called_once_with("1") + open_position_size_mock.assert_called_once_with(mock_context) + get_target_position_side_mock.assert_called_once_with(decimal.Decimal(-9)) + adapt_amount_to_holdings_mock.assert_called_once_with(mock_context, decimal.Decimal(9), + trading_enums.TradeOrderSide.SELL.value, + False, True, False, target_price="hello") + adapt_amount_to_holdings_mock.reset_mock() + get_target_position_side_mock.reset_mock() + open_position_size_mock.reset_mock() + + with mock.patch.object(script_keywords, "parse_quantity", + mock.Mock(return_value=(script_keywords.QuantityType.PERCENT, decimal.Decimal(110)))) \ + as parse_quantity_mock, \ + mock.patch.object(script_keywords, "total_account_balance", + mock.AsyncMock(return_value=decimal.Decimal(10))) \ + as total_account_balance_mock, \ + mock.patch.object(target_position, "get_target_position_side", + mock.Mock(return_value=trading_enums.TradeOrderSide.BUY.value)) \ + as get_target_position_side_mock: + assert await target_position.get_target_position(mock_context, "1", use_total_holding=True, + reduce_only=False, is_stop_order=True) == \ + (decimal.Decimal(1), trading_enums.TradeOrderSide.BUY.value) + parse_quantity_mock.assert_called_once_with("1") + total_account_balance_mock.assert_called_once_with(mock_context) + open_position_size_mock.assert_called_once_with(mock_context) + get_target_position_side_mock.assert_called_once_with(decimal.Decimal(1)) + adapt_amount_to_holdings_mock.assert_called_once_with(mock_context, decimal.Decimal(1), + trading_enums.TradeOrderSide.BUY.value, + True, False, True, target_price=None) + adapt_amount_to_holdings_mock.reset_mock() + get_target_position_side_mock.reset_mock() + open_position_size_mock.reset_mock() + + with mock.patch.object(script_keywords, "parse_quantity", + mock.Mock(return_value=(script_keywords.QuantityType.FLAT, decimal.Decimal(-3)))) \ + as parse_quantity_mock, \ + mock.patch.object(target_position, "get_target_position_side", + mock.Mock(return_value=trading_enums.TradeOrderSide.SELL.value)) \ + as get_target_position_side_mock: + assert await target_position.get_target_position(mock_context, "1") == \ + (decimal.Decimal(1), trading_enums.TradeOrderSide.SELL.value) + parse_quantity_mock.assert_called_once_with("1") + open_position_size_mock.assert_called_once_with(mock_context) + get_target_position_side_mock.assert_called_once_with(decimal.Decimal(-13)) + adapt_amount_to_holdings_mock.assert_called_once_with(mock_context, decimal.Decimal(13), + trading_enums.TradeOrderSide.SELL.value, + False, True, False, target_price=None) + adapt_amount_to_holdings_mock.reset_mock() + get_target_position_side_mock.reset_mock() + open_position_size_mock.reset_mock() + + with mock.patch.object(script_keywords, "parse_quantity", + mock.Mock(return_value=(script_keywords.QuantityType.AVAILABLE_PERCENT, decimal.Decimal(25)))) \ + as parse_quantity_mock, \ + mock.patch.object(script_keywords, "available_account_balance", + mock.AsyncMock(return_value=decimal.Decimal(5))) \ + as available_account_balance_mock, \ + mock.patch.object(target_position, "get_target_position_side", + mock.Mock(return_value=trading_enums.TradeOrderSide.BUY.value)) \ + as get_target_position_side_mock: + assert await target_position.get_target_position(mock_context, "1") == \ + (decimal.Decimal(1), trading_enums.TradeOrderSide.BUY.value) + parse_quantity_mock.assert_called_once_with("1") + available_account_balance_mock.assert_called_once_with(mock_context, reduce_only=True) + # we are at initially at 10, we want add 20% of 5 => need to buy 1.25 + get_target_position_side_mock.assert_called_once_with(decimal.Decimal("1.25")) + adapt_amount_to_holdings_mock.assert_called_once_with(mock_context, decimal.Decimal(1.25), + trading_enums.TradeOrderSide.BUY.value, + False, True, False, target_price=None) + adapt_amount_to_holdings_mock.reset_mock() + get_target_position_side_mock.reset_mock() + open_position_size_mock.reset_mock() + + # with negative (short) position + with mock.patch.object(script_keywords, "adapt_amount_to_holdings", + mock.AsyncMock(return_value=decimal.Decimal(2))) as adapt_amount_to_holdings_mock, \ + mock.patch.object(exchange_private_data, "open_position_size", + mock.Mock(return_value=decimal.Decimal(-10))) as open_position_size_mock: + with mock.patch.object(script_keywords, "parse_quantity", + mock.Mock(return_value=(script_keywords.QuantityType.DELTA, decimal.Decimal(-3)))) \ + as parse_quantity_mock, \ + mock.patch.object(target_position, "get_target_position_side", + mock.Mock(return_value=trading_enums.TradeOrderSide.BUY.value)) \ + as get_target_position_side_mock: + assert await target_position.get_target_position(mock_context, "1") == \ + (decimal.Decimal(2), trading_enums.TradeOrderSide.BUY.value) + parse_quantity_mock.assert_called_once_with("1") + open_position_size_mock.assert_called_once_with(mock_context) + get_target_position_side_mock.assert_called_once_with(decimal.Decimal(7)) + adapt_amount_to_holdings_mock.assert_called_once_with(mock_context, decimal.Decimal(7), + trading_enums.TradeOrderSide.BUY.value, + False, True, False, target_price=None) + adapt_amount_to_holdings_mock.reset_mock() + get_target_position_side_mock.reset_mock() + open_position_size_mock.reset_mock() + + with mock.patch.object(script_keywords, "parse_quantity", + mock.Mock(return_value=(script_keywords.QuantityType.POSITION_PERCENT, decimal.Decimal(-3)))) \ + as parse_quantity_mock, \ + mock.patch.object(target_position, "get_target_position_side", + mock.Mock(return_value=trading_enums.TradeOrderSide.BUY.value)) \ + as get_target_position_side_mock: + assert await target_position.get_target_position(mock_context, "1") == \ + (decimal.Decimal(2), trading_enums.TradeOrderSide.BUY.value) + parse_quantity_mock.assert_called_once_with("1") + open_position_size_mock.assert_called_once_with(mock_context) + get_target_position_side_mock.assert_called_once_with(decimal.Decimal("10.3")) + adapt_amount_to_holdings_mock.assert_called_once_with(mock_context, decimal.Decimal("10.3"), + trading_enums.TradeOrderSide.BUY.value, + False, True, False, target_price=None) + adapt_amount_to_holdings_mock.reset_mock() + get_target_position_side_mock.reset_mock() + open_position_size_mock.reset_mock() diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/test_cancelling.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/test_cancelling.py new file mode 100644 index 0000000000..e3ec17f2bc --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/orders/test_cancelling.py @@ -0,0 +1,110 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. + +import pytest +import mock + +import tentacles.Meta.Keywords.scripting_library.orders.order_tags as order_tags +import tentacles.Meta.Keywords.scripting_library.orders.cancelling as cancelling +import octobot_trading.enums as trading_enums +import octobot_trading.constants as trading_constants + +from tentacles.Meta.Keywords.scripting_library.tests import event_loop, mock_context, \ + skip_if_octobot_trading_mocking_disabled +from tentacles.Meta.Keywords.scripting_library.tests.exchanges import backtesting_trader, backtesting_config, \ + backtesting_exchange_manager, fake_backtesting + + +# All test coroutines will be treated as marked. +pytestmark = pytest.mark.asyncio + + +async def test_cancel_orders(mock_context, skip_if_octobot_trading_mocking_disabled): + # skip_if_octobot_trading_mocking_disabled mock_context.trader, "cancel_order" + tagged_orders = ["order_1", "order_2"] + with mock.patch.object(mock_context.trader, "cancel_order", + mock.AsyncMock(return_value=True)) as cancel_order_mock, \ + mock.patch.object(mock_context.trader, "cancel_open_orders", mock.AsyncMock(return_value=(True, []))) \ + as cancel_open_orders_mock: + with mock.patch.object(order_tags, "get_tagged_orders", mock.Mock(return_value=tagged_orders)) \ + as get_tagged_orders_mock: + # cancel all orders from context symbol + assert await cancelling.cancel_orders(mock_context) is True + get_tagged_orders_mock.assert_not_called() + cancel_order_mock.assert_not_called() + cancel_open_orders_mock.assert_called_once_with( + mock_context.symbol, cancel_loaded_orders=True, side=None, + since=trading_constants.NO_DATA_LIMIT, + until=trading_constants.NO_DATA_LIMIT + ) + cancel_open_orders_mock.reset_mock() + + # cancel sided orders from context symbol + side_str_to_side = { + "sell": trading_enums.TradeOrderSide.SELL, + "buy": trading_enums.TradeOrderSide.BUY, + "all": None, + } + for side, value in side_str_to_side.items(): + assert await cancelling.cancel_orders(mock_context, which=side, cancel_loaded_orders=False) is True + get_tagged_orders_mock.assert_not_called() + cancel_order_mock.assert_not_called() + cancel_open_orders_mock.assert_called_once_with( + mock_context.symbol, cancel_loaded_orders=False, side=value, + since=trading_constants.NO_DATA_LIMIT, + until=trading_constants.NO_DATA_LIMIT + ) + cancel_open_orders_mock.reset_mock() + + # different symbol values + assert await cancelling.cancel_orders(mock_context, symbol="ETH/USDT") is True + get_tagged_orders_mock.assert_not_called() + cancel_order_mock.assert_not_called() + cancel_open_orders_mock.assert_called_once_with( + "ETH/USDT", cancel_loaded_orders=True, side=value, + since=trading_constants.NO_DATA_LIMIT, + until=trading_constants.NO_DATA_LIMIT + ) + cancel_open_orders_mock.reset_mock() + assert await cancelling.cancel_orders(mock_context, symbols=["ETH/USDT", "USDT/USDC"]) is True + get_tagged_orders_mock.assert_not_called() + cancel_order_mock.assert_not_called() + assert cancel_open_orders_mock.mock_calls[0].args == ("ETH/USDT", ) + assert cancel_open_orders_mock.mock_calls[1].args == ("USDT/USDC", ) + cancel_open_orders_mock.reset_mock() + + # tags + assert await cancelling.cancel_orders(mock_context, which="tag1") is True + get_tagged_orders_mock.assert_called_once_with( + mock_context, "tag1", symbol=None, + since=trading_constants.NO_DATA_LIMIT, + until=trading_constants.NO_DATA_LIMIT + ) + assert cancel_order_mock.mock_calls[0].args == ("order_1", ) + assert cancel_order_mock.mock_calls[1].args == ("order_2", ) + cancel_open_orders_mock.assert_not_called() + cancel_order_mock.reset_mock() + + # no order to cancel + with mock.patch.object(order_tags, "get_tagged_orders", mock.Mock(return_value=[])) as get_tagged_orders_mock: + assert await cancelling.cancel_orders(mock_context, which="tag1") is False + get_tagged_orders_mock.assert_called_once_with( + mock_context, "tag1", symbol=None, + since=trading_constants.NO_DATA_LIMIT, + until=trading_constants.NO_DATA_LIMIT + ) + cancel_order_mock.assert_not_called() + cancel_open_orders_mock.assert_not_called() diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/static/config.json b/packages/tentacles/Meta/Keywords/scripting_library/tests/static/config.json new file mode 100644 index 0000000000..39041b116c --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/static/config.json @@ -0,0 +1,20 @@ +{ + "time_frame": ["1h", "4h", "1d"], + "exchanges": { + "binance": { + "api-key": "", + "api-secret": "", + "web-socket": false + }, + "bitmex": { + "api-key": "", + "api-secret": "", + "web-socket": false + }, + "poloniex": { + "api-key": "", + "api-secret": "", + "web-socket": false + } + } +} \ No newline at end of file diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/static/profile.json b/packages/tentacles/Meta/Keywords/scripting_library/tests/static/profile.json new file mode 100644 index 0000000000..fd683a0ee6 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/static/profile.json @@ -0,0 +1,99 @@ +{ + "profile": { + "avatar": "default_profile.png", + "description": "OctoBot default profile.", + "id": "default", + "name": "default" + }, + "config": { + "crypto-currencies": { + "Bitcoin": { + "pairs": [ + "BTC/USDT", + "BTC/EUR", + "BTC/USDC" + ] + }, + "Neo": { + "pairs": [ + "NEO/BTC" + ] + }, + "Ethereum": { + "pairs": [ + "ETH/USDT" + ] + }, + "Icon": { + "pairs": [ + "ICX/BTC" + ] + }, + "VeChain": { + "pairs": [ + "VEN/BTC" + ] + }, + "Nano": { + "pairs": [ + "XRB/BTC" + ] + }, + "Cardano": { + "pairs": [ + "ADA/BTC" + ] + }, + "Ontology": { + "pairs": [ + "ONT/BTC" + ] + }, + "Stellar": { + "pairs": [ + "XLM/BTC" + ] + }, + "Power Ledger": { + "pairs": [ + "POWR/BTC" + ] + }, + "Ethereum Classic": { + "pairs": [ + "ETC/BTC" + ] + }, + "WAX": { + "pairs": [ + "WAX/BTC" + ] + }, + "XRP": { + "pairs": [ + "XRP/BTC" + ] + }, + "Verge": { + "pairs": [ + "XVG/BTC" + ] + } + }, + "exchanges": {}, + "trading": { + "risk": 1, + "reference-market": "BTC" + }, + "trader": { + "enabled": false + }, + "trader-simulator": { + "enabled": true, + "starting-portfolio": { + "BTC": 10, + "USDT": 1000 + } + } + } +} diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/test_utils/__init__.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/test_utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/tentacles/Meta/Keywords/scripting_library/tests/test_utils/order_util.py b/packages/tentacles/Meta/Keywords/scripting_library/tests/test_utils/order_util.py new file mode 100644 index 0000000000..8090ce3836 --- /dev/null +++ b/packages/tentacles/Meta/Keywords/scripting_library/tests/test_utils/order_util.py @@ -0,0 +1,26 @@ +# Drakkar-Software OctoBot-Tentacles +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from octobot_commons.asyncio_tools import wait_asyncio_next_cycle + + +async def fill_limit_or_stop_order(limit_or_stop_order): + await limit_or_stop_order.on_fill() + await wait_asyncio_next_cycle() + + +async def fill_market_order(market_order): + await market_order.on_fill() + await wait_asyncio_next_cycle() diff --git a/packages/tentacles/README.md b/packages/tentacles/README.md new file mode 100644 index 0000000000..7437c4d169 --- /dev/null +++ b/packages/tentacles/README.md @@ -0,0 +1,25 @@ +# OctoBot-Tentacles +[![OctoBot-Tentacles-CI](https://github.com/Drakkar-Software/OctoBot-Tentacles/workflows/OctoBot-Tentacles-CI/badge.svg)](https://github.com/Drakkar-Software/OctoBot-Tentacles/actions) + +This repository contains default evaluators, strategies, utilitary modules, interfaces and trading modes for the [OctoBot](https://github.com/Drakkar-Software/OctoBot) project. + +Modules in this tentacles are installed in the **Default** folder of the associated module types + +To add custom tentacles to your OctoBot, see the [dedicated docs page](https://www.octobot.cloud/guides/octobot-tentacles-development/customize-your-octobot?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=octobot_tentacles_readme). + +## Contributing to the official OctoBot Tentacles: +1. Create your own fork of this repo +2. Start your branch from the `dev` branch of this repo +3. Commit and push your changes into your fork +4. Create a pull request from your branch on your fork to the `dev` branch of this repo + +Tips: + +To export changes from your local OctoBot tentacles folder into this repo, run this command from your OctoBot folder: +`python start.py tentacles -e "../../OctoBot-Tentacles" OctoBot-Default-Tentacles -d "tentacles"` +Where: +- `start.py`: start.py script from your OctoBot folder +- `tentacles`: the tentacles command of the script +- `../../OctoBot-Tentacles`: the path to your fork of this repository (relatively to the folder you are running the command from) +- `OctoBot-Default-Tentacles`: filter to only export tentacles tagged as `OctoBot-Default-Tentacles` (in metadata file) +- `-d tentacles`: name of your OctoBot tentacles folder that are to be copied to the repo (relatively to the folder you are running the command from) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/__init__.py b/packages/tentacles/Services/Interfaces/node_api_interface/__init__.py new file mode 100644 index 0000000000..3509969f43 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/__init__.py @@ -0,0 +1,19 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import octobot_commons.constants as commons_constants +if not commons_constants.USE_MINIMAL_LIBS: + from .node_api import NodeApiInterface diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/__init__.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/__init__.py new file mode 100644 index 0000000000..ad25c24157 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/__init__.py @@ -0,0 +1,15 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/deps.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/deps.py new file mode 100644 index 0000000000..31d76abbf2 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/deps.py @@ -0,0 +1,64 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import uuid +import typing + +from fastapi import Depends, HTTPException, status +from fastapi.security import HTTPBasic, HTTPBasicCredentials + +import octobot_node.models +import octobot.community.authentication as community_auth + +security_basic = HTTPBasic(auto_error=False) + +_BASIC_AUTH_USER_ID = uuid.uuid4() + + +def get_current_user( + credentials: typing.Annotated[typing.Optional[HTTPBasicCredentials], Depends(security_basic)], +) -> octobot_node.models.User: + auth = community_auth.CommunityAuthentication.instance() + if auth is None or not auth.is_node_wallet_configured(): + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Node not configured", + ) + if credentials is None or not auth.verify_node_passphrase(credentials.password): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Incorrect passphrase", + ) + address = auth.get_node_wallet_address() + return octobot_node.models.User( + id=_BASIC_AUTH_USER_ID, + email=address, + is_active=True, + is_superuser=True, + full_name=None, + ) + + +CurrentUser = typing.Annotated[octobot_node.models.User, Depends(get_current_user)] + + +def get_current_active_superuser(current_user: CurrentUser) -> octobot_node.models.User: + if not current_user.is_superuser: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="The user doesn't have enough privileges" + ) + return current_user diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/main.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/main.py new file mode 100644 index 0000000000..9771f0282c --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/main.py @@ -0,0 +1,56 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from fastapi import APIRouter + +# Import from tentacles package (runtime) or fallback to direct imports (build) +try: + from tentacles.Services.Interfaces.node_api_interface.api.route_provider import ( + register_all_provider_routes, + ) + from tentacles.Services.Interfaces.node_api_interface.api.routes import ( + login, + nodes, + users, + tasks, + logs, + setup, + exchanges, + ) +except ImportError: + from api.route_provider import register_all_provider_routes + from api.routes import ( + login, + nodes, + users, + tasks, + logs, + setup, + exchanges, + ) + + +def build_api_router() -> APIRouter: + api_router = APIRouter() + api_router.include_router(setup.router) + api_router.include_router(login.router) + api_router.include_router(exchanges.router) + register_all_provider_routes(api_router) + api_router.include_router(users.router, prefix="/users") + api_router.include_router(tasks.router, prefix="/tasks") + api_router.include_router(nodes.router, prefix="/nodes") + api_router.include_router(logs.router, prefix="/logs") + return api_router diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/route_provider.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/route_provider.py new file mode 100644 index 0000000000..2d8966254e --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/route_provider.py @@ -0,0 +1,64 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import abc +import enum +import typing + +import octobot_commons.tentacles_management.class_inspector as class_inspector + +import fastapi + + +class RouteType(enum.Enum): + TENTACLES = "tentacles" + + +def _is_installable_provider_class( + provider_class: typing.Type[typing.Any], +) -> bool: + return not class_inspector.is_abstract_using_inspection_and_class_naming( + provider_class + ) + + +def _include_router_prefix(route_type: RouteType) -> typing.Optional[str]: + if route_type == RouteType.TENTACLES: + return "/tentacles" + return None + + +class RouteProvider(abc.ABC): + """Concrete subclasses must set ``ROUTE_TYPE`` and implement ``get_router``.""" + + ROUTE_TYPE: typing.ClassVar[RouteType] + + @abc.abstractmethod + def get_router(self) -> fastapi.APIRouter: + raise NotImplementedError + + +def register_all_provider_routes(api_router: fastapi.APIRouter) -> None: + for provider_class in class_inspector.get_all_classes_from_parent(RouteProvider): + if not _is_installable_provider_class(provider_class): + continue + route_prefix = _include_router_prefix(provider_class.ROUTE_TYPE) + if route_prefix is not None: + api_router.include_router( + provider_class().get_router(), prefix=route_prefix + ) + else: + api_router.include_router(provider_class().get_router()) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/__init__.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/__init__.py new file mode 100644 index 0000000000..e4de5f80da --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/__init__.py @@ -0,0 +1,16 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/exchanges.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/exchanges.py new file mode 100644 index 0000000000..2c66b971be --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/exchanges.py @@ -0,0 +1,47 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . +import typing + +from fastapi import APIRouter, Query +from fastapi.responses import JSONResponse + +try: + import tentacles.Services.Interfaces.node_api_interface.core.exchanges as exchange_core +except ImportError: + import core.exchanges as exchange_core + + +router = APIRouter(prefix="/exchanges", tags=["exchanges"]) + + +@router.get("/traded-pairs") +async def get_traded_pairs( + exchange_config: typing.Annotated[exchange_core.ExchangeConfig, Query()], +) -> JSONResponse: + pairs_and_tf_by_exchange = await exchange_core.get_traded_pairs_and_timeframes_by_exchange(exchange_config) + return JSONResponse(content={ + exchange: pairs_and_tf[exchange_core.ExchangeInfo.PAIRS.value] + for exchange, pairs_and_tf in pairs_and_tf_by_exchange.items() + }) + + +@router.get("/traded-pairs-and-timeframes") +async def get_traded_pairs_and_timeframes( + exchange_config: typing.Annotated[exchange_core.ExchangeConfig, Query()], +) -> JSONResponse: + return JSONResponse( + content=await exchange_core.get_traded_pairs_and_timeframes_by_exchange(exchange_config) + ) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/login.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/login.py new file mode 100644 index 0000000000..d911221700 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/login.py @@ -0,0 +1,32 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing + +from fastapi import APIRouter + +try: + from tentacles.Services.Interfaces.node_api_interface.api.deps import CurrentUser +except ImportError: + from api.deps import CurrentUser +import octobot_node.models + +router = APIRouter(tags=["login"]) + + +@router.get("/login/test", response_model=octobot_node.models.User) +def test_auth(current_user: CurrentUser) -> typing.Any: + return current_user diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/logs.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/logs.py new file mode 100644 index 0000000000..6018eed4a2 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/logs.py @@ -0,0 +1,65 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import os +import tempfile +import typing + +import pydantic +from fastapi import APIRouter, Depends +from fastapi.security import HTTPBasicCredentials + +import octobot_node.constants as node_constants +import octobot.community.errors_upload.error_sharing as error_sharing + +try: + from tentacles.Services.Interfaces.node_api_interface.api.deps import CurrentUser, security_basic +except ImportError: + from api.deps import CurrentUser, security_basic + +router = APIRouter(tags=["logs"]) + + +class ShareLogsRequest(pydantic.BaseModel): + automation_ids: typing.Optional[list[str]] = None + + +@router.post("/share") +async def share_logs( + current_user: CurrentUser, + credentials: typing.Annotated[typing.Optional[HTTPBasicCredentials], Depends(security_basic)], + body: typing.Optional[ShareLogsRequest] = None, +) -> typing.Any: + try: + with tempfile.NamedTemporaryFile(suffix="", delete=False) as tmp: + export_path = tmp.name + passphrase = credentials.password if credentials else None + log_paths = None + if body and body.automation_ids: + log_paths = [ + os.path.join(node_constants.AUTOMATION_LOGS_FOLDER, f"{automation_id}.log") + for automation_id in body.automation_ids + ] + result = await error_sharing.share_logs(export_path, passphrase, log_paths) + if result is None: + return {"success": False, "error": "Not connected to octobot.cloud"} + return { + "success": True, + "errorId": result.get("errorId"), + "errorSecret": result.get("errorSecret"), + } + except Exception as e: + return {"success": False, "error": str(e)} diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/market_making.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/market_making.py new file mode 100644 index 0000000000..d1eb5d96f1 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/market_making.py @@ -0,0 +1,41 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing + +from fastapi import APIRouter, Body +from fastapi.responses import JSONResponse, Response + +try: + import tentacles.Trading.Mode.simple_market_making_trading_mode.api.api_handlers as market_making_handlers +except ImportError: + # market making tentacles are not available + pass + +router = APIRouter(prefix="/market-making", tags=["market-making"]) + + +@router.get("/ping") +def market_making_ping() -> Response: + return Response(status_code=200, content=b"") + + +@router.post("/") +async def market_making_root( + body: typing.Optional[dict] = Body(default=None), +) -> JSONResponse: + payload, status_code = await market_making_handlers.dispatch_market_making_request(body) + return JSONResponse(content=payload, status_code=status_code) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/nodes.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/nodes.py new file mode 100644 index 0000000000..33aa005ca9 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/nodes.py @@ -0,0 +1,76 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import logging +import typing + +from fastapi import APIRouter + +import octobot_node.config +import octobot_node.constants +import octobot_node.models +import octobot_node.scheduler.api +import octobot_node.scheduler.scheduler + +try: + import octobot_commons.logging.context_based_file_handler as context_based_file_handler +except ImportError: + context_based_file_handler = None + +router = APIRouter(tags=["nodes"]) + + +@router.get("/me", response_model=octobot_node.models.Node) +def get_current_node() -> typing.Any: + status = octobot_node.scheduler.api.get_node_status() + return octobot_node.models.Node(**status) + + +@router.get("/config") +def get_node_config() -> typing.Any: + return { + "node_type": "master" if octobot_node.config.settings.IS_MASTER_MODE else "standalone", + "use_dedicated_log_file_per_automation": octobot_node.config.settings.USE_DEDICATED_LOG_FILE_PER_AUTOMATION, + "tasks_encryption_enabled": octobot_node.config.settings.tasks_encryption_enabled, + "server_encryption_env_vars": octobot_node.constants.TASKS_ENCRYPTION_ENV_VARS, + } + + +@router.patch("/config") +def update_node_config(config: dict) -> typing.Any: + if "node_type" in config: + octobot_node.config.settings.IS_MASTER_MODE = config["node_type"] == "master" + if "use_dedicated_log_file_per_automation" in config: + value = bool(config["use_dedicated_log_file_per_automation"]) + octobot_node.config.settings.USE_DEDICATED_LOG_FILE_PER_AUTOMATION = value + if value: + octobot_node.scheduler.scheduler.Scheduler._setup_workflow_logging() + else: + _remove_context_based_file_handlers() + return get_node_config() + + +def _remove_context_based_file_handlers() -> None: + if context_based_file_handler is None: + return + root_logger = logging.getLogger() + to_remove = [ + h for h in root_logger.handlers + if isinstance(h, context_based_file_handler.ContextBasedFileHandler) + ] + for handler in to_remove: + handler.close() + root_logger.removeHandler(handler) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/setup.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/setup.py new file mode 100644 index 0000000000..7a80648720 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/setup.py @@ -0,0 +1,99 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing + +import pydantic +from fastapi import APIRouter, Depends, HTTPException, status +from fastapi.security import HTTPBasicCredentials + +import octobot_node.config as node_config +import octobot.community.authentication as community_auth + +try: + from tentacles.Services.Interfaces.node_api_interface.api.deps import CurrentUser, security_basic +except ImportError: + from api.deps import CurrentUser, security_basic + +router = APIRouter(tags=["setup"]) + + +class SetupStatus(pydantic.BaseModel): + configured: bool + + +class SetupInit(pydantic.BaseModel): + passphrase: str + node_type: typing.Literal["standalone", "master"] + private_key: typing.Optional[str] = None + + +class SetupResult(pydantic.BaseModel): + address: str + + +class WalletExport(pydantic.BaseModel): + address: str + private_key: str + + +@router.get("/setup/status", response_model=SetupStatus) +def get_setup_status() -> SetupStatus: + auth = community_auth.CommunityAuthentication.instance() + configured = auth is not None and auth.is_node_wallet_configured() + return SetupStatus(configured=configured) + + +@router.post("/setup/init", response_model=SetupResult) +def init_setup(body: SetupInit) -> SetupResult: + auth = community_auth.CommunityAuthentication.instance() + if auth is None: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Service not initialized", + ) + if auth.is_node_wallet_configured(): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="Node is already configured", + ) + try: + if body.private_key: + wallet = auth.import_and_encrypt_node_wallet(body.private_key, body.passphrase) + else: + wallet = auth.create_and_encrypt_node_wallet(body.passphrase) + except ValueError as err: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=str(err), + ) from err + node_config.settings.IS_MASTER_MODE = body.node_type == "master" + return SetupResult(address=wallet.address) + + +@router.get("/setup/wallet/export", response_model=WalletExport) +def export_wallet( + current_user: CurrentUser, + credentials: typing.Annotated[typing.Optional[HTTPBasicCredentials], Depends(security_basic)], +) -> WalletExport: + auth = community_auth.CommunityAuthentication.instance() + if auth is None or credentials is None: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Node not configured", + ) + wallet = auth.decrypt_node_wallet(credentials.password) + return WalletExport(address=wallet.address, private_key=wallet.private_key) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/tasks.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/tasks.py new file mode 100644 index 0000000000..4859a7301e --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/tasks.py @@ -0,0 +1,78 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing +import uuid +from fastapi import APIRouter, HTTPException, Query +from pydantic import BaseModel + +import octobot_node.config +import octobot_node.models +import octobot_node.scheduler.api +import octobot_node.scheduler.tasks + +router = APIRouter(tags=["tasks"]) + +@router.post("/", response_model=tuple[int, int]) +async def create_tasks(tasks: list[octobot_node.models.Task]) -> tuple[int, int]: + success_count = 0 + error_count = 0 + for task in tasks: + is_scheduled = await octobot_node.scheduler.tasks.trigger_task(task) + if is_scheduled: + success_count += 1 + else: + error_count += 1 + return success_count, error_count + + +@router.get("/server-public-keys") +def get_server_public_keys() -> dict: + if not octobot_node.config.settings.is_node_side_encryption_enabled: + raise HTTPException(status_code=400, detail="Server encryption keys not configured") + from cryptography.hazmat.primitives.serialization import load_pem_private_key, Encoding, PublicFormat + rsa_private = load_pem_private_key(octobot_node.config.settings.TASKS_SERVER_RSA_PRIVATE_KEY, password=None) + ecdsa_private = load_pem_private_key(octobot_node.config.settings.TASKS_SERVER_ECDSA_PRIVATE_KEY, password=None) + return { + "server_rsa_public_pem": rsa_private.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(), + "server_ecdsa_public_pem": ecdsa_private.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(), + } + + +@router.get("/metrics") +async def get_metrics() -> typing.Any: + return await octobot_node.scheduler.api.get_task_metrics() + +@router.get("/", response_model=list[octobot_node.models.Task], response_model_exclude_none=True) +async def get_tasks(page: int = 1, limit: int = 100) -> typing.Any: + tasks_data = await octobot_node.scheduler.api.get_all_tasks() + + start_idx = (page - 1) * limit + end_idx = start_idx + limit + paginated_tasks = tasks_data[start_idx:end_idx] + return paginated_tasks + +@router.put("/", response_model=octobot_node.models.Task) +def update_task(taskId: uuid.UUID, task: octobot_node.models.Task) -> typing.Any: + # TODO + return task + +@router.delete("/", response_model=list[str]) +async def delete_tasks(taskIds: list[uuid.UUID] = Query(...)) -> list[str]: + try: + return await octobot_node.scheduler.api.delete_tasks([str(t) for t in taskIds]) + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/users.py b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/users.py new file mode 100644 index 0000000000..92875cf7fb --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/api/routes/users.py @@ -0,0 +1,32 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing +from fastapi import APIRouter + +import octobot_node.models + +# Import from tentacles package (runtime) or fallback to direct imports (build) +try: + from tentacles.Services.Interfaces.node_api_interface.api.deps import CurrentUser +except ImportError: + from api.deps import CurrentUser + +router = APIRouter(tags=["users"]) + +@router.get("/me", response_model=octobot_node.models.User) +def read_user_me(current_user: CurrentUser) -> typing.Any: + return current_user diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/build_openapi.py b/packages/tentacles/Services/Interfaces/node_api_interface/build_openapi.py new file mode 100644 index 0000000000..a722504f60 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/build_openapi.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +""" +Build script to generate OpenAPI specification for NodeApiInterface. +This script can be run independently without requiring tentacles to be installed. +""" +import json +import sys +from pathlib import Path + +# Add current directory to path to enable imports +sys.path.insert(0, str(Path(__file__).parent)) + +# Import node_api module +import node_api + +# Generate OpenAPI spec +app = node_api.NodeApiInterface.create_app() +openapi_spec = app.openapi() + +# Output to stdout +print(json.dumps(openapi_spec, indent=2)) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/core/__init__.py b/packages/tentacles/Services/Interfaces/node_api_interface/core/__init__.py new file mode 100644 index 0000000000..ad25c24157 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/core/__init__.py @@ -0,0 +1,15 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/core/config.py b/packages/tentacles/Services/Interfaces/node_api_interface/core/config.py new file mode 100644 index 0000000000..979bf296e4 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/core/config.py @@ -0,0 +1,17 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from octobot_node.config import * diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/core/exchanges.py b/packages/tentacles/Services/Interfaces/node_api_interface/core/exchanges.py new file mode 100644 index 0000000000..241da642a7 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/core/exchanges.py @@ -0,0 +1,88 @@ +import typing +import enum +import pydantic + +import octobot_commons.constants +import octobot_commons.profiles.profile_data +import octobot_trading.enums +import octobot_trading.exchanges +import octobot_trading.api +import octobot_tentacles_manager.api +import octobot.community.models.formatters as community_formatters + + +class ExchangeInfo(enum.Enum): + PAIRS = "pairs" + TIMEFRAMES = "timeframes" + + +class ExchangeConfig(pydantic.BaseModel): + name: str + sandboxed: bool = False + exchange_type: typing.Optional[str] = None + url: typing.Optional[str] = None + + +async def get_traded_pairs_and_timeframes_by_exchange( + exchange_config: ExchangeConfig, +) -> dict[str, dict[str, list[str]]]: + traded_pairs_and_tf_by_exchange = {} + tentacles_setup_config = octobot_tentacles_manager.api.get_full_tentacles_setup_config() + profile_data = _get_exchange_profile_data(exchange_config) + for exchange in profile_data.exchanges: + internal_name = exchange.internal_name + local_exchange_type = octobot_trading.enums.ExchangeTypes(exchange.exchange_type) + exchange_data = octobot_trading.exchanges.exchange_data_factory( + internal_name, + exchange_type=local_exchange_type.value + ) + async with octobot_trading.exchanges.exchange_manager_from_exchange_data( + exchange_data, profile_data, tentacles_setup_config, None + ) as exchange_manager: + traded_pairs_and_tf_by_exchange[internal_name] = { + ExchangeInfo.PAIRS.value: list( + octobot_trading.api.get_all_available_symbols(exchange_manager, exchange_type=local_exchange_type) + ), + ExchangeInfo.TIMEFRAMES.value: list( + octobot_trading.api.get_all_available_time_frames(exchange_manager) + ), + } + return traded_pairs_and_tf_by_exchange + + +def _get_exchange_profile_data(exchange_config: ExchangeConfig) -> octobot_commons.profiles.profile_data.ProfileData: + tentacles_data = [] + local_name = community_formatters.to_bot_exchange_internal_name(exchange_config.name) + exchange_type = community_formatters.get_exchange_type_from_internal_name(exchange_config.name) + if exchange_config.url: + import tentacles.Trading.Exchange + import tentacles.Meta.Keywords.scripting_library as scripting_library + exchange_config_update = {} + if scripting_library.is_exchange_with_different_public_data_after_auth(local_name): + exchange_config_update[octobot_commons.constants.CONFIG_FORCE_AUTHENTICATION] = True + exchange_tentacle_name = tentacles.Trading.Exchange.HollaexAutofilled.get_name() + tentacle_config = {**exchange_config_update, **{ + tentacles.Trading.Exchange.HollaexAutofilled.AUTO_FILLED_KEY: { + local_name: { + tentacles.Trading.Exchange.HollaexAutofilled.URL_KEY: exchange_config.url + } + } + }} + tentacles_data.append(octobot_commons.profiles.profile_data.TentaclesData( + exchange_tentacle_name, tentacle_config + )) + return octobot_commons.profiles.profile_data.ProfileData( + octobot_commons.profiles.profile_data.ProfileDetailsData(), + [], + octobot_commons.profiles.profile_data.TradingData(""), + exchanges=[octobot_commons.profiles.profile_data.ExchangeData( + internal_name=local_name, + sandboxed=exchange_config.sandboxed, + exchange_type=( + exchange_config.exchange_type + if exchange_config.exchange_type is not None + else exchange_type + ), + )], + tentacles=tentacles_data + ) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/core/security.py b/packages/tentacles/Services/Interfaces/node_api_interface/core/security.py new file mode 100644 index 0000000000..5ba9cb45e8 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/core/security.py @@ -0,0 +1,43 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from datetime import datetime, timedelta, timezone +from typing import Any + +import jwt +from passlib.context import CryptContext + +from octobot_node.config import settings + +pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") + + +ALGORITHM = "HS256" + + +def create_access_token(subject: str | Any, expires_delta: timedelta) -> str: + expire = datetime.now(timezone.utc) + expires_delta + to_encode = {"exp": expire, "sub": str(subject)} + encoded_jwt = jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM) + return encoded_jwt + + +def verify_password(plain_password: str, hashed_password: str) -> bool: + return pwd_context.verify(plain_password, hashed_password) + + +def get_password_hash(password: str) -> str: + return pwd_context.hash(password) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/enums.py b/packages/tentacles/Services/Interfaces/node_api_interface/enums.py new file mode 100644 index 0000000000..2f580641e4 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/enums.py @@ -0,0 +1,17 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from octobot_node.enums import * diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/metadata.json b/packages/tentacles/Services/Interfaces/node_api_interface/metadata.json new file mode 100644 index 0000000000..f4664ef498 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/metadata.json @@ -0,0 +1,7 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["NodeApiInterface"], + "tentacles-requirements": ["node_api_service"], + "build": ["python build_openapi.py > ../node_web_interface/openapi.json"] +} diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/models.py b/packages/tentacles/Services/Interfaces/node_api_interface/models.py new file mode 100644 index 0000000000..d6c2fb407d --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/models.py @@ -0,0 +1,17 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +from octobot_node.models import * diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/node_api.py b/packages/tentacles/Services/Interfaces/node_api_interface/node_api.py new file mode 100644 index 0000000000..8c6c61ff89 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/node_api.py @@ -0,0 +1,158 @@ +# Drakkar-Software OctoBot-Interfaces +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +from contextlib import asynccontextmanager + +import uvicorn +from fastapi import FastAPI, HTTPException, Request +from fastapi.responses import FileResponse +from fastapi.routing import APIRoute +from fastapi.staticfiles import StaticFiles +from starlette.middleware.cors import CORSMiddleware + +import octobot.community.authentication as community_auth +import octobot_services.interfaces as services_interfaces +import octobot_node.config as node_config +import octobot_node.scheduler as scheduler # noqa: F401 + + +# Service_bases is only needed at runtime, not for build +try: + import tentacles.Services.Services_bases as Service_bases +except ImportError: + Service_bases = None + +# Import from tentacles package (runtime) or fallback to direct imports (build) +try: + from tentacles.Services.Interfaces.node_api_interface.utils import get_dist_directory + from tentacles.Services.Interfaces.node_api_interface.api.main import build_api_router +except ImportError: + import utils + import api.main + get_dist_directory = utils.get_dist_directory + build_api_router = api.main.build_api_router + +def custom_generate_unique_id(route: APIRoute) -> str: + if route.tags: + return f"{route.tags[0]}-{route.name}" + # Fallback for routes without tags (e.g., SPA root) + return route.name or route.path.replace("/", "-").strip("-") + + +class NodeApiInterface(services_interfaces.AbstractInterface): + API_NAME = "OctoBot Node API" + + try: + REQUIRED_SERVICES = [Service_bases.NodeApiService] + except AttributeError: + # fallback to empty array (build time) + REQUIRED_SERVICES = [] + + def __init__(self, config): + super().__init__(config) + self.logger = self.get_logger() + self.server = None + self.app = None + self.host = None + self.port = None + self.node_api_service = None + + async def _inner_start(self) -> bool: + return self.threaded_start() + + async def _async_run(self) -> bool: + if self.node_api_service is None: + self.node_api_service = Service_bases.NodeApiService.instance() + self.host = self.node_api_service.get_bind_host() + self.port = self.node_api_service.get_bind_port() + node_sqlite_file = self.node_api_service.get_node_sqlite_file() + node_postgres_url = self.node_api_service.get_node_postgres_url() + if node_sqlite_file: + node_config.settings.SCHEDULER_SQLITE_FILE = node_sqlite_file + if node_postgres_url is not None: + node_config.settings.SCHEDULER_POSTGRES_URL = node_postgres_url + host = self.host + port = self.port + community_auth.CommunityAuthentication.instance() + self.app = self.create_app() + # Set CORS from service config + cors_origins_str = self.node_api_service.get_backend_cors_origins() + if cors_origins_str: + cors_origins = [i.strip() for i in cors_origins_str.split(",") if i.strip()] + self.app.add_middleware( + CORSMiddleware, + allow_origins=cors_origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + config = uvicorn.Config(self.app, host=host, port=port, log_level="info") + self.server = uvicorn.Server(config) + await self.server.serve() + return True + + async def stop(self): + if self.server is not None: + self.server.should_exit = True + + @classmethod + def create_app(cls) -> FastAPI: + @asynccontextmanager + async def lifespan(app: FastAPI): + yield + # Shutdown: trading signal channel first, then DBOS + await scheduler.shutdown_scheduler_and_trading_signal_channel() + + app = FastAPI( + title=cls.API_NAME, + openapi_url="/api/v1/openapi.json", + generate_unique_id_function=custom_generate_unique_id, + lifespan=lifespan, + ) + + app.include_router(build_api_router(), prefix="/api/v1") + + # Get the path to the dist folder (works for both development and installed packages) + dist_dir = get_dist_directory() + + # Serve static files from the dist folder only if UI is enabled + if dist_dir: + assets_dir = dist_dir / "assets" + if assets_dir.exists(): + # Mount assets under /app/assets to match the SPA base path + app.mount("/app/assets", StaticFiles(directory=str(assets_dir)), name="assets") + + # Serve SPA root for /app + @app.get("/app") + async def serve_spa_app_root(): + index_path = dist_dir / "index.html" + if index_path.exists(): + return FileResponse(str(index_path)) + raise HTTPException(status_code=404, detail="Frontend build not found") + + # Serve SPA for /app routes + @app.get("/app/{path:path}") + async def serve_spa_app(request: Request, path: str): + # Don't interfere with assets (already handled by mount) + if path.startswith("assets/"): + raise HTTPException(status_code=404) + + # Serve index.html for all /app routes (SPA routing) + index_path = dist_dir / "index.html" + if index_path.exists(): + return FileResponse(str(index_path)) + raise HTTPException(status_code=404, detail="Frontend build not found") + + return app diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/tests/__init__.py b/packages/tentacles/Services/Interfaces/node_api_interface/tests/__init__.py new file mode 100644 index 0000000000..ad25c24157 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/tests/__init__.py @@ -0,0 +1,15 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/tests/conftest.py b/packages/tentacles/Services/Interfaces/node_api_interface/tests/conftest.py new file mode 100644 index 0000000000..927fa3292f --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/tests/conftest.py @@ -0,0 +1,74 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing + +import pytest +from fastapi import FastAPI +from fastapi.testclient import TestClient +from starlette.middleware.cors import CORSMiddleware + + +import tentacles.Services.Interfaces.node_api_interface as node_api_interface_module + + +@pytest.fixture() +def app() -> FastAPI: + fastapi_app = node_api_interface_module.NodeApiInterface.create_app() + fastapi_app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + return fastapi_app + + +@pytest.fixture() +def client(app: FastAPI) -> TestClient: + return TestClient(app) + + +def assert_response_headers( + response, + expected_content_type: typing.Optional[str] = None, + expected_content_length: typing.Optional[int] = None, +): + headers = {header_key.lower(): header_value for header_key, header_value in response.headers.items()} + if expected_content_type is not None: + assert headers["content-type"] == expected_content_type, ( + f"Content-Type is {headers.get('content-type')}" + ) + else: + assert headers["content-type"] == "application/json", ( + f"Content-Type is {headers.get('content-type')}" + ) + # Starlette CORSMiddleware only sets this when the request is a CORS request (e.g. Origin set); + # plain TestClient POST without Origin may omit the header. + allow_origin = headers.get("access-control-allow-origin") + if allow_origin is not None: + assert allow_origin == "*", ( + f"Access-Control-Allow-Origin is {allow_origin}" + ) + if expected_content_length is not None: + assert int(headers["content-length"]) == expected_content_length, ( + f"Content-Length is {headers.get('content-length')}" + ) + else: + assert int(headers.get("content-length", 0)) > 0, ( + f"Content-Length is {headers.get('content-length')}" + ) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/tests/core/test_exchanges.py b/packages/tentacles/Services/Interfaces/node_api_interface/tests/core/test_exchanges.py new file mode 100644 index 0000000000..7c812a83cc --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/tests/core/test_exchanges.py @@ -0,0 +1,85 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or modify it under the terms of the GNU +# General Public License as published by the Free Software Foundation; either version 3.0 of the +# License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with OctoBot. If not, +# see . + +# Functional tests for node_api_interface.core.exchanges (real public exchange data; no mocks). +# Migrated from simple_market_making_trading_mode TestGetTradedPairs (handler layer → core API). + +import os + +import octobot_commons.symbols.symbol_util as commons_symbols +import octobot_trading.enums +import pytest + +import tentacles.Services.Interfaces.node_api_interface.core.exchanges as exchanges + + +def _is_github_actions() -> bool: + return bool(os.getenv("GITHUB_ACTIONS")) + + +# binanceus works on GitHub CI; binance is used for local runs. +def _public_exchange_name_for_test() -> str: + return "binanceus" if _is_github_actions() else "binance" + + +LIQUID_TEST_SYMBOL = "BTC/USDT" +_INVALID_EXCHANGE_TYPE = "not_a_spot_type" + +pytestmark = pytest.mark.asyncio + + +def _spot_exchange_config() -> exchanges.ExchangeConfig: + return exchanges.ExchangeConfig( + name=_public_exchange_name_for_test(), + exchange_type=octobot_trading.enums.ExchangeTypes.SPOT.value, + sandboxed=False, + ) + + +class TestGetTradedPairsAndTimeframesByExchange: + async def test_includes_btc_usdt( + self, + ) -> None: + public_name = _public_exchange_name_for_test() + config = _spot_exchange_config() + result = await exchanges.get_traded_pairs_and_timeframes_by_exchange(config) + assert public_name in result + pair_key = exchanges.ExchangeInfo.PAIRS.value + assert pair_key in result[public_name] + pairs_list = result[public_name][pair_key] + assert isinstance(pairs_list, list) + assert len(pairs_list) > 0 + assert LIQUID_TEST_SYMBOL in pairs_list + for traded_symbol in pairs_list: + assert commons_symbols.parse_symbol(traded_symbol).is_spot(), ( + f"with exchange_type=spot, only spot pairs are returned; got {traded_symbol!r}" + ) + + def test_accepts_arbitrary_exchange_type_string_on_config( + self, + ) -> None: + config = exchanges.ExchangeConfig.model_validate( + { + "name": _public_exchange_name_for_test(), + "exchange_type": _INVALID_EXCHANGE_TYPE, + "sandboxed": False, + } + ) + assert config.exchange_type == _INVALID_EXCHANGE_TYPE + + def test_rejects_invalid_exchange_type_enum_value( + self, + ) -> None: + with pytest.raises(ValueError): + octobot_trading.enums.ExchangeTypes(_INVALID_EXCHANGE_TYPE) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/tests/test_exchanges_api.py b/packages/tentacles/Services/Interfaces/node_api_interface/tests/test_exchanges_api.py new file mode 100644 index 0000000000..6b47dbed5d --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/tests/test_exchanges_api.py @@ -0,0 +1,126 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or modify it under the terms of the GNU +# General Public License as published by the Free Software Foundation; either version 3.0 of the +# License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with OctoBot. If not, +# see . + +import os +import mock +import typing + +import octobot_commons.symbols.symbol_util as commons_symbols +import tentacles.Services.Interfaces.node_api_interface.core.exchanges as node_exchanges_core + +from tentacles.Services.Interfaces.node_api_interface.tests.conftest import assert_response_headers + + +_TRADED_PAIRS = "/api/v1/exchanges/traded-pairs" +_TRADED_PAIRS_AND_TIMEFRAMES = "/api/v1/exchanges/traded-pairs-and-timeframes" +_INVALID_EXCHANGE_TYPE = "not_a_spot_type" + + +def _is_github_actions() -> bool: + return bool(os.getenv("GITHUB_ACTIONS")) + + +# binanceus works on GitHub CI; binance is used for local runs. +def _remote_exchange_display_name() -> str: + return "binanceus" if _is_github_actions() else "binance" + + +def _query_params_for_spot_exchange() -> dict[str, typing.Any]: + return { + "name": _remote_exchange_display_name(), + "exchange_type": "spot", + "sandboxed": False, + } + + +def _mock_pairs_timeframes_payload() -> dict[str, dict[str, list[str]]]: + pair_key = node_exchanges_core.ExchangeInfo.PAIRS.value + timeframe_key = node_exchanges_core.ExchangeInfo.TIMEFRAMES.value + return { + "binance": { + pair_key: ["BTC/USDT", "ETH/USDC"], + timeframe_key: ["1h", "4h"], + } + } + + +class TestExchangesGetTradedPairs: + def test_returns_pairs_map_only( + self, + client: typing.Any, + ) -> None: + raw = _mock_pairs_timeframes_payload() + with mock.patch.object( + node_exchanges_core, + "get_traded_pairs_and_timeframes_by_exchange", + mock.AsyncMock(return_value=raw), + ) as get_pairs_tf_mock: + response = client.get( + _TRADED_PAIRS, + params={"name": "binance", "exchange_type": "spot", "sandboxed": False}, + ) + get_pairs_tf_mock.assert_awaited_once() + assert response.status_code == 200 + pair_key = node_exchanges_core.ExchangeInfo.PAIRS.value + assert response.json() == { + "binance": raw["binance"][pair_key], + } + assert_response_headers(response) + + +class TestExchangesGetTradedPairsAndTimeframes: + def test_returns_full_payload_from_core( + self, + client: typing.Any, + ) -> None: + expected = _mock_pairs_timeframes_payload() + with mock.patch.object( + node_exchanges_core, + "get_traded_pairs_and_timeframes_by_exchange", + mock.AsyncMock(return_value=expected), + ) as get_pairs_tf_mock: + response = client.get( + _TRADED_PAIRS_AND_TIMEFRAMES, + params={"name": "binance", "exchange_type": "spot", "sandboxed": False}, + ) + get_pairs_tf_mock.assert_awaited_once() + assert response.status_code == 200 + assert response.json() == expected + assert_response_headers(response) + + +class TestExchangesTradedPairsIntegration: + def test_includes_btc_usdt_on_public_exchange( + self, + client: typing.Any, + ) -> None: + display_name = _remote_exchange_display_name() + response = client.get( + _TRADED_PAIRS_AND_TIMEFRAMES, + params=_query_params_for_spot_exchange(), + ) + assert response.status_code == 200 + body: dict = response.json() + assert display_name in body + pair_key = node_exchanges_core.ExchangeInfo.PAIRS.value + assert pair_key in body[display_name] + pairs_list: list = body[display_name][pair_key] + assert isinstance(pairs_list, list) + assert len(pairs_list) > 0 + assert "BTC/USDT" in pairs_list + for traded_symbol in pairs_list: + assert commons_symbols.parse_symbol(traded_symbol).is_spot(), ( + f"with exchange_type=spot, only spot pairs are returned; got {traded_symbol!r}" + ) + assert_response_headers(response) diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/tests/test_register_all_provider_routes.py b/packages/tentacles/Services/Interfaces/node_api_interface/tests/test_register_all_provider_routes.py new file mode 100644 index 0000000000..1263fe3e73 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/tests/test_register_all_provider_routes.py @@ -0,0 +1,86 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import typing + +import fastapi +import fastapi.routing + +import tentacles.Services.Interfaces.node_api_interface.api.route_provider as route_provider + + +def _api_route_paths(api_router: fastapi.APIRouter) -> set[str]: + return { + route.path + for route in api_router.routes + if isinstance(route, fastapi.routing.APIRoute) + } + + +class RouteProviderTestAlpha(route_provider.RouteProvider): + ROUTE_TYPE: typing.ClassVar[route_provider.RouteType] = ( + route_provider.RouteType.TENTACLES + ) + + def get_router(self) -> fastapi.APIRouter: + router = fastapi.APIRouter() + + @router.get("/route-provider-test-alpha") + def route_provider_test_alpha() -> dict[str, str]: + return {"registered": "alpha"} + + return router + + +class RouteProviderTestAbstractInName(route_provider.RouteProvider): + """ + ``class_inspector.is_abstract_using_inspection_and_class_naming`` treats any + class name containing the substring "abstract" as not installable. + """ + + def get_router(self) -> fastapi.APIRouter: + router = fastapi.APIRouter() + + @router.get("/route-provider-abstract-in-name-skip") + def route_do_not_register() -> dict[str, str]: + return {"should": "not appear"} + + return router + + +class UnimplementedRouteProvider(route_provider.RouteProvider): + pass + + +class TestRegisterAllProviderRoutes: + def test_includes_routers_from_concrete_subclasses(self) -> None: + api_router = fastapi.APIRouter() + route_provider.register_all_provider_routes(api_router) + paths = _api_route_paths(api_router) + assert "/tentacles/route-provider-test-alpha" in paths + + def test_skips_class_unimplemented_get_router(self) -> None: + assert UnimplementedRouteProvider in route_provider.RouteProvider.__subclasses__() + api_router = fastapi.APIRouter() + route_provider.register_all_provider_routes(api_router) + paths = _api_route_paths(api_router) + assert not any("unimplemented" in path for path in paths) + + def test_skips_concrete_class_with_abstract_in_name(self) -> None: + api_router = fastapi.APIRouter() + route_provider.register_all_provider_routes(api_router) + paths = _api_route_paths(api_router) + assert "/route-provider-abstract-in-name-skip" not in paths diff --git a/packages/tentacles/Services/Interfaces/node_api_interface/utils.py b/packages/tentacles/Services/Interfaces/node_api_interface/utils.py new file mode 100644 index 0000000000..4e0e8d7c9c --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_api_interface/utils.py @@ -0,0 +1,39 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import pathlib + + +def get_dist_directory() -> pathlib.Path | None: + try: + import tentacles.Services.Interfaces.node_web_interface as node_web_interface + interface_path = pathlib.Path(node_web_interface.__file__).resolve().parent + dist_path = interface_path / "dist" + if dist_path.exists() and dist_path.is_dir(): + return dist_path + except (ImportError, ModuleNotFoundError, AttributeError): + pass + + # Fallback: try relative to current file (for development if module not found) + # Go up from node_api/utils.py -> node_api -> Interfaces -> node_web_interface -> dist + current_file = pathlib.Path(__file__).resolve() + interface_path = current_file.parent.parent / "node_web_interface" + dist_path = interface_path / "dist" + + if dist_path.exists() and dist_path.is_dir(): + return dist_path + + return None diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/.gitignore b/packages/tentacles/Services/Interfaces/node_web_interface/.gitignore new file mode 100644 index 0000000000..1c2c2dcf6c --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/.gitignore @@ -0,0 +1,250 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[codz] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py.cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock +#poetry.toml + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. +# https://pdm-project.org/en/latest/usage/project/#working-with-version-control +#pdm.lock +#pdm.toml +.pdm-python +.pdm-build/ + +# pixi +# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. +#pixi.lock +# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one +# in the .venv directory. It is recommended not to include this directory in version control. +.pixi + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.envrc +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Abstra +# Abstra is an AI-powered process automation framework. +# Ignore directories containing user credentials, local state, and settings. +# Learn more at https://abstra.io/docs +.abstra/ + +# Visual Studio Code +# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore +# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore +# and can be added to the global gitignore or merged into this file. However, if you prefer, +# you could uncomment the following to ignore the entire vscode folder +# .vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Cursor +# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to +# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data +# refer to https://docs.cursor.com/context/ignore-files +.cursorignore +.cursorindexingignore + +# Marimo +marimo/_static/ +marimo/_lsp/ +__marimo__/ + +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local +openapi.json + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? +/test-results/ +/playwright-report/ +/blob-report/ +/playwright/.cache/ +/playwright/.auth/ + + +# Don't ignore the frontend lib directory +!src/lib + +*.db +*.db-* +/tentacles + +.tanstack + +*.gen.ts diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/.nvmrc b/packages/tentacles/Services/Interfaces/node_web_interface/.nvmrc new file mode 100644 index 0000000000..a45fd52cc5 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/.nvmrc @@ -0,0 +1 @@ +24 diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/__init__.py b/packages/tentacles/Services/Interfaces/node_web_interface/__init__.py new file mode 100644 index 0000000000..50baf8621d --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/__init__.py @@ -0,0 +1,19 @@ +# This file is part of OctoBot Node (https://github.com/Drakkar-Software/OctoBot-Node) +# Copyright (c) 2025 Drakkar-Software, All rights reserved. +# +# OctoBot is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# OctoBot is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with OctoBot. If not, see . + +import octobot_commons.constants as commons_constants +if not commons_constants.USE_MINIMAL_LIBS: + from .node_web_interface import NodeWebInterface diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/biome.json b/packages/tentacles/Services/Interfaces/node_web_interface/biome.json new file mode 100644 index 0000000000..39a7c74da9 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/biome.json @@ -0,0 +1,44 @@ +{ + "$schema": "https://biomejs.dev/schemas/2.3.8/schema.json", + "assist": { "actions": { "source": { "organizeImports": "on" } } }, + "files": { + "includes": [ + "**", + "!**/dist/**/*", + "!**/node_modules/**/*", + "!**/src/routeTree.gen.ts", + "!**/src/client/**/*", + "!**/src/components/ui/**/*" + ] + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true, + "suspicious": { + "noExplicitAny": "off", + "noArrayIndexKey": "off" + }, + "style": { + "noNonNullAssertion": "off", + "noParameterAssign": "error", + "useSelfClosingElements": "error", + "noUselessElse": "error" + } + } + }, + "formatter": { + "indentStyle": "space" + }, + "javascript": { + "formatter": { + "quoteStyle": "double", + "semicolons": "asNeeded" + } + }, + "css": { + "parser": { + "tailwindDirectives": true + } + } +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/components.json b/packages/tentacles/Services/Interfaces/node_web_interface/components.json new file mode 100644 index 0000000000..2b0833f097 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/components.json @@ -0,0 +1,22 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": false, + "tsx": true, + "tailwind": { + "config": "", + "css": "src/index.css", + "baseColor": "neutral", + "cssVariables": true, + "prefix": "" + }, + "iconLibrary": "lucide", + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + }, + "registries": {} +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/index.html b/packages/tentacles/Services/Interfaces/node_web_interface/index.html new file mode 100644 index 0000000000..dc082e2e2a --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/index.html @@ -0,0 +1,13 @@ + + + + + + OctoBot Node + + + +
+ + + diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/metadata.json b/packages/tentacles/Services/Interfaces/node_web_interface/metadata.json new file mode 100644 index 0000000000..2a7aa6b460 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/metadata.json @@ -0,0 +1,8 @@ +{ + "version": "1.2.0", + "origin_package": "OctoBot-Default-Tentacles", + "tentacles": ["NodeWebInterface"], + "tentacles-requirements": ["node_api_interface"], + "build": ["npm install", "npm run generate-client", "npm run build"], + "include": ["dist/**/*", "node_web_interface.py"] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/node_web_interface.py b/packages/tentacles/Services/Interfaces/node_web_interface/node_web_interface.py new file mode 100644 index 0000000000..de45373de5 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/node_web_interface.py @@ -0,0 +1,26 @@ +# Drakkar-Software OctoBot-Interfaces +# Copyright (c) Drakkar-Software, All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3.0 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. +import octobot_services.interfaces as services_interfaces + + +class NodeWebInterface(services_interfaces.AbstractInterface): + REQUIRED_SERVICES = False + + async def _async_run(self) -> bool: + return True + + async def stop(self): + return None diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/openapi-ts.config.ts b/packages/tentacles/Services/Interfaces/node_web_interface/openapi-ts.config.ts new file mode 100644 index 0000000000..b5a69e20eb --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/openapi-ts.config.ts @@ -0,0 +1,33 @@ +import { defineConfig } from "@hey-api/openapi-ts" + +export default defineConfig({ + input: "./openapi.json", + output: "./src/client", + + plugins: [ + "legacy/axios", + { + name: "@hey-api/sdk", + // NOTE: this doesn't allow tree-shaking + asClass: true, + operationId: true, + classNameBuilder: "{{name}}Service", + methodNameBuilder: (operation) => { + // @ts-expect-error + let name: string = operation.name + // @ts-expect-error + const service: string = operation.service + + if (service && name.toLowerCase().startsWith(service.toLowerCase())) { + name = name.slice(service.length) + } + + return name.charAt(0).toLowerCase() + name.slice(1) + }, + }, + { + name: "@hey-api/schemas", + type: "json", + }, + ], +}) diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/package-lock.json b/packages/tentacles/Services/Interfaces/node_web_interface/package-lock.json new file mode 100644 index 0000000000..93b85ca189 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/package-lock.json @@ -0,0 +1,7201 @@ +{ + "name": "octobot-node", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "octobot-node", + "version": "0.0.0", + "dependencies": { + "@hookform/resolvers": "^5.2.2", + "@noble/curves": "^2.2.0", + "@radix-ui/react-avatar": "^1.1.11", + "@radix-ui/react-checkbox": "^1.3.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-radio-group": "^1.3.8", + "@radix-ui/react-scroll-area": "^1.2.10", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-tooltip": "^1.2.8", + "@tailwindcss/vite": "^4.1.18", + "@tanstack/react-query": "^5.90.12", + "@tanstack/react-query-devtools": "^5.91.1", + "@tanstack/react-router": "^1.142.11", + "@tanstack/react-router-devtools": "^1.142.8", + "@tanstack/react-table": "^8.21.3", + "axios": "1.13.2", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "form-data": "4.0.5", + "lucide-react": "^0.556.0", + "next-themes": "^0.4.6", + "react": "^19.1.1", + "react-dom": "^19.2.3", + "react-error-boundary": "^6.0.0", + "react-hook-form": "^7.68.0", + "react-icons": "^5.5.0", + "react-qr-code": "^2.0.18", + "sonner": "^2.0.7", + "tailwind-merge": "^3.4.0", + "tailwindcss": "^4.1.17", + "zod": "^4.2.1" + }, + "devDependencies": { + "@biomejs/biome": "^2.3.10", + "@hey-api/openapi-ts": "0.73.0", + "@playwright/test": "1.57.0", + "@tanstack/router-devtools": "^1.142.11", + "@tanstack/router-plugin": "^1.140.0", + "@types/node": "^25.0.2", + "@types/react": "^19.2.7", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react-swc": "^4.2.2", + "dotenv": "^17.2.3", + "tw-animate-css": "^1.4.0", + "typescript": "^5.9.3", + "vite": "^7.3.0", + "vitest": "^4.1.2" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz", + "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", + "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@biomejs/biome": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-2.3.11.tgz", + "integrity": "sha512-/zt+6qazBWguPG6+eWmiELqO+9jRsMZ/DBU3lfuU2ngtIQYzymocHhKiZRyrbra4aCOoyTg/BmY+6WH5mv9xmQ==", + "dev": true, + "license": "MIT OR Apache-2.0", + "bin": { + "biome": "bin/biome" + }, + "engines": { + "node": ">=14.21.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/biome" + }, + "optionalDependencies": { + "@biomejs/cli-darwin-arm64": "2.3.11", + "@biomejs/cli-darwin-x64": "2.3.11", + "@biomejs/cli-linux-arm64": "2.3.11", + "@biomejs/cli-linux-arm64-musl": "2.3.11", + "@biomejs/cli-linux-x64": "2.3.11", + "@biomejs/cli-linux-x64-musl": "2.3.11", + "@biomejs/cli-win32-arm64": "2.3.11", + "@biomejs/cli-win32-x64": "2.3.11" + } + }, + "node_modules/@biomejs/cli-darwin-arm64": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.3.11.tgz", + "integrity": "sha512-/uXXkBcPKVQY7rc9Ys2CrlirBJYbpESEDme7RKiBD6MmqR2w3j0+ZZXRIL2xiaNPsIMMNhP1YnA+jRRxoOAFrA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-darwin-x64": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.3.11.tgz", + "integrity": "sha512-fh7nnvbweDPm2xEmFjfmq7zSUiox88plgdHF9OIW4i99WnXrAC3o2P3ag9judoUMv8FCSUnlwJCM1B64nO5Fbg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.3.11.tgz", + "integrity": "sha512-l4xkGa9E7Uc0/05qU2lMYfN1H+fzzkHgaJoy98wO+b/7Gl78srbCRRgwYSW+BTLixTBrM6Ede5NSBwt7rd/i6g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64-musl": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.3.11.tgz", + "integrity": "sha512-XPSQ+XIPZMLaZ6zveQdwNjbX+QdROEd1zPgMwD47zvHV+tCGB88VH+aynyGxAHdzL+Tm/+DtKST5SECs4iwCLg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-2.3.11.tgz", + "integrity": "sha512-/1s9V/H3cSe0r0Mv/Z8JryF5x9ywRxywomqZVLHAoa/uN0eY7F8gEngWKNS5vbbN/BsfpCG5yeBT5ENh50Frxg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64-musl": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.3.11.tgz", + "integrity": "sha512-vU7a8wLs5C9yJ4CB8a44r12aXYb8yYgBn+WeyzbMjaCMklzCv1oXr8x+VEyWodgJt9bDmhiaW/I0RHbn7rsNmw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-arm64": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.3.11.tgz", + "integrity": "sha512-PZQ6ElCOnkYapSsysiTy0+fYX+agXPlWugh6+eQ6uPKI3vKAqNp6TnMhoM3oY2NltSB89hz59o8xIfOdyhi9Iw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-x64": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-2.3.11.tgz", + "integrity": "sha512-43VrG813EW+b5+YbDbz31uUsheX+qFKCpXeY9kfdAx+ww3naKxeVkTD9zLIWxUPfJquANMHrmW3wbe/037G0Qg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", + "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", + "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", + "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", + "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", + "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", + "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", + "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", + "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", + "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", + "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", + "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", + "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", + "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", + "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", + "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", + "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", + "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", + "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", + "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", + "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", + "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", + "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", + "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", + "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", + "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", + "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.4" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "license": "MIT" + }, + "node_modules/@hey-api/json-schema-ref-parser": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@hey-api/json-schema-ref-parser/-/json-schema-ref-parser-1.0.6.tgz", + "integrity": "sha512-yktiFZoWPtEW8QKS65eqKwA5MTKp88CyiL8q72WynrBs/73SAaxlSWlA2zW/DZlywZ5hX1OYzrCC0wFdvO9c2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jsdevtools/ono": "^7.1.3", + "@types/json-schema": "^7.0.15", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/hey-api" + } + }, + "node_modules/@hey-api/openapi-ts": { + "version": "0.73.0", + "resolved": "https://registry.npmjs.org/@hey-api/openapi-ts/-/openapi-ts-0.73.0.tgz", + "integrity": "sha512-sUscR3OIGW0k9U//28Cu6BTp3XaogWMDORj9H+5Du9E5AvTT7LZbCEDvkLhebFOPkp2cZAQfd66HiZsiwssBcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@hey-api/json-schema-ref-parser": "1.0.6", + "ansi-colors": "4.1.3", + "c12": "2.0.1", + "color-support": "1.1.3", + "commander": "13.0.0", + "handlebars": "4.7.8", + "open": "10.1.2" + }, + "bin": { + "openapi-ts": "bin/index.cjs" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=22.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/hey-api" + }, + "peerDependencies": { + "typescript": "^5.5.3" + } + }, + "node_modules/@hookform/resolvers": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-5.2.2.tgz", + "integrity": "sha512-A/IxlMLShx3KjV/HeTcTfaMxdwy690+L/ZADoeaTltLx+CVuzkeVIPuybK3jrRfw7YZnmdKsVVHAlEPIAEUNlA==", + "license": "MIT", + "dependencies": { + "@standard-schema/utils": "^0.3.0" + }, + "peerDependencies": { + "react-hook-form": "^7.55.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@jsdevtools/ono": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/@jsdevtools/ono/-/ono-7.1.3.tgz", + "integrity": "sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@noble/curves": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-2.2.0.tgz", + "integrity": "sha512-T/BoHgFXirb0ENSPBquzX0rcjXeM6Lo892a2jlYJkqk83LqZx0l1Of7DzlKJ6jkpvMrkHSnAcgb5JegL8SeIkQ==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "2.2.0" + }, + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-2.2.0.tgz", + "integrity": "sha512-IYqDGiTXab6FniAgnSdZwgWbomxpy9FtYvLKs7wCUs2a8RkITG+DFGO1DM9cr+E3/RgADRpFjrKVaJ1z6sjtEg==", + "license": "MIT", + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@playwright/test": { + "version": "1.57.0", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.57.0.tgz", + "integrity": "sha512-6TyEnHgd6SArQO8UO2OMTxshln3QMWBtPGrOCgs3wVEmQmwyuNtB10IZMfmYDE0riwNR1cu4q+pPcxMVtaG3TA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "playwright": "1.57.0" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-avatar": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.11.tgz", + "integrity": "sha512-0Qk603AHGV28BOBO34p7IgD5m+V5Sg/YovfayABkoDDBM5d3NCx0Mp4gGrjzLGes1jV5eNOE1r3itqOR33VC6Q==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-context": "1.1.3", + "@radix-ui/react-primitive": "2.1.4", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", + "integrity": "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.3.tgz", + "integrity": "sha512-ieIFACdMpYfMEjF0rEf5KLvfVyIkOz6PDGyNnP+u+4xQ6jny3VCgA4OgXOwNx2aUkxn8zx9fiVcM8CfFYv9Lxw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", + "integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.16.tgz", + "integrity": "sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-label": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.8.tgz", + "integrity": "sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.16.tgz", + "integrity": "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz", + "integrity": "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.10.tgz", + "integrity": "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz", + "integrity": "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz", + "integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-is-hydrated": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-is-hydrated/-/react-use-is-hydrated-0.1.0.tgz", + "integrity": "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.5.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.47", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.47.tgz", + "integrity": "sha512-8QagwMH3kNCuzD8EWL8R2YPW5e4OrHNSAHRFDdmFqEwEaD/KcNKjVoumo+gP2vW5eKB2UPbM6vTYiGZX0ixLnw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz", + "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz", + "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz", + "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz", + "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz", + "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz", + "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz", + "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz", + "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz", + "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz", + "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz", + "integrity": "sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz", + "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz", + "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz", + "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz", + "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz", + "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz", + "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz", + "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz", + "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz", + "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz", + "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz", + "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz", + "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz", + "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz", + "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@standard-schema/utils": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz", + "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==", + "license": "MIT" + }, + "node_modules/@swc/core": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.15.8.tgz", + "integrity": "sha512-T8keoJjXaSUoVBCIjgL6wAnhADIb09GOELzKg10CjNg+vLX48P93SME6jTfte9MZIm5m+Il57H3rTSk/0kzDUw==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "@swc/types": "^0.1.25" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.15.8", + "@swc/core-darwin-x64": "1.15.8", + "@swc/core-linux-arm-gnueabihf": "1.15.8", + "@swc/core-linux-arm64-gnu": "1.15.8", + "@swc/core-linux-arm64-musl": "1.15.8", + "@swc/core-linux-x64-gnu": "1.15.8", + "@swc/core-linux-x64-musl": "1.15.8", + "@swc/core-win32-arm64-msvc": "1.15.8", + "@swc/core-win32-ia32-msvc": "1.15.8", + "@swc/core-win32-x64-msvc": "1.15.8" + }, + "peerDependencies": { + "@swc/helpers": ">=0.5.17" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.15.8.tgz", + "integrity": "sha512-M9cK5GwyWWRkRGwwCbREuj6r8jKdES/haCZ3Xckgkl8MUQJZA3XB7IXXK1IXRNeLjg6m7cnoMICpXv1v1hlJOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-darwin-x64": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.15.8.tgz", + "integrity": "sha512-j47DasuOvXl80sKJHSi2X25l44CMc3VDhlJwA7oewC1nV1VsSzwX+KOwE5tLnfORvVJJyeiXgJORNYg4jeIjYQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.15.8.tgz", + "integrity": "sha512-siAzDENu2rUbwr9+fayWa26r5A9fol1iORG53HWxQL1J8ym4k7xt9eME0dMPXlYZDytK5r9sW8zEA10F2U3Xwg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.15.8.tgz", + "integrity": "sha512-o+1y5u6k2FfPYbTRUPvurwzNt5qd0NTumCTFscCNuBksycloXY16J8L+SMW5QRX59n4Hp9EmFa3vpvNHRVv1+Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.15.8.tgz", + "integrity": "sha512-koiCqL09EwOP1S2RShCI7NbsQuG6r2brTqUYE7pV7kZm9O17wZ0LSz22m6gVibpwEnw8jI3IE1yYsQTVpluALw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.15.8.tgz", + "integrity": "sha512-4p6lOMU3bC+Vd5ARtKJ/FxpIC5G8v3XLoPEZ5s7mLR8h7411HWC/LmTXDHcrSXRC55zvAVia1eldy6zDLz8iFQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.15.8.tgz", + "integrity": "sha512-z3XBnbrZAL+6xDGAhJoN4lOueIxC/8rGrJ9tg+fEaeqLEuAtHSW2QHDHxDwkxZMjuF/pZ6MUTjHjbp8wLbuRLA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.15.8.tgz", + "integrity": "sha512-djQPJ9Rh9vP8GTS/Df3hcc6XP6xnG5c8qsngWId/BLA9oX6C7UzCPAn74BG/wGb9a6j4w3RINuoaieJB3t+7iQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.15.8.tgz", + "integrity": "sha512-/wfAgxORg2VBaUoFdytcVBVCgf1isWZIEXB9MZEUty4wwK93M/PxAkjifOho9RN3WrM3inPLabICRCEgdHpKKQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.15.8.tgz", + "integrity": "sha512-GpMePrh9Sl4d61o4KAHOOv5is5+zt6BEXCOCgs/H0FLGeii7j9bWDE8ExvKFy2GRRZVNR1ugsnzaGWHKM6kuzA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/@swc/types": { + "version": "0.1.25", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.25.tgz", + "integrity": "sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.18.tgz", + "integrity": "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "enhanced-resolve": "^5.18.3", + "jiti": "^2.6.1", + "lightningcss": "1.30.2", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.18.tgz", + "integrity": "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==", + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-x64": "4.1.18", + "@tailwindcss/oxide-freebsd-x64": "4.1.18", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.18", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-x64-musl": "4.1.18", + "@tailwindcss/oxide-wasm32-wasi": "4.1.18", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.18", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.18.tgz", + "integrity": "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.18.tgz", + "integrity": "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.18.tgz", + "integrity": "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.18.tgz", + "integrity": "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.18.tgz", + "integrity": "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.18.tgz", + "integrity": "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.18.tgz", + "integrity": "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.18.tgz", + "integrity": "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.18.tgz", + "integrity": "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.18.tgz", + "integrity": "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.0", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", + "integrity": "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.18.tgz", + "integrity": "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/vite": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.18.tgz", + "integrity": "sha512-jVA+/UpKL1vRLg6Hkao5jldawNmRo7mQYrZtNHMIVpLfLhDml5nMRUo/8MwoX2vNXvnaXNNMedrMfMugAVX1nA==", + "license": "MIT", + "dependencies": { + "@tailwindcss/node": "4.1.18", + "@tailwindcss/oxide": "4.1.18", + "tailwindcss": "4.1.18" + }, + "peerDependencies": { + "vite": "^5.2.0 || ^6 || ^7" + } + }, + "node_modules/@tanstack/history": { + "version": "1.141.0", + "resolved": "https://registry.npmjs.org/@tanstack/history/-/history-1.141.0.tgz", + "integrity": "sha512-LS54XNyxyTs5m/pl1lkwlg7uZM3lvsv2FIIV1rsJgnfwVCnI+n4ZGZ2CcjNT13BPu/3hPP+iHmliBSscJxW5FQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/query-core": { + "version": "5.90.16", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.16.tgz", + "integrity": "sha512-MvtWckSVufs/ja463/K4PyJeqT+HMlJWtw6PrCpywznd2NSgO3m4KwO9RqbFqGg6iDE8vVMFWMeQI4Io3eEYww==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/query-devtools": { + "version": "5.92.0", + "resolved": "https://registry.npmjs.org/@tanstack/query-devtools/-/query-devtools-5.92.0.tgz", + "integrity": "sha512-N8D27KH1vEpVacvZgJL27xC6yPFUy0Zkezn5gnB3L3gRCxlDeSuiya7fKge8Y91uMTnC8aSxBQhcK6ocY7alpQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + "version": "5.90.16", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.16.tgz", + "integrity": "sha512-bpMGOmV4OPmif7TNMteU/Ehf/hoC0Kf98PDc0F4BZkFrEapRMEqI/V6YS0lyzwSV6PQpY1y4xxArUIfBW5LVxQ==", + "license": "MIT", + "dependencies": { + "@tanstack/query-core": "5.90.16" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^18 || ^19" + } + }, + "node_modules/@tanstack/react-query-devtools": { + "version": "5.91.2", + "resolved": "https://registry.npmjs.org/@tanstack/react-query-devtools/-/react-query-devtools-5.91.2.tgz", + "integrity": "sha512-ZJ1503ay5fFeEYFUdo7LMNFzZryi6B0Cacrgr2h1JRkvikK1khgIq6Nq2EcblqEdIlgB/r7XDW8f8DQ89RuUgg==", + "license": "MIT", + "dependencies": { + "@tanstack/query-devtools": "5.92.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@tanstack/react-query": "^5.90.14", + "react": "^18 || ^19" + } + }, + "node_modules/@tanstack/react-router": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/react-router/-/react-router-1.145.6.tgz", + "integrity": "sha512-hXCSqf9689C24SjfJJILX/pdsFknqzyhmCFXt278IwAfBgMKThePEY7x7rG8VCnWC29tdVC9YptCHqiNJYauxA==", + "license": "MIT", + "dependencies": { + "@tanstack/history": "1.141.0", + "@tanstack/react-store": "^0.8.0", + "@tanstack/router-core": "1.145.6", + "isbot": "^5.1.22", + "tiny-invariant": "^1.3.3", + "tiny-warning": "^1.0.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0" + } + }, + "node_modules/@tanstack/react-router-devtools": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/react-router-devtools/-/react-router-devtools-1.145.6.tgz", + "integrity": "sha512-zngg/C7Y3QPkIA24r3J4K8ihbi7xEVo3JIYog5inl3xKLSKbpgdRoBrLL+3ujmFgUMnSfXK4qn0YP7hpB3+6GA==", + "license": "MIT", + "dependencies": { + "@tanstack/router-devtools-core": "1.145.6" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@tanstack/react-router": "^1.145.6", + "@tanstack/router-core": "^1.145.6", + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0" + }, + "peerDependenciesMeta": { + "@tanstack/router-core": { + "optional": true + } + } + }, + "node_modules/@tanstack/react-store": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-store/-/react-store-0.8.0.tgz", + "integrity": "sha512-1vG9beLIuB7q69skxK9r5xiLN3ztzIPfSQSs0GfeqWGO2tGIyInZx0x1COhpx97RKaONSoAb8C3dxacWksm1ow==", + "license": "MIT", + "dependencies": { + "@tanstack/store": "0.8.0", + "use-sync-external-store": "^1.6.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@tanstack/react-table": { + "version": "8.21.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-table/-/react-table-8.21.3.tgz", + "integrity": "sha512-5nNMTSETP4ykGegmVkhjcS8tTLW6Vl4axfEGQN3v0zdHYbK4UfoqfPChclTrJ4EoK9QynqAu9oUf8VEmrpZ5Ww==", + "license": "MIT", + "dependencies": { + "@tanstack/table-core": "8.21.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/@tanstack/router-core": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/router-core/-/router-core-1.145.6.tgz", + "integrity": "sha512-pXUwrkMEwsM4w7G6QSGt/LwSl23NoyEXvTygpZiyzCzJatMvW9312mFVGbDGYZxAxNpCob1kJnKNxIH14a86nQ==", + "license": "MIT", + "dependencies": { + "@tanstack/history": "1.141.0", + "@tanstack/store": "^0.8.0", + "cookie-es": "^2.0.0", + "seroval": "^1.4.1", + "seroval-plugins": "^1.4.0", + "tiny-invariant": "^1.3.3", + "tiny-warning": "^1.0.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/router-devtools": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/router-devtools/-/router-devtools-1.145.6.tgz", + "integrity": "sha512-Tj6Yeg63wLxDAJPEG8b9sqikZusSzjgY4kh72heRgfOEowu+XEDLTr49RK5UKVh77uMvh8cb5Vy0rqO9wveVhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tanstack/react-router-devtools": "1.145.6", + "clsx": "^2.1.1", + "goober": "^2.1.16" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@tanstack/react-router": "^1.145.6", + "csstype": "^3.0.10", + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0" + }, + "peerDependenciesMeta": { + "csstype": { + "optional": true + } + } + }, + "node_modules/@tanstack/router-devtools-core": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/router-devtools-core/-/router-devtools-core-1.145.6.tgz", + "integrity": "sha512-SuGWcPPAFuJ9VAox5GhTIth4KHTTtXSTh1io8xee0EYLAaPfNx2jqL1ySJahPI1iuf6Bk+g5dzwpE/cHnfFiBw==", + "license": "MIT", + "dependencies": { + "clsx": "^2.1.1", + "goober": "^2.1.16", + "tiny-invariant": "^1.3.3" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@tanstack/router-core": "^1.145.6", + "csstype": "^3.0.10", + "solid-js": ">=1.9.5" + }, + "peerDependenciesMeta": { + "csstype": { + "optional": true + } + } + }, + "node_modules/@tanstack/router-generator": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/router-generator/-/router-generator-1.145.6.tgz", + "integrity": "sha512-OBQ+vWgrFm9ThQWI8vUN/uHpqvQ8idI31QFS09q7s/K6+mODonBS1OcnlRfPzBbQ8VHOv93yg63nkPifmmjM3A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tanstack/router-core": "1.145.6", + "@tanstack/router-utils": "1.143.11", + "@tanstack/virtual-file-routes": "1.145.4", + "prettier": "^3.5.0", + "recast": "^0.23.11", + "source-map": "^0.7.4", + "tsx": "^4.19.2", + "zod": "^3.24.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/router-generator/node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/@tanstack/router-plugin": { + "version": "1.145.6", + "resolved": "https://registry.npmjs.org/@tanstack/router-plugin/-/router-plugin-1.145.6.tgz", + "integrity": "sha512-/tMeAScTlHmt+aDr/YGiJDQ9Epm4869kk+LB3xaC8/1hnRS7i2Tp5gReNKPQu5NbqzVZimJHa8qOgxexmN3b+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.5", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.27.1", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@tanstack/router-core": "1.145.6", + "@tanstack/router-generator": "1.145.6", + "@tanstack/router-utils": "1.143.11", + "@tanstack/virtual-file-routes": "1.145.4", + "babel-dead-code-elimination": "^1.0.11", + "chokidar": "^3.6.0", + "unplugin": "^2.1.2", + "zod": "^3.24.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "@rsbuild/core": ">=1.0.2", + "@tanstack/react-router": "^1.145.6", + "vite": ">=5.0.0 || >=6.0.0 || >=7.0.0", + "vite-plugin-solid": "^2.11.10", + "webpack": ">=5.92.0" + }, + "peerDependenciesMeta": { + "@rsbuild/core": { + "optional": true + }, + "@tanstack/react-router": { + "optional": true + }, + "vite": { + "optional": true + }, + "vite-plugin-solid": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/@tanstack/router-plugin/node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/@tanstack/router-utils": { + "version": "1.143.11", + "resolved": "https://registry.npmjs.org/@tanstack/router-utils/-/router-utils-1.143.11.tgz", + "integrity": "sha512-N24G4LpfyK8dOlnP8BvNdkuxg1xQljkyl6PcrdiPSA301pOjatRT1y8wuCCJZKVVD8gkd0MpCZ0VEjRMGILOtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.5", + "@babel/generator": "^7.28.5", + "@babel/parser": "^7.28.5", + "ansis": "^4.1.0", + "diff": "^8.0.2", + "pathe": "^2.0.3", + "tinyglobby": "^0.2.15" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/store": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@tanstack/store/-/store-0.8.0.tgz", + "integrity": "sha512-Om+BO0YfMZe//X2z0uLF2j+75nQga6TpTJgLJQBiq85aOyZNIhkCgleNcud2KQg4k4v9Y9l+Uhru3qWMPGTOzQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/table-core": { + "version": "8.21.3", + "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.21.3.tgz", + "integrity": "sha512-ldZXEhOBb8Is7xLs01fR3YEc3DERiz5silj8tnGkFZytt1abEvl/GhUmCE0PMLaMPTa3Jk4HbKmRlHmu+gCftg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/virtual-file-routes": { + "version": "1.145.4", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-file-routes/-/virtual-file-routes-1.145.4.tgz", + "integrity": "sha512-CI75JrfqSluhdGwLssgVeQBaCphgfkMQpi8MCY3UJX1hoGzXa8kHYJcUuIFMOLs1q7zqHy++EVVtMK03osR5wQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "25.0.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.3.tgz", + "integrity": "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/react": { + "version": "19.2.7", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz", + "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "devOptional": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@vitejs/plugin-react-swc": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-4.2.2.tgz", + "integrity": "sha512-x+rE6tsxq/gxrEJN3Nv3dIV60lFflPj94c90b+NNo6n1QV1QQUTLoL0MpaOVasUZ0zqVBn7ead1B5ecx1JAGfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rolldown/pluginutils": "1.0.0-beta.47", + "@swc/core": "^1.13.5" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "vite": "^4 || ^5 || ^6 || ^7" + } + }, + "node_modules/@vitest/expect": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.1.2.tgz", + "integrity": "sha512-gbu+7B0YgUJ2nkdsRJrFFW6X7NTP44WlhiclHniUhxADQJH5Szt9mZ9hWnJPJ8YwOK5zUOSSlSvyzRf0u1DSBQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.1.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.1.2", + "@vitest/utils": "4.1.2", + "chai": "^6.2.2", + "tinyrainbow": "^3.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.1.2.tgz", + "integrity": "sha512-Ize4iQtEALHDttPRCmN+FKqOl2vxTiNUhzobQFFt/BM1lRUTG7zRCLOykG/6Vo4E4hnUdfVLo5/eqKPukcWW7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "4.1.2", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.1.2.tgz", + "integrity": "sha512-dwQga8aejqeuB+TvXCMzSQemvV9hNEtDDpgUKDzOmNQayl2OG241PSWeJwKRH3CiC+sESrmoFd49rfnq7T4RnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.1.2.tgz", + "integrity": "sha512-Gr+FQan34CdiYAwpGJmQG8PgkyFVmARK8/xSijia3eTFgVfpcpztWLuP6FttGNfPLJhaZVP/euvujeNYar36OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.1.2", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.1.2.tgz", + "integrity": "sha512-g7yfUmxYS4mNxk31qbOYsSt2F4m1E02LFqO53Xpzg3zKMhLAPZAjjfyl9e6z7HrW6LvUdTwAQR3HHfLjpko16A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.1.2", + "@vitest/utils": "4.1.2", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.1.2.tgz", + "integrity": "sha512-DU4fBnbVCJGNBwVA6xSToNXrkZNSiw59H8tcuUspVMsBDBST4nfvsPsEHDHGtWRRnqBERBQu7TrTKskmjqTXKA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.1.2.tgz", + "integrity": "sha512-xw2/TiX82lQHA06cgbqRKFb5lCAy3axQ4H4SoUFhUsg+wztiet+co86IAMDtF6Vm1hc7J6j09oh/rgDn+JdKIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.1.2", + "convert-source-map": "^2.0.0", + "tinyrainbow": "^3.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/ansis/-/ansis-4.2.0.tgz", + "integrity": "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/aria-hidden": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.6.tgz", + "integrity": "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/ast-types": { + "version": "0.16.1", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.16.1.tgz", + "integrity": "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", + "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-dead-code-elimination": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/babel-dead-code-elimination/-/babel-dead-code-elimination-1.0.11.tgz", + "integrity": "sha512-mwq3W3e/pKSI6TG8lXMiDWvEi1VXYlSBlJlB3l+I0bAb5u1RNUl88udos85eOPNK3m5EXK9uO7d2g08pesTySQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.23.7", + "@babel/parser": "^7.23.6", + "@babel/traverse": "^7.23.7", + "@babel/types": "^7.23.6" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.11", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", + "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/c12": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/c12/-/c12-2.0.1.tgz", + "integrity": "sha512-Z4JgsKXHG37C6PYUtIxCfLJZvo6FyhHJoClwwb9ftUkLpPSkuYqn6Tr+vnaN8hymm0kIbcg6Ey3kv/Q71k5w/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "chokidar": "^4.0.1", + "confbox": "^0.1.7", + "defu": "^6.1.4", + "dotenv": "^16.4.5", + "giget": "^1.2.3", + "jiti": "^2.3.0", + "mlly": "^1.7.1", + "ohash": "^1.1.4", + "pathe": "^1.1.2", + "perfect-debounce": "^1.0.0", + "pkg-types": "^1.2.0", + "rc9": "^2.1.2" + }, + "peerDependencies": { + "magicast": "^0.3.5" + }, + "peerDependenciesMeta": { + "magicast": { + "optional": true + } + } + }, + "node_modules/c12/node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/c12/node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/c12/node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/c12/node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001762", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001762.tgz", + "integrity": "sha512-PxZwGNvH7Ak8WX5iXzoK1KPZttBXNPuaOvI2ZYU7NrlM+d9Ov+TUvlLOBNGzVXAntMSMMlJPd+jY6ovrVjSmUw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/citty": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/citty/-/citty-0.1.6.tgz", + "integrity": "sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "consola": "^3.2.3" + } + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "dev": true, + "license": "ISC", + "bin": { + "color-support": "bin.js" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-13.0.0.tgz", + "integrity": "sha512-oPYleIY8wmTVzkvQq10AEok6YcTC4sRUBl8F9gVuwchGVUCTbl/vhLTaQqutuuySYOsu8YTgV+OxKc/8Yvx+mQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/confbox": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz", + "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cookie-es": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cookie-es/-/cookie-es-2.0.0.tgz", + "integrity": "sha512-RAj4E421UYRgqokKUmotqAwuplYw15qtdXfY+hGzgCJ/MBjCVZcSoHK/kH9kocfjRjcDME7IiDWR/1WX1TM2Pg==", + "license": "MIT" + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/default-browser": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.4.0.tgz", + "integrity": "sha512-XDuvSq38Hr1MdN47EDvYtx3U0MTqpCEn+F6ft8z2vYDzMrvQhVp0ui9oQdqW3MvK3vqUETglt1tVGgjLuJ5izg==", + "dev": true, + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.1.tgz", + "integrity": "sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/destr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/destr/-/destr-2.0.5.tgz", + "integrity": "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==", + "dev": true, + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + "license": "MIT" + }, + "node_modules/diff": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-8.0.2.tgz", + "integrity": "sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dotenv": { + "version": "17.2.3", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.3.tgz", + "integrity": "sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.267", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", + "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", + "dev": true, + "license": "ISC" + }, + "node_modules/enhanced-resolve": { + "version": "5.18.4", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.4.tgz", + "integrity": "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", + "integrity": "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", + "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.2", + "@esbuild/android-arm": "0.27.2", + "@esbuild/android-arm64": "0.27.2", + "@esbuild/android-x64": "0.27.2", + "@esbuild/darwin-arm64": "0.27.2", + "@esbuild/darwin-x64": "0.27.2", + "@esbuild/freebsd-arm64": "0.27.2", + "@esbuild/freebsd-x64": "0.27.2", + "@esbuild/linux-arm": "0.27.2", + "@esbuild/linux-arm64": "0.27.2", + "@esbuild/linux-ia32": "0.27.2", + "@esbuild/linux-loong64": "0.27.2", + "@esbuild/linux-mips64el": "0.27.2", + "@esbuild/linux-ppc64": "0.27.2", + "@esbuild/linux-riscv64": "0.27.2", + "@esbuild/linux-s390x": "0.27.2", + "@esbuild/linux-x64": "0.27.2", + "@esbuild/netbsd-arm64": "0.27.2", + "@esbuild/netbsd-x64": "0.27.2", + "@esbuild/openbsd-arm64": "0.27.2", + "@esbuild/openbsd-x64": "0.27.2", + "@esbuild/openharmony-arm64": "0.27.2", + "@esbuild/sunos-x64": "0.27.2", + "@esbuild/win32-arm64": "0.27.2", + "@esbuild/win32-ia32": "0.27.2", + "@esbuild/win32-x64": "0.27.2" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs-minipass/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fs-minipass/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.0", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.0.tgz", + "integrity": "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/giget": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/giget/-/giget-1.2.5.tgz", + "integrity": "sha512-r1ekGw/Bgpi3HLV3h1MRBIlSAdHoIMklpaQ3OQLFcRw9PwAj2rqigvIbg+dBUI51OxVI2jsEtDywDBjSiuf7Ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "citty": "^0.1.6", + "consola": "^3.4.0", + "defu": "^6.1.4", + "node-fetch-native": "^1.6.6", + "nypm": "^0.5.4", + "pathe": "^2.0.3", + "tar": "^6.2.1" + }, + "bin": { + "giget": "dist/cli.mjs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/goober": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.18.tgz", + "integrity": "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==", + "license": "MIT", + "peerDependencies": { + "csstype": "^3.0.10" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/handlebars/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-wsl": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", + "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isbot": { + "version": "5.1.32", + "resolved": "https://registry.npmjs.org/isbot/-/isbot-5.1.32.tgz", + "integrity": "sha512-VNfjM73zz2IBZmdShMfAUg10prm6t7HFUQmNAEOAVS4YH92ZrZcvkMcGX6cIgBJAzWDzPent/EeAtYEHNPNPBQ==", + "license": "Unlicense", + "engines": { + "node": ">=18" + } + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lightningcss": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz", + "integrity": "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==", + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.30.2", + "lightningcss-darwin-arm64": "1.30.2", + "lightningcss-darwin-x64": "1.30.2", + "lightningcss-freebsd-x64": "1.30.2", + "lightningcss-linux-arm-gnueabihf": "1.30.2", + "lightningcss-linux-arm64-gnu": "1.30.2", + "lightningcss-linux-arm64-musl": "1.30.2", + "lightningcss-linux-x64-gnu": "1.30.2", + "lightningcss-linux-x64-musl": "1.30.2", + "lightningcss-win32-arm64-msvc": "1.30.2", + "lightningcss-win32-x64-msvc": "1.30.2" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.30.2.tgz", + "integrity": "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.2.tgz", + "integrity": "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.2.tgz", + "integrity": "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.2.tgz", + "integrity": "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.2.tgz", + "integrity": "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==", + "cpu": [ + "arm" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.2.tgz", + "integrity": "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.2.tgz", + "integrity": "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.2.tgz", + "integrity": "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.2.tgz", + "integrity": "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.2.tgz", + "integrity": "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.2.tgz", + "integrity": "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.556.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.556.0.tgz", + "integrity": "sha512-iOb8dRk7kLaYBZhR2VlV1CeJGxChBgUthpSP8wom9jfj79qovgG6qcSdiy6vkoREKPnbUYzJsCn4o4PtG3Iy+A==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minizlib/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mlly": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.0.tgz", + "integrity": "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.15.0", + "pathe": "^2.0.3", + "pkg-types": "^1.3.1", + "ufo": "^1.6.1" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/next-themes": { + "version": "0.4.6", + "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz", + "integrity": "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" + } + }, + "node_modules/node-fetch-native": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/node-fetch-native/-/node-fetch-native-1.6.7.tgz", + "integrity": "sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nypm": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/nypm/-/nypm-0.5.4.tgz", + "integrity": "sha512-X0SNNrZiGU8/e/zAB7sCTtdxWTMSIO73q+xuKgglm2Yvzwlo8UoC5FNySQFCvl84uPaeADkqHUZUkWy4aH4xOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "citty": "^0.1.6", + "consola": "^3.4.0", + "pathe": "^2.0.3", + "pkg-types": "^1.3.1", + "tinyexec": "^0.3.2", + "ufo": "^1.5.4" + }, + "bin": { + "nypm": "dist/cli.mjs" + }, + "engines": { + "node": "^14.16.0 || >=16.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/ohash": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-1.1.6.tgz", + "integrity": "sha512-TBu7PtV8YkAZn0tSxobKY2n2aAQva936lhRrj6957aDaCf9IEtqsKbgMzXE/F/sjqYOwmrukeORHNLe5glk7Cg==", + "dev": true, + "license": "MIT" + }, + "node_modules/open": { + "version": "10.1.2", + "resolved": "https://registry.npmjs.org/open/-/open-10.1.2.tgz", + "integrity": "sha512-cxN6aIDPz6rm8hbebcP7vrQNhvRcveZoJU72Y7vskh4oIm+BZwBECnx5nTmrlres1Qapvx27Qo1Auukpf8PKXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "default-browser": "^5.2.1", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/perfect-debounce": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/perfect-debounce/-/perfect-debounce-1.0.0.tgz", + "integrity": "sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-types": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz", + "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "confbox": "^0.1.8", + "mlly": "^1.7.4", + "pathe": "^2.0.1" + } + }, + "node_modules/playwright": { + "version": "1.57.0", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.57.0.tgz", + "integrity": "sha512-ilYQj1s8sr2ppEJ2YVadYBN0Mb3mdo9J0wQ+UuDhzYqURwSoW4n1Xs5vs7ORwgDGmyEh33tRMeS8KhdkMoLXQw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "playwright-core": "1.57.0" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "fsevents": "2.3.2" + } + }, + "node_modules/playwright-core": { + "version": "1.57.0", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.57.0.tgz", + "integrity": "sha512-agTcKlMw/mjBWOnD6kFZttAAGHgi/Nw0CZ2o6JqWSbMlI219lAFLZZCyqByTsvVAJq5XA5H8cA6PrvBRpBWEuQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "playwright-core": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/playwright/node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prettier": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz", + "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/qr.js": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/qr.js/-/qr.js-0.0.0.tgz", + "integrity": "sha512-c4iYnWb+k2E+vYpRimHqSu575b1/wKl4XFeJGpFmrJQz5I88v9aY2czh7s0w36srfCM1sXgC/xpoJz5dJfq+OQ==" + }, + "node_modules/rc9": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/rc9/-/rc9-2.1.2.tgz", + "integrity": "sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "defu": "^6.1.4", + "destr": "^2.0.3" + } + }, + "node_modules/react": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", + "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.3" + } + }, + "node_modules/react-error-boundary": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/react-error-boundary/-/react-error-boundary-6.0.2.tgz", + "integrity": "sha512-yvWErn55ag/ywZEFqYpXYX9rxIDPIabXIX25F184KY3F5Szk2x/cVieOflw5R47ltN3KzWOw82Lmlb4vNjyn9A==", + "license": "MIT", + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-hook-form": { + "version": "7.70.0", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.70.0.tgz", + "integrity": "sha512-COOMajS4FI3Wuwrs3GPpi/Jeef/5W1DRR84Yl5/ShlT3dKVFUfoGiEZ/QE6Uw8P4T2/CLJdcTVYKvWBMQTEpvw==", + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/react-hook-form" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18 || ^19" + } + }, + "node_modules/react-icons": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/react-icons/-/react-icons-5.5.0.tgz", + "integrity": "sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw==", + "license": "MIT", + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-qr-code": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/react-qr-code/-/react-qr-code-2.0.18.tgz", + "integrity": "sha512-v1Jqz7urLMhkO6jkgJuBYhnqvXagzceg3qJUWayuCK/c6LTIonpWbwxR1f1APGd4xrW/QcQEovNrAojbUz65Tg==", + "dependencies": { + "prop-types": "^15.8.1", + "qr.js": "0.0.0" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.2.tgz", + "integrity": "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + "license": "MIT", + "dependencies": { + "react-style-singleton": "^2.2.2", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + "license": "MIT", + "dependencies": { + "get-nonce": "^1.0.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/recast": { + "version": "0.23.11", + "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.11.tgz", + "integrity": "sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ast-types": "^0.16.1", + "esprima": "~4.0.0", + "source-map": "~0.6.1", + "tiny-invariant": "^1.3.3", + "tslib": "^2.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/recast/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "devOptional": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/rollup": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz", + "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==", + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.55.1", + "@rollup/rollup-android-arm64": "4.55.1", + "@rollup/rollup-darwin-arm64": "4.55.1", + "@rollup/rollup-darwin-x64": "4.55.1", + "@rollup/rollup-freebsd-arm64": "4.55.1", + "@rollup/rollup-freebsd-x64": "4.55.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.55.1", + "@rollup/rollup-linux-arm-musleabihf": "4.55.1", + "@rollup/rollup-linux-arm64-gnu": "4.55.1", + "@rollup/rollup-linux-arm64-musl": "4.55.1", + "@rollup/rollup-linux-loong64-gnu": "4.55.1", + "@rollup/rollup-linux-loong64-musl": "4.55.1", + "@rollup/rollup-linux-ppc64-gnu": "4.55.1", + "@rollup/rollup-linux-ppc64-musl": "4.55.1", + "@rollup/rollup-linux-riscv64-gnu": "4.55.1", + "@rollup/rollup-linux-riscv64-musl": "4.55.1", + "@rollup/rollup-linux-s390x-gnu": "4.55.1", + "@rollup/rollup-linux-x64-gnu": "4.55.1", + "@rollup/rollup-linux-x64-musl": "4.55.1", + "@rollup/rollup-openbsd-x64": "4.55.1", + "@rollup/rollup-openharmony-arm64": "4.55.1", + "@rollup/rollup-win32-arm64-msvc": "4.55.1", + "@rollup/rollup-win32-ia32-msvc": "4.55.1", + "@rollup/rollup-win32-x64-gnu": "4.55.1", + "@rollup/rollup-win32-x64-msvc": "4.55.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-applescript": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.1.0.tgz", + "integrity": "sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/seroval": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/seroval/-/seroval-1.4.2.tgz", + "integrity": "sha512-N3HEHRCZYn3cQbsC4B5ldj9j+tHdf4JZoYPlcI4rRYu0Xy4qN8MQf1Z08EibzB0WpgRG5BGK08FTrmM66eSzKQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/seroval-plugins": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/seroval-plugins/-/seroval-plugins-1.4.2.tgz", + "integrity": "sha512-X7p4MEDTi+60o2sXZ4bnDBhgsUYDSkQEvzYZuJyFqWg9jcoPsHts5nrg5O956py2wyt28lUrBxk0M0/wU8URpA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "seroval": "^1.0" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/solid-js": { + "version": "1.9.10", + "resolved": "https://registry.npmjs.org/solid-js/-/solid-js-1.9.10.tgz", + "integrity": "sha512-Coz956cos/EPDlhs6+jsdTxKuJDPT7B5SVIWgABwROyxjY7Xbr8wkzD68Et+NxnV7DLJ3nJdAC2r9InuV/4Jew==", + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.1.0", + "seroval": "~1.3.0", + "seroval-plugins": "~1.3.0" + } + }, + "node_modules/solid-js/node_modules/seroval": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/seroval/-/seroval-1.3.2.tgz", + "integrity": "sha512-RbcPH1n5cfwKrru7v7+zrZvjLurgHhGyso3HTyGtRivGWgYjbOmGuivCQaORNELjNONoK35nj28EoWul9sb1zQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/solid-js/node_modules/seroval-plugins": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/seroval-plugins/-/seroval-plugins-1.3.3.tgz", + "integrity": "sha512-16OL3NnUBw8JG1jBLUoZJsLnQq0n5Ua6aHalhJK4fMQkz1lqR7Osz1sA30trBtd9VUDc2NgkuRCn8+/pBwqZ+w==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "seroval": "^1.0" + } + }, + "node_modules/sonner": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/sonner/-/sonner-2.0.7.tgz", + "integrity": "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w==", + "license": "MIT", + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" + } + }, + "node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">= 12" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-4.0.0.tgz", + "integrity": "sha512-zUMPtQ/HBY3/50VbpkupYHbRroTRZJPRLvreamgErJVys0ceuzMkD44J/QjqhHjOzK42GQ3QZIeFG1OYfOtKqQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/tailwind-merge": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz", + "integrity": "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz", + "integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==", + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "dev": true, + "license": "ISC", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "license": "MIT" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/tinyrainbow": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.1.0.tgz", + "integrity": "sha512-Bf+ILmBgretUrdJxzXM0SgXLZ3XfiaUuOj/IKQHuTXip+05Xn+uyEYdVg0kYDipTBcLrCVyUzAPz7QmArb0mmw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/tw-animate-css": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.4.0.tgz", + "integrity": "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Wombosvideo" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.1.tgz", + "integrity": "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==", + "dev": true, + "license": "MIT" + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/unplugin": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/unplugin/-/unplugin-2.3.11.tgz", + "integrity": "sha512-5uKD0nqiYVzlmCRs01Fhs2BdkEgBS3SAVP6ndrBsuK42iC2+JHyxM05Rm9G8+5mkmRtzMZGY8Ct5+mliZxU/Ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.5", + "acorn": "^8.15.0", + "picomatch": "^4.0.3", + "webpack-virtual-modules": "^0.6.2" + }, + "engines": { + "node": ">=18.12.0" + } + }, + "node_modules/unplugin/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + "license": "MIT", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/vite": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", + "integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", + "license": "MIT", + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/vitest": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.1.2.tgz", + "integrity": "sha512-xjR1dMTVHlFLh98JE3i/f/WePqJsah4A0FK9cc8Ehp9Udk0AZk6ccpIZhh1qJ/yxVWRZ+Q54ocnD8TXmkhspGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.1.2", + "@vitest/mocker": "4.1.2", + "@vitest/pretty-format": "4.1.2", + "@vitest/runner": "4.1.2", + "@vitest/snapshot": "4.1.2", + "@vitest/spy": "4.1.2", + "@vitest/utils": "4.1.2", + "es-module-lexer": "^2.0.0", + "expect-type": "^1.3.0", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^4.0.0-rc.1", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.1.0", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.1.2", + "@vitest/browser-preview": "4.1.2", + "@vitest/browser-webdriverio": "4.1.2", + "@vitest/ui": "4.1.2", + "happy-dom": "*", + "jsdom": "*", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + }, + "vite": { + "optional": false + } + } + }, + "node_modules/vitest/node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/vitest/node_modules/tinyexec": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.4.tgz", + "integrity": "sha512-u9r3uZC0bdpGOXtlxUIdwf9pkmvhqJdrVCH9fapQtgy/OeTTMZ1nqH7agtvEfmGui6e1XxjcdrlxvxJvc3sMqw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/webpack-virtual-modules": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/webpack-virtual-modules/-/webpack-virtual-modules-0.6.2.tgz", + "integrity": "sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/zod": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.5.tgz", + "integrity": "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/package.json b/packages/tentacles/Services/Interfaces/node_web_interface/package.json new file mode 100644 index 0000000000..9ccb0bcaba --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/package.json @@ -0,0 +1,69 @@ +{ + "name": "octobot-node", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc -p tsconfig.build.json && vite build", + "lint": "biome check --write --unsafe --no-errors-on-unmatched --files-ignore-unknown=true ./", + "preview": "vite preview", + "generate-client": "openapi-ts", + "test": "vitest run", + "test:watch": "vitest" + }, + "dependencies": { + "@hookform/resolvers": "^5.2.2", + "@noble/curves": "^2.2.0", + "@radix-ui/react-avatar": "^1.1.11", + "@radix-ui/react-checkbox": "^1.3.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-radio-group": "^1.3.8", + "@radix-ui/react-scroll-area": "^1.2.10", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-tooltip": "^1.2.8", + "@tailwindcss/vite": "^4.1.18", + "@tanstack/react-query": "^5.90.12", + "@tanstack/react-query-devtools": "^5.91.1", + "@tanstack/react-router": "^1.142.11", + "@tanstack/react-router-devtools": "^1.142.8", + "@tanstack/react-table": "^8.21.3", + "axios": "1.13.2", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "form-data": "4.0.5", + "lucide-react": "^0.556.0", + "next-themes": "^0.4.6", + "react": "^19.1.1", + "react-dom": "^19.2.3", + "react-error-boundary": "^6.0.0", + "react-hook-form": "^7.68.0", + "react-icons": "^5.5.0", + "react-qr-code": "^2.0.18", + "sonner": "^2.0.7", + "tailwind-merge": "^3.4.0", + "tailwindcss": "^4.1.17", + "zod": "^4.2.1" + }, + "devDependencies": { + "@biomejs/biome": "^2.3.10", + "@hey-api/openapi-ts": "0.73.0", + "@playwright/test": "1.57.0", + "@tanstack/router-devtools": "^1.142.11", + "@tanstack/router-plugin": "^1.140.0", + "@types/node": "^25.0.2", + "@types/react": "^19.2.7", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react-swc": "^4.2.2", + "dotenv": "^17.2.3", + "tw-animate-css": "^1.4.0", + "typescript": "^5.9.3", + "vite": "^7.3.0", + "vitest": "^4.1.2" + } +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/favicon.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/favicon.png new file mode 100644 index 0000000000..6f3ec04a26 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/favicon.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_100.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_100.png new file mode 100644 index 0000000000..b865867065 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_100.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_1024.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_1024.png new file mode 100644 index 0000000000..ede47bf125 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_1024.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_512.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_512.png new file mode 100644 index 0000000000..86ca048533 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_design_512.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_100.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_100.png new file mode 100644 index 0000000000..60572388a8 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_100.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_1024.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_1024.png new file mode 100644 index 0000000000..552fe469fd Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_1024.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_512.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_512.png new file mode 100644 index 0000000000..29d6566a2f Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_lab_512.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_100.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_100.png new file mode 100644 index 0000000000..7ab6ae08f7 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_100.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_1024.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_1024.png new file mode 100644 index 0000000000..1ff3270d26 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_1024.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_512.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_512.png new file mode 100644 index 0000000000..d4b610b02b Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_launching_512.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_100.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_100.png new file mode 100644 index 0000000000..6cadeb1fbe Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_100.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_1024.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_1024.png new file mode 100644 index 0000000000..8bad10dcdf Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_1024.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_16.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_16.png new file mode 100644 index 0000000000..9aabd12941 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_16.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_256.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_256.png new file mode 100644 index 0000000000..ceea830593 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_256.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_32.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_32.png new file mode 100644 index 0000000000..6f3ec04a26 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_32.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_48.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_48.png new file mode 100644 index 0000000000..cfd16ed170 Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_48.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_512.png b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_512.png new file mode 100644 index 0000000000..8e055eaf1e Binary files /dev/null and b/packages/tentacles/Services/Interfaces/node_web_interface/public/assets/images/octobot_node_512.png differ diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/portfolio_summary.json b/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/portfolio_summary.json new file mode 100644 index 0000000000..1a11e0d793 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/portfolio_summary.json @@ -0,0 +1,14 @@ +{ + "id": "portfolio_summary", + "label": "Portfolio Summary", + "description": "Portfolio and balance focused columns", + "columns": [ + { "key": "name", "label": "Name", "jsonPath": "__task_name__", "formatter": "text" }, + { "key": "status", "label": "Status", "jsonPath": "__exec_status__", "formatter": "text" }, + { "key": "asset", "label": "Asset", "jsonPath": "asset", "formatter": "text" }, + { "key": "free", "label": "Free Balance", "jsonPath": "free", "formatter": "number" }, + { "key": "used", "label": "Used Balance", "jsonPath": "used", "formatter": "number" }, + { "key": "total", "label": "Total Balance", "jsonPath": "total", "formatter": "number" }, + { "key": "exchange", "label": "Exchange", "jsonPath": "exchange", "formatter": "text" } + ] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/trade_with_fees.json b/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/trade_with_fees.json new file mode 100644 index 0000000000..75cd5646be --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/trade_with_fees.json @@ -0,0 +1,16 @@ +{ + "id": "trade_with_fees", + "label": "Trade + Fees", + "description": "Trading execution details including fee breakdowns", + "columns": [ + { "key": "name", "label": "Name", "jsonPath": "__task_name__", "formatter": "text" }, + { "key": "status", "label": "Status", "jsonPath": "__exec_status__", "formatter": "text" }, + { "key": "symbol", "label": "Symbol", "jsonPath": "symbol", "formatter": "text" }, + { "key": "side", "label": "Side", "jsonPath": "side", "formatter": "text" }, + { "key": "amount", "label": "Amount", "jsonPath": "amount", "formatter": "number" }, + { "key": "price", "label": "Price", "jsonPath": "price", "formatter": "number" }, + { "key": "fee_cost", "label": "Fee Cost", "jsonPath": "fee.cost", "formatter": "number" }, + { "key": "fee_currency", "label": "Fee Currency", "jsonPath": "fee.currency", "formatter": "text" }, + { "key": "exchange", "label": "Exchange", "jsonPath": "exchange", "formatter": "text" } + ] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/transfer_detailed.json b/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/transfer_detailed.json new file mode 100644 index 0000000000..e40d913953 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/public/export-template-examples/transfer_detailed.json @@ -0,0 +1,17 @@ +{ + "id": "transfer_detailed", + "label": "Transfer Detailed", + "description": "Blockchain transfers with confirmation and block details", + "columns": [ + { "key": "name", "label": "Name", "jsonPath": "__task_name__", "formatter": "text" }, + { "key": "status", "label": "Status", "jsonPath": "__exec_status__", "formatter": "text" }, + { "key": "from_address", "label": "From", "jsonPath": "from_address", "formatter": "text" }, + { "key": "to_address", "label": "To", "jsonPath": "to_address", "formatter": "text" }, + { "key": "amount", "label": "Amount", "jsonPath": "amount", "formatter": "number" }, + { "key": "asset", "label": "Asset", "jsonPath": "asset", "formatter": "text" }, + { "key": "tx_hash", "label": "TX Hash", "jsonPath": "tx_hash", "formatter": "text" }, + { "key": "confirmations", "label": "Confirmations", "jsonPath": "confirmations", "formatter": "number" }, + { "key": "block_number", "label": "Block", "jsonPath": "block_number", "formatter": "number" }, + { "key": "network", "label": "Network", "jsonPath": "network", "formatter": "text" } + ] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/cancel_then_trade.json b/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/cancel_then_trade.json new file mode 100644 index 0000000000..829b402bd8 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/cancel_then_trade.json @@ -0,0 +1,9 @@ +{ + "id": "cancel_then_trade", + "label": "Cancel + Trade", + "description": "Cancel open orders for a symbol, then place a new order on the same symbol", + "steps": [ + { "templateId": "cancel" }, + { "templateId": "trade" } + ] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/deposit_then_trade.json b/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/deposit_then_trade.json new file mode 100644 index 0000000000..cddaef8245 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/deposit_then_trade.json @@ -0,0 +1,9 @@ +{ + "id": "deposit_then_trade", + "label": "Deposit + Trade", + "description": "Deposit funds from a blockchain wallet to an exchange, then place a trade on that exchange", + "steps": [ + { "templateId": "deposit" }, + { "templateId": "trade" } + ] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/wait_trade_kraken.json b/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/wait_trade_kraken.json new file mode 100644 index 0000000000..bc11103062 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/public/meta-template-examples/wait_trade_kraken.json @@ -0,0 +1,13 @@ +{ + "id": "wait_trade_kraken", + "label": "Wait + Trade (Kraken)", + "description": "Wait for a delay, then place an order on Kraken", + "steps": [ + { "templateId": "wait" }, + { + "templateId": "trade", + "overrides": { "EXCHANGE_TO": "kraken" }, + "hiddenParams": ["API_KEY", "API_SECRET"] + } + ] +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiError.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiError.ts new file mode 100644 index 0000000000..36675d288a --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiError.ts @@ -0,0 +1,21 @@ +import type { ApiRequestOptions } from './ApiRequestOptions'; +import type { ApiResult } from './ApiResult'; + +export class ApiError extends Error { + public readonly url: string; + public readonly status: number; + public readonly statusText: string; + public readonly body: unknown; + public readonly request: ApiRequestOptions; + + constructor(request: ApiRequestOptions, response: ApiResult, message: string) { + super(message); + + this.name = 'ApiError'; + this.url = response.url; + this.status = response.status; + this.statusText = response.statusText; + this.body = response.body; + this.request = request; + } +} \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiRequestOptions.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiRequestOptions.ts new file mode 100644 index 0000000000..939a0aa4c8 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiRequestOptions.ts @@ -0,0 +1,21 @@ +export type ApiRequestOptions = { + readonly body?: any; + readonly cookies?: Record; + readonly errors?: Record; + readonly formData?: Record | any[] | Blob | File; + readonly headers?: Record; + readonly mediaType?: string; + readonly method: + | 'DELETE' + | 'GET' + | 'HEAD' + | 'OPTIONS' + | 'PATCH' + | 'POST' + | 'PUT'; + readonly path?: Record; + readonly query?: Record; + readonly responseHeader?: string; + readonly responseTransformer?: (data: unknown) => Promise; + readonly url: string; +}; \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiResult.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiResult.ts new file mode 100644 index 0000000000..4c58e39138 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/ApiResult.ts @@ -0,0 +1,7 @@ +export type ApiResult = { + readonly body: TData; + readonly ok: boolean; + readonly status: number; + readonly statusText: string; + readonly url: string; +}; \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/CancelablePromise.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/CancelablePromise.ts new file mode 100644 index 0000000000..ccc082e8f2 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/CancelablePromise.ts @@ -0,0 +1,126 @@ +export class CancelError extends Error { + constructor(message: string) { + super(message); + this.name = 'CancelError'; + } + + public get isCancelled(): boolean { + return true; + } +} + +export interface OnCancel { + readonly isResolved: boolean; + readonly isRejected: boolean; + readonly isCancelled: boolean; + + (cancelHandler: () => void): void; +} + +export class CancelablePromise implements Promise { + private _isResolved: boolean; + private _isRejected: boolean; + private _isCancelled: boolean; + readonly cancelHandlers: (() => void)[]; + readonly promise: Promise; + private _resolve?: (value: T | PromiseLike) => void; + private _reject?: (reason?: unknown) => void; + + constructor( + executor: ( + resolve: (value: T | PromiseLike) => void, + reject: (reason?: unknown) => void, + onCancel: OnCancel + ) => void + ) { + this._isResolved = false; + this._isRejected = false; + this._isCancelled = false; + this.cancelHandlers = []; + this.promise = new Promise((resolve, reject) => { + this._resolve = resolve; + this._reject = reject; + + const onResolve = (value: T | PromiseLike): void => { + if (this._isResolved || this._isRejected || this._isCancelled) { + return; + } + this._isResolved = true; + if (this._resolve) this._resolve(value); + }; + + const onReject = (reason?: unknown): void => { + if (this._isResolved || this._isRejected || this._isCancelled) { + return; + } + this._isRejected = true; + if (this._reject) this._reject(reason); + }; + + const onCancel = (cancelHandler: () => void): void => { + if (this._isResolved || this._isRejected || this._isCancelled) { + return; + } + this.cancelHandlers.push(cancelHandler); + }; + + Object.defineProperty(onCancel, 'isResolved', { + get: (): boolean => this._isResolved, + }); + + Object.defineProperty(onCancel, 'isRejected', { + get: (): boolean => this._isRejected, + }); + + Object.defineProperty(onCancel, 'isCancelled', { + get: (): boolean => this._isCancelled, + }); + + return executor(onResolve, onReject, onCancel as OnCancel); + }); + } + + get [Symbol.toStringTag]() { + return "Cancellable Promise"; + } + + public then( + onFulfilled?: ((value: T) => TResult1 | PromiseLike) | null, + onRejected?: ((reason: unknown) => TResult2 | PromiseLike) | null + ): Promise { + return this.promise.then(onFulfilled, onRejected); + } + + public catch( + onRejected?: ((reason: unknown) => TResult | PromiseLike) | null + ): Promise { + return this.promise.catch(onRejected); + } + + public finally(onFinally?: (() => void) | null): Promise { + return this.promise.finally(onFinally); + } + + public cancel(): void { + if (this._isResolved || this._isRejected || this._isCancelled) { + return; + } + this._isCancelled = true; + if (this.cancelHandlers.length) { + try { + for (const cancelHandler of this.cancelHandlers) { + cancelHandler(); + } + } catch (error) { + console.warn('Cancellation threw an error', error); + return; + } + } + this.cancelHandlers.length = 0; + if (this._reject) this._reject(new CancelError('Request aborted')); + } + + public get isCancelled(): boolean { + return this._isCancelled; + } +} \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/OpenAPI.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/OpenAPI.ts new file mode 100644 index 0000000000..74f92b4085 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/OpenAPI.ts @@ -0,0 +1,57 @@ +import type { AxiosRequestConfig, AxiosResponse } from 'axios'; +import type { ApiRequestOptions } from './ApiRequestOptions'; + +type Headers = Record; +type Middleware = (value: T) => T | Promise; +type Resolver = (options: ApiRequestOptions) => Promise; + +export class Interceptors { + _fns: Middleware[]; + + constructor() { + this._fns = []; + } + + eject(fn: Middleware): void { + const index = this._fns.indexOf(fn); + if (index !== -1) { + this._fns = [...this._fns.slice(0, index), ...this._fns.slice(index + 1)]; + } + } + + use(fn: Middleware): void { + this._fns = [...this._fns, fn]; + } +} + +export type OpenAPIConfig = { + BASE: string; + CREDENTIALS: 'include' | 'omit' | 'same-origin'; + ENCODE_PATH?: ((path: string) => string) | undefined; + HEADERS?: Headers | Resolver | undefined; + PASSWORD?: string | Resolver | undefined; + TOKEN?: string | Resolver | undefined; + USERNAME?: string | Resolver | undefined; + VERSION: string; + WITH_CREDENTIALS: boolean; + interceptors: { + request: Interceptors; + response: Interceptors; + }; +}; + +export const OpenAPI: OpenAPIConfig = { + BASE: '', + CREDENTIALS: 'include', + ENCODE_PATH: undefined, + HEADERS: undefined, + PASSWORD: undefined, + TOKEN: undefined, + USERNAME: undefined, + VERSION: '0.1.0', + WITH_CREDENTIALS: false, + interceptors: { + request: new Interceptors(), + response: new Interceptors(), + }, +}; \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/request.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/request.ts new file mode 100644 index 0000000000..ecc2e393cd --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/core/request.ts @@ -0,0 +1,347 @@ +import axios from 'axios'; +import type { AxiosError, AxiosRequestConfig, AxiosResponse, AxiosInstance } from 'axios'; + +import { ApiError } from './ApiError'; +import type { ApiRequestOptions } from './ApiRequestOptions'; +import type { ApiResult } from './ApiResult'; +import { CancelablePromise } from './CancelablePromise'; +import type { OnCancel } from './CancelablePromise'; +import type { OpenAPIConfig } from './OpenAPI'; + +export const isString = (value: unknown): value is string => { + return typeof value === 'string'; +}; + +export const isStringWithValue = (value: unknown): value is string => { + return isString(value) && value !== ''; +}; + +export const isBlob = (value: any): value is Blob => { + return value instanceof Blob; +}; + +export const isFormData = (value: unknown): value is FormData => { + return value instanceof FormData; +}; + +export const isSuccess = (status: number): boolean => { + return status >= 200 && status < 300; +}; + +export const base64 = (str: string): string => { + try { + return btoa(str); + } catch (err) { + // @ts-ignore + return Buffer.from(str).toString('base64'); + } +}; + +export const getQueryString = (params: Record): string => { + const qs: string[] = []; + + const append = (key: string, value: unknown) => { + qs.push(`${encodeURIComponent(key)}=${encodeURIComponent(String(value))}`); + }; + + const encodePair = (key: string, value: unknown) => { + if (value === undefined || value === null) { + return; + } + + if (value instanceof Date) { + append(key, value.toISOString()); + } else if (Array.isArray(value)) { + value.forEach(v => encodePair(key, v)); + } else if (typeof value === 'object') { + Object.entries(value).forEach(([k, v]) => encodePair(`${key}[${k}]`, v)); + } else { + append(key, value); + } + }; + + Object.entries(params).forEach(([key, value]) => encodePair(key, value)); + + return qs.length ? `?${qs.join('&')}` : ''; +}; + +const getUrl = (config: OpenAPIConfig, options: ApiRequestOptions): string => { + const encoder = config.ENCODE_PATH || encodeURI; + + const path = options.url + .replace('{api-version}', config.VERSION) + .replace(/{(.*?)}/g, (substring: string, group: string) => { + if (options.path?.hasOwnProperty(group)) { + return encoder(String(options.path[group])); + } + return substring; + }); + + const url = config.BASE + path; + return options.query ? url + getQueryString(options.query) : url; +}; + +export const getFormData = (options: ApiRequestOptions): FormData | undefined => { + if (options.formData) { + const formData = new FormData(); + + const process = (key: string, value: unknown) => { + if (isString(value) || isBlob(value)) { + formData.append(key, value); + } else { + formData.append(key, JSON.stringify(value)); + } + }; + + Object.entries(options.formData) + .filter(([, value]) => value !== undefined && value !== null) + .forEach(([key, value]) => { + if (Array.isArray(value)) { + value.forEach(v => process(key, v)); + } else { + process(key, value); + } + }); + + return formData; + } + return undefined; +}; + +type Resolver = (options: ApiRequestOptions) => Promise; + +export const resolve = async (options: ApiRequestOptions, resolver?: T | Resolver): Promise => { + if (typeof resolver === 'function') { + return (resolver as Resolver)(options); + } + return resolver; +}; + +export const getHeaders = async (config: OpenAPIConfig, options: ApiRequestOptions): Promise> => { + const [token, username, password, additionalHeaders] = await Promise.all([ + // @ts-ignore + resolve(options, config.TOKEN), + // @ts-ignore + resolve(options, config.USERNAME), + // @ts-ignore + resolve(options, config.PASSWORD), + // @ts-ignore + resolve(options, config.HEADERS), + ]); + + const headers = Object.entries({ + Accept: 'application/json', + ...additionalHeaders, + ...options.headers, + }) + .filter(([, value]) => value !== undefined && value !== null) + .reduce((headers, [key, value]) => ({ + ...headers, + [key]: String(value), + }), {} as Record); + + if (isStringWithValue(token)) { + headers['Authorization'] = `Bearer ${token}`; + } + + if (isStringWithValue(username) && isStringWithValue(password)) { + const credentials = base64(`${username}:${password}`); + headers['Authorization'] = `Basic ${credentials}`; + } + + if (options.body !== undefined) { + if (options.mediaType) { + headers['Content-Type'] = options.mediaType; + } else if (isBlob(options.body)) { + headers['Content-Type'] = options.body.type || 'application/octet-stream'; + } else if (isString(options.body)) { + headers['Content-Type'] = 'text/plain'; + } else if (!isFormData(options.body)) { + headers['Content-Type'] = 'application/json'; + } + } else if (options.formData !== undefined) { + if (options.mediaType) { + headers['Content-Type'] = options.mediaType; + } + } + + return headers; +}; + +export const getRequestBody = (options: ApiRequestOptions): unknown => { + if (options.body) { + return options.body; + } + return undefined; +}; + +export const sendRequest = async ( + config: OpenAPIConfig, + options: ApiRequestOptions, + url: string, + body: unknown, + formData: FormData | undefined, + headers: Record, + onCancel: OnCancel, + axiosClient: AxiosInstance +): Promise> => { + const controller = new AbortController(); + + let requestConfig: AxiosRequestConfig = { + data: body ?? formData, + headers, + method: options.method, + signal: controller.signal, + url, + withCredentials: config.WITH_CREDENTIALS, + }; + + onCancel(() => controller.abort()); + + for (const fn of config.interceptors.request._fns) { + requestConfig = await fn(requestConfig); + } + + try { + return await axiosClient.request(requestConfig); + } catch (error) { + const axiosError = error as AxiosError; + if (axiosError.response) { + return axiosError.response; + } + throw error; + } +}; + +export const getResponseHeader = (response: AxiosResponse, responseHeader?: string): string | undefined => { + if (responseHeader) { + const content = response.headers[responseHeader]; + if (isString(content)) { + return content; + } + } + return undefined; +}; + +export const getResponseBody = (response: AxiosResponse): unknown => { + if (response.status !== 204) { + return response.data; + } + return undefined; +}; + +export const catchErrorCodes = (options: ApiRequestOptions, result: ApiResult): void => { + const errors: Record = { + 400: 'Bad Request', + 401: 'Unauthorized', + 402: 'Payment Required', + 403: 'Forbidden', + 404: 'Not Found', + 405: 'Method Not Allowed', + 406: 'Not Acceptable', + 407: 'Proxy Authentication Required', + 408: 'Request Timeout', + 409: 'Conflict', + 410: 'Gone', + 411: 'Length Required', + 412: 'Precondition Failed', + 413: 'Payload Too Large', + 414: 'URI Too Long', + 415: 'Unsupported Media Type', + 416: 'Range Not Satisfiable', + 417: 'Expectation Failed', + 418: 'Im a teapot', + 421: 'Misdirected Request', + 422: 'Unprocessable Content', + 423: 'Locked', + 424: 'Failed Dependency', + 425: 'Too Early', + 426: 'Upgrade Required', + 428: 'Precondition Required', + 429: 'Too Many Requests', + 431: 'Request Header Fields Too Large', + 451: 'Unavailable For Legal Reasons', + 500: 'Internal Server Error', + 501: 'Not Implemented', + 502: 'Bad Gateway', + 503: 'Service Unavailable', + 504: 'Gateway Timeout', + 505: 'HTTP Version Not Supported', + 506: 'Variant Also Negotiates', + 507: 'Insufficient Storage', + 508: 'Loop Detected', + 510: 'Not Extended', + 511: 'Network Authentication Required', + ...options.errors, + } + + const error = errors[result.status]; + if (error) { + throw new ApiError(options, result, error); + } + + if (!result.ok) { + const errorStatus = result.status ?? 'unknown'; + const errorStatusText = result.statusText ?? 'unknown'; + const errorBody = (() => { + try { + return JSON.stringify(result.body, null, 2); + } catch (e) { + return undefined; + } + })(); + + throw new ApiError(options, result, + `Generic Error: status: ${errorStatus}; status text: ${errorStatusText}; body: ${errorBody}` + ); + } +}; + +/** + * Request method + * @param config The OpenAPI configuration object + * @param options The request options from the service + * @param axiosClient The axios client instance to use + * @returns CancelablePromise + * @throws ApiError + */ +export const request = (config: OpenAPIConfig, options: ApiRequestOptions, axiosClient: AxiosInstance = axios): CancelablePromise => { + return new CancelablePromise(async (resolve, reject, onCancel) => { + try { + const url = getUrl(config, options); + const formData = getFormData(options); + const body = getRequestBody(options); + const headers = await getHeaders(config, options); + + if (!onCancel.isCancelled) { + let response = await sendRequest(config, options, url, body, formData, headers, onCancel, axiosClient); + + for (const fn of config.interceptors.response._fns) { + response = await fn(response); + } + + const responseBody = getResponseBody(response); + const responseHeader = getResponseHeader(response, options.responseHeader); + + let transformedBody = responseBody; + if (options.responseTransformer && isSuccess(response.status)) { + transformedBody = await options.responseTransformer(responseBody) + } + + const result: ApiResult = { + url, + ok: isSuccess(response.status), + status: response.status, + statusText: response.statusText, + body: responseHeader ?? transformedBody, + }; + + catchErrorCodes(options, result); + + resolve(result.body); + } + } catch (error) { + reject(error); + } + }); +}; \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/client/index.ts b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/index.ts new file mode 100644 index 0000000000..50a1dd734c --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/client/index.ts @@ -0,0 +1,6 @@ +// This file is auto-generated by @hey-api/openapi-ts +export { ApiError } from './core/ApiError'; +export { CancelablePromise, CancelError } from './core/CancelablePromise'; +export { OpenAPI, type OpenAPIConfig } from './core/OpenAPI'; +export * from './sdk.gen'; +export * from './types.gen'; \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/AppHeader.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/AppHeader.tsx new file mode 100644 index 0000000000..7a8d62c69c --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/AppHeader.tsx @@ -0,0 +1,27 @@ +import { Link } from "@tanstack/react-router" +import { Plus } from "lucide-react" + +import { Logo } from "@/components/Common/Logo" +import UserMenu from "@/components/Common/UserMenu" +import { Button } from "@/components/ui/button" + +export function AppHeader() { + return ( +
+
+ +
+ + +
+
+
+ ) +} + +export default AppHeader diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/AuthLayout.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/AuthLayout.tsx new file mode 100644 index 0000000000..e541b07af2 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/AuthLayout.tsx @@ -0,0 +1,16 @@ +import { Logo } from "@/components/Common/Logo" + +interface AuthLayoutProps { + children: React.ReactNode +} + +export function AuthLayout({ children }: AuthLayoutProps) { + return ( +
+
+ +
+
{children}
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/CollectionHeader.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/CollectionHeader.tsx new file mode 100644 index 0000000000..8a4bcc1b17 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/CollectionHeader.tsx @@ -0,0 +1,71 @@ +import { ReactNode } from "react" + +import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs" +import { SearchInput } from "@/components/Common/SearchInput" + +export type FilterOption = { + value: string + label: string +} + +interface CollectionHeaderProps { + title?: string + description?: string + action?: ReactNode + searchValue: string + onSearchChange: (value: string) => void + searchPlaceholder?: string + filters: FilterOption[] + filterValue: string + onFilterChange: (value: string) => void +} + +export function CollectionHeader({ + title, + description, + action, + searchValue, + onSearchChange, + searchPlaceholder, + filters, + filterValue, + onFilterChange, +}: CollectionHeaderProps) { + const hasHeading = title || description || action + return ( +
+ {hasHeading && ( +
+
+ {title &&

{title}

} + {description && ( +

{description}

+ )} +
+ {action} +
+ )} +
+ + + {filters.map((filter) => ( + + {filter.label} + + ))} + + + +
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/ErrorComponent.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/ErrorComponent.tsx new file mode 100644 index 0000000000..e4a97d29cd --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/ErrorComponent.tsx @@ -0,0 +1,29 @@ +import { Link } from "@tanstack/react-router" +import { Button } from "@/components/ui/button" + +const ErrorComponent = () => { + return ( +
+
+
+ + Error + + Oops! +
+
+ +

+ Something went wrong. Please try again. +

+ + + +
+ ) +} + +export default ErrorComponent diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/Logo.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/Logo.tsx new file mode 100644 index 0000000000..5bd49a53e5 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/Logo.tsx @@ -0,0 +1,62 @@ +import { Link } from "@tanstack/react-router" + +import { cn, getAssetPath } from "@/lib/utils" + +interface LogoProps { + variant?: "full" | "icon" | "responsive" + className?: string + asLink?: boolean +} + +export function Logo({ + variant = "full", + className, + asLink = true, +}: LogoProps) { + const logoPath = getAssetPath("images/octobot_node_1024.png") + const iconPath = getAssetPath("images/octobot_node_100.png") + + const content = + variant === "responsive" ? ( +
+ OctoBot Node + + + OctoBot Node + +
+ ) : ( +
+ OctoBot Node + {variant === "full" && ( + + OctoBot Node + + )} +
+ ) + + if (!asLink) { + return content + } + + return {content} +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/NewBotCards.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/NewBotCards.tsx new file mode 100644 index 0000000000..85689955b0 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/NewBotCards.tsx @@ -0,0 +1,129 @@ +import { Link } from "@tanstack/react-router" +import { Star, Upload } from "lucide-react" + +import { Button } from "@/components/ui/button" +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from "@/components/ui/card" +import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip" +import { getAssetPath } from "@/lib/utils" + +export function NewBotCards() { + const launchImage = getAssetPath("images/octobot_launching_512.png") + const designImage = getAssetPath("images/octobot_design_512.png") + const labImage = getAssetPath("images/octobot_lab_512.png") + + return ( +
+ + +
+ Launching OctoBot +
+ + Pre-configured setup + + + Start fast with curated presets. + Available soon on octobot.cloud and from the mobile app. + +
+ + + + + + + + Easy to setup + + + +
+ + +
+ Design strategy +
+ + Your own rules + + + Build with your own rules. + Available soon on octobot.cloud and from the mobile app. + +
+ + + + + + + + + Easy to medium setup + + + +
+ + +
+ Custom configuration +
+ + Custom configuration + + + Full control with advanced options. You'll configure everything + after start, including each parameter. + +
+ + + + + + + + + + Advanced setup + + + +
+
+ Already have a saved configuration? + + + Restore from a file + +
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/NotFound.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/NotFound.tsx new file mode 100644 index 0000000000..04f42b8562 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/NotFound.tsx @@ -0,0 +1,31 @@ +import { Link } from "@tanstack/react-router" +import { Button } from "@/components/ui/button" + +const NotFound = () => { + return ( +
+
+
+ + 404 + + Oops! +
+
+ +

+ The page you are looking for was not found. +

+
+ + + +
+
+ ) +} + +export default NotFound diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/SearchInput.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/SearchInput.tsx new file mode 100644 index 0000000000..858bb93072 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/SearchInput.tsx @@ -0,0 +1,30 @@ +import { Search } from "lucide-react" + +import { Input } from "@/components/ui/input" +import { cn } from "@/lib/utils" + +interface SearchInputProps { + value: string + onChange: (value: string) => void + placeholder?: string + className?: string +} + +export function SearchInput({ + value, + onChange, + placeholder, + className, +}: SearchInputProps) { + return ( +
+ + onChange(event.target.value)} + placeholder={placeholder} + className="pl-9" + /> +
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/UserMenu.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/UserMenu.tsx new file mode 100644 index 0000000000..84cf6455b1 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Common/UserMenu.tsx @@ -0,0 +1,25 @@ +import { Link } from "@tanstack/react-router" +import { User } from "lucide-react" + +import useAuth from "@/hooks/useAuth" + +export function UserMenu() { + const { user } = useAuth() + + if (!user) return null + + return ( + + + + + {user?.full_name || user?.email?.slice(0, 8) || "—"} + + ) +} + +export default UserMenu diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotAvatar.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotAvatar.tsx new file mode 100644 index 0000000000..9043eab4f7 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotAvatar.tsx @@ -0,0 +1,37 @@ +import { Bot } from "lucide-react" +import { memo, useState } from "react" + +const RING_RADIUS = 22 +const RING_CIRCUMFERENCE = 2 * Math.PI * RING_RADIUS + +export const BotAvatar = memo(function BotAvatar({ isRunning }: { isRunning: boolean }) { + const [animOffset] = useState(() => `-${(Math.random() * 3).toFixed(2)}s`) + + return ( +
+ {isRunning && ( + <> + + + + + + + + )} +
+ +
+
+ ) +}) diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotCard.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotCard.tsx new file mode 100644 index 0000000000..38b00f1395 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotCard.tsx @@ -0,0 +1,232 @@ +import { Check, Clock, Layers, Lock } from "lucide-react" +import { memo } from "react" + +import type { Task_Output as Task, TaskStatus } from "@/client" +import { Badge } from "@/components/ui/badge" +import { Card, CardContent, CardHeader } from "@/components/ui/card" +import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip" +import { cn } from "@/lib/utils" +import { getActiveExecution, getStatusGroup, hasStartedExecution } from "@/utils/executions" +import { formatDate, formatElapsed, formatIsoTooltip, formatRelativeFuture, parseActionCount } from "@/utils/task-format" +import { getDisplayDate, getStatusVariant, statusLabels } from "@/utils/task-status" +import { BotAvatar } from "./BotAvatar" + +function DateRow({ date }: { date: { label: string; value: string } | null }) { + if (!date) return null + return ( + + + + + {date.label}: {formatDate(date.value)} + + + + {formatIsoTooltip(date.value)} + + + ) +} + +function BotCardBody({ task, isRunning, isScheduled }: { task: Task; isRunning: boolean; isScheduled: boolean }) { + const activeExec = getActiveExecution(task.executions) + const group = getStatusGroup(activeExec?.status) + const date = getDisplayDate(task) + const runCount = task.executions?.length ?? 0 + const completedSteps = task.executions?.filter((e) => e.status === "completed" || e.status === "failed").length ?? 0 + const actionCount = parseActionCount(activeExec?.actions) + + if (group === "active") { + if (isRunning) { + const runningExec = task.executions?.find((e) => e.status === "running") + const elapsedFrom = runningExec?.scheduled_at ?? activeExec?.scheduled_at + return ( + +
+ {activeExec?.type && ( + + {activeExec.type} + + )} + {runCount > 0 && ( + + + {runCount} run{runCount !== 1 ? "s" : ""} + + )} + {completedSteps > 0 && ( + + {completedSteps} done + + )} + {elapsedFrom && ( + + Running {formatElapsed(elapsedFrom)} + + )} +
+ +
+ ) + } + + return ( + +
+ {activeExec?.type && ( + + {activeExec.type} + + )} + {actionCount != null && ( + + {actionCount} action{actionCount !== 1 ? "s" : ""} queued + + )} +
+ {isScheduled && activeExec?.scheduled_at ? ( + + + + + Next run: {formatRelativeFuture(activeExec.scheduled_at)} + + + + {formatIsoTooltip(activeExec.scheduled_at)} + + + ) : ( + + )} +
+ ) + } + + const isFailed = activeExec?.status === "failed" + + return ( + +
+ {activeExec?.type && ( + + {activeExec.type} + + )} + {runCount > 0 && ( + + + {runCount} run{runCount !== 1 ? "s" : ""} + + )} +
+
+ {date ? `${date.label}: ${formatDate(date.value)}` : null} +
+
+ ) +} + +function areTaskPropsEqual( + prev: { task: Task; selected: boolean }, + next: { task: Task; selected: boolean }, +): boolean { + return ( + prev.selected === next.selected && + prev.task.id === next.task.id && + prev.task.name === next.task.name && + prev.task.error === next.task.error && + prev.task.executions?.length === next.task.executions?.length && + JSON.stringify(prev.task.executions) === JSON.stringify(next.task.executions) + ) +} + +export const BotCard = memo(function BotCard({ + task, + selected, + onToggleSelect, +}: { + task: Task + selected: boolean + onToggleSelect: (id: string) => void +}) { + const activeExec = getActiveExecution(task.executions) + const rawStatus = (activeExec?.status ?? "scheduled") as TaskStatus + const group = getStatusGroup(rawStatus) + const hasError = !!task.error + const started = hasStartedExecution(task.executions) + + const label = task.name || activeExec?.name || `OctoBot ${task.id?.slice(0, 6) || "new"}` + const isEncrypted = !!task.executions?.some((e) => e.content_metadata) + + let displayLabel: string + let badgeStatus: TaskStatus + if (hasError) { + displayLabel = "Error" + badgeStatus = "failed" + } else if (group === "active") { + if (rawStatus === "periodic") { + displayLabel = "Recurring" + badgeStatus = "periodic" + } else if (started) { + displayLabel = "Running" + badgeStatus = "running" + } else { + displayLabel = "Scheduled" + badgeStatus = "scheduled" + } + } else { + displayLabel = statusLabels[rawStatus] + badgeStatus = rawStatus + } + + const isRunning = displayLabel === "Running" + const isScheduled = displayLabel === "Scheduled" + + return ( + task.id && onToggleSelect(task.id)} + > + {selected && ( +
+ +
+ )} + +
+ +
+
+ + {label} + {isEncrypted && } + + + {displayLabel} + +
+
+ + ID: {task.id?.slice(0, 12) || "—"} + + {hasError && ( + {task.error} + )} +
+
+
+
+ +
+ ) +}, areTaskPropsEqual) diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotGrid.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotGrid.tsx new file mode 100644 index 0000000000..6c08321587 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotGrid.tsx @@ -0,0 +1,67 @@ +import { Link } from "@tanstack/react-router" +import { Bot, Plus } from "lucide-react" + +import type { Task_Output as Task } from "@/client" +import { Button } from "@/components/ui/button" +import { BotCard } from "./BotCard" + +export function BotGrid({ + tasks, + allTasksEmpty, + selectedIds, + onToggleSelect, +}: { + tasks: Task[] + allTasksEmpty: boolean + selectedIds: Set + onToggleSelect: (id: string) => void +}) { + if (allTasksEmpty) { + return ( +
+ +
+

No OctoBots yet

+

Start your first OctoBot or import a saved configuration.

+
+ +
+ ) + } + + if (tasks.length === 0) { + return ( +
+ +
+

No OctoBots match this filter

+

Try another filter or search term.

+
+ +
+ ) + } + + return ( +
+ {tasks.map((task) => ( + + ))} +
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotsFilterBar.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotsFilterBar.tsx new file mode 100644 index 0000000000..ecf6ed1da4 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/BotsFilterBar.tsx @@ -0,0 +1,61 @@ +import { Search, X } from "lucide-react" + +import { cn } from "@/lib/utils" +import { filters, type TaskFilterGroup } from "@/utils/task-status" + +export function BotsFilterBar({ + filterValue, + searchValue, + counts, + onFilterChange, + onSearchChange, +}: { + filterValue: TaskFilterGroup + searchValue: string + counts: Record + onFilterChange: (value: TaskFilterGroup) => void + onSearchChange: (value: string) => void +}) { + return ( +
+ {filters.map((f) => { + const active = filterValue === f.value + return ( + + ) + })} +
+
+ + onSearchChange(e.target.value)} + placeholder="Search..." + className="h-7 w-32 rounded-md bg-transparent pl-8 pr-2 text-sm text-foreground placeholder:text-muted-foreground/50 focus:outline-none" + /> + {searchValue && ( + + )} +
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/SelectionToolbar.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/SelectionToolbar.tsx new file mode 100644 index 0000000000..825b8e038b --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/OctoBots/SelectionToolbar.tsx @@ -0,0 +1,186 @@ +import { useMutation, useQueryClient } from "@tanstack/react-query" +import { useNavigate } from "@tanstack/react-router" +import { Trash2 } from "lucide-react" +import { useMemo, useState } from "react" + +import type { Task_Output as Task } from "@/client" +import { TasksService } from "@/client" +import { Button } from "@/components/ui/button" +import { + Dialog, + DialogClose, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog" +import { LoadingButton } from "@/components/ui/loading-button" +import useCustomToast from "@/hooks/useCustomToast" +import { loadPassword } from "@/lib/device-key" +import { getTaskFilterGroup } from "@/utils/task-status" + +export function SelectionToolbar({ + selectedIds, + filteredTasks, + allTasks, + onSelectAll, + onDeselectAll, + onDeleted, +}: { + selectedIds: Set + filteredTasks: Task[] + allTasks: Task[] + onSelectAll: () => void + onDeselectAll: () => void + onDeleted: () => void +}) { + const [deleteOpen, setDeleteOpen] = useState(false) + const [shareLogsOpen, setShareLogsOpen] = useState(false) + const [shareLogsLoading, setShareLogsLoading] = useState(false) + const [exportLoading, setExportLoading] = useState(false) + const [shareCreds, setShareCreds] = useState<{ errorId: string; errorSecret: string } | null>(null) + const navigate = useNavigate() + const queryClient = useQueryClient() + const { showSuccessToast, showErrorToast } = useCustomToast() + + const deleteMutation = useMutation({ + mutationFn: () => TasksService.deleteTasks({ taskIds: Array.from(selectedIds) }), + onSuccess: () => { + showSuccessToast(`Deleted ${selectedIds.size} OctoBot${selectedIds.size !== 1 ? "s" : ""}`) + setDeleteOpen(false) + onDeleted() + queryClient.invalidateQueries({ queryKey: ["tasks"] }) + }, + onError: () => { + showErrorToast("Some deletions failed") + }, + }) + + const exportableTasks = useMemo( + () => + allTasks.filter( + (t) => + t.id && + selectedIds.has(t.id) && + getTaskFilterGroup(t) !== "active", + ), + [allTasks, selectedIds], + ) + + const handleExportResults = () => { + if (exportableTasks.length === 0) { + showErrorToast("No results to export for selected OctoBots") + return + } + setExportLoading(true) + const taskIds = exportableTasks.map((t) => t.id).filter(Boolean).join(",") + navigate({ to: "/octobots/export", search: { tasks: taskIds } }) + } + + const handleShareLogs = async () => { + setShareLogsLoading(true) + try { + const username = localStorage.getItem("auth_username") || "node" + const password = (await loadPassword()) ?? "" + const res = await fetch("/api/v1/logs/share", { + method: "POST", + headers: { + Authorization: `Basic ${btoa(`${username}:${password}`)}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ automation_ids: Array.from(selectedIds) }), + }) + const data = await res.json() + if (data.success) { + setShareCreds({ errorId: data.errorId, errorSecret: data.errorSecret }) + setShareLogsOpen(true) + } else { + showErrorToast(data.error ?? "Failed to share logs") + } + } catch { + showErrorToast("Failed to share logs") + } finally { + setShareLogsLoading(false) + } + } + + const allFilteredSelected = filteredTasks.every((t) => t.id && selectedIds.has(t.id)) + + return ( + <> +
+ {selectedIds.size} selected +
+ {!allFilteredSelected && ( + + )} + +
+
+ + Export results + + + Share logs + + +
+
+ + + + + Delete {selectedIds.size} OctoBot{selectedIds.size !== 1 ? "s" : ""} + + This will permanently delete the selected OctoBots. This action cannot be undone. + + + + + + + deleteMutation.mutate()} + > + Delete + + + + + + + + + Logs shared + + Share these credentials with the OctoBot team to help diagnose issues. + + + {shareCreds && ( +
+ Error ID + {shareCreds.errorId} + Error Secret + {shareCreds.errorSecret} +
+ )} + + + + + +
+
+ + ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ExportResultsContent.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ExportResultsContent.tsx new file mode 100644 index 0000000000..11579db0d0 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ExportResultsContent.tsx @@ -0,0 +1,566 @@ +import { + type ColumnDef, + type SortingState, + type ColumnFiltersState, + type VisibilityState, + flexRender, + getCoreRowModel, + getFilteredRowModel, + getSortedRowModel, + useReactTable, +} from "@tanstack/react-table" +import { ArrowLeft, ArrowUpDown, Download, Eye, EyeOff, Loader2, Plus, Search, Upload, X } from "lucide-react" +import { useCallback, useEffect, useMemo, useRef, useState } from "react" + +import type { Task_Output as Task } from "@/client" +import { Badge } from "@/components/ui/badge" +import { Button } from "@/components/ui/button" +import { Input } from "@/components/ui/input" +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select" +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table" +import { generateCSV, downloadCSV } from "@/lib/csv" +import { + EXPORT_TEMPLATES, + getAllExportTemplates, + validateExportTemplateJson, + saveUserExportTemplate, + getLastUsedExportTemplateId, + setLastUsedExportTemplateId, + type ExportColumnDef, +} from "@/lib/export-templates" +import useCustomToast from "@/hooks/useCustomToast" +import { decryptAndVerify, type ClientKeys } from "@/lib/client-encryption" +import { + extractValue, + discoverPaths, + formatCellValue, +} from "@/lib/json-path" +import { loadClientKeys } from "@/lib/device-key" +import { fetchServerPublicKeys } from "@/lib/server-keys" +import { getActiveExecution } from "@/utils/executions" + +// ── Types ────────────────────────────────────────────────────────────── + +interface ExportRow { + taskId: string + /** Parsed result JSON */ + resultData: Record + /** Task-level metadata injected as special keys */ + meta: Record +} + +export interface ExportResultsContentProps { + tasks: Task[] + onClose?: () => void +} + +// ── Helpers ──────────────────────────────────────────────────────────── + +/** Resolve a column's jsonPath against a row, checking meta keys first. */ +function resolveValue( + row: ExportRow, + jsonPath: string, +): unknown { + if (jsonPath.startsWith("__") && jsonPath.endsWith("__")) { + return row.meta[jsonPath] + } + return extractValue(row.resultData, jsonPath) +} + +function buildExportRows(tasks: Task[]): ExportRow[] { + return tasks + .map((task) => { + const activeExec = getActiveExecution(task.executions) + let resultData: Record = {} + try { + const parsed = activeExec?.result ? JSON.parse(activeExec.result) : {} + resultData = + typeof parsed === "object" && parsed !== null && !Array.isArray(parsed) + ? parsed + : { result: parsed } + } catch { + resultData = { result: activeExec?.result ?? "" } + } + + return { + taskId: task.id ?? "", + resultData, + meta: { + __task_name__: task.name ?? "", + __exec_status__: activeExec?.status ?? "", + __task_status__: task.error ? "errored" : (activeExec?.status ?? ""), + __task_error__: task.error ?? "", + __exec_type__: activeExec?.type ?? "", + __exec_completed_at__: activeExec?.completed_at ?? "", + __exec_result_metadata__: activeExec?.result_metadata ?? "", + }, + } + }) +} + +function buildColumnsForFullDetails( + rows: ExportRow[], +): ExportColumnDef[] { + // Discover all paths across all rows + const allPaths = new Set() + for (const row of rows) { + for (const path of discoverPaths(row.resultData)) { + allPaths.add(path) + } + } + + const metaCols: ExportColumnDef[] = [ + { key: "name", label: "Name", jsonPath: "__task_name__" }, + { key: "status", label: "Status", jsonPath: "__exec_status__" }, + ] + + const dataCols: ExportColumnDef[] = Array.from(allPaths) + .sort() + .map((path) => ({ + key: `data_${path}`, + label: path, + jsonPath: path, + })) + + return [...metaCols, ...dataCols] +} + +// ── Component ────────────────────────────────────────────────────────── + +export default function ExportResultsContent({ + tasks, + onClose, +}: ExportResultsContentProps) { + const { showSuccessToast, showErrorToast } = useCustomToast() + const fileInputRef = useRef(null) + const [userTemplatesVersion, setUserTemplatesVersion] = useState(0) + const [selectedTemplateId, setSelectedTemplateId] = useState(() => getLastUsedExportTemplateId() ?? "general") + const [customColumns, setCustomColumns] = useState([]) + const [addColumnPath, setAddColumnPath] = useState("") + const [addColumnLabel, setAddColumnLabel] = useState("") + const [sorting, setSorting] = useState([]) + const [columnFilters, setColumnFilters] = useState([]) + const [columnVisibility, setColumnVisibility] = useState({}) + const [globalFilter, setGlobalFilter] = useState("") + + const exportRows = useMemo(() => buildExportRows(tasks), [tasks]) + const [decryptedRows, setDecryptedRows] = useState(null) + const [isDecrypting, setIsDecrypting] = useState(false) + + const showErrorToastRef = useRef(showErrorToast) + showErrorToastRef.current = showErrorToast + + const encryptedTaskCount = useMemo( + () => tasks.filter((t) => getActiveExecution(t.executions)?.result_metadata).length, + [tasks], + ) + + useEffect(() => { + let cancelled = false + async function tryDecryptAll() { + const rawKeys = await loadClientKeys() + if (!rawKeys?.rsa_private?.trim()) return + const keys = rawKeys as ClientKeys + let serverKeys: { rsa_public: string; ecdsa_public: string } + try { + serverKeys = await fetchServerPublicKeys() + } catch { + showErrorToastRef.current("Failed to fetch server keys — encrypted results cannot be decrypted") + return + } + if (cancelled) return + setDecryptedRows(null) + setIsDecrypting(true) + let failCount = 0 + try { + const base = buildExportRows(tasks) + const rows = await Promise.all( + tasks.map(async (task, i) => { + const activeExec = getActiveExecution(task.executions) + if (!activeExec?.result_metadata || !activeExec?.result) return base[i] + try { + const decrypted = await decryptAndVerify(activeExec.result, activeExec.result_metadata, keys, serverKeys.ecdsa_public) + let resultData: Record = {} + try { + const parsed: unknown = JSON.parse(decrypted) + resultData = + typeof parsed === "object" && parsed !== null && !Array.isArray(parsed) + ? (parsed as Record) + : { result: parsed } + } catch { + resultData = { result: decrypted } + } + return { ...base[i], resultData } + } catch { + failCount++ + return base[i] + } + }), + ) + if (!cancelled) { + setDecryptedRows(rows) + if (failCount > 0) showErrorToastRef.current(`Failed to decrypt ${failCount} result(s) — check your keys`) + } + } finally { + if (!cancelled) setIsDecrypting(false) + } + } + tryDecryptAll() + return () => { cancelled = true } + }, [tasks]) + + const displayRows = decryptedRows ?? exportRows + + const discoveredPaths = useMemo(() => { + const paths = new Set() + for (const row of displayRows) { + for (const path of discoverPaths(row.resultData)) { + paths.add(path) + } + } + return Array.from(paths).sort() + }, [displayRows]) + + const activeTemplate = useMemo( + () => getAllExportTemplates().find((t) => t.id === selectedTemplateId), + [selectedTemplateId, userTemplatesVersion], + ) + + const templateColumns = useMemo((): ExportColumnDef[] => { + if (selectedTemplateId === "full") { + return buildColumnsForFullDetails(displayRows) + } + return activeTemplate?.columns ?? [] + }, [activeTemplate, selectedTemplateId, displayRows]) + + const allColumns = useMemo( + () => [...templateColumns, ...customColumns], + [templateColumns, customColumns], + ) + + const tableColumns = useMemo((): ColumnDef[] => { + return allColumns.map((col) => ({ + id: col.key, + accessorFn: (row: ExportRow) => { + const val = resolveValue(row, col.jsonPath) + return formatCellValue(val, col.formatter) + }, + header: ({ column }) => ( + + ), + cell: ({ getValue }) => ( + + {String(getValue() ?? "")} + + ), + enableColumnFilter: true, + filterFn: "includesString", + })) + }, [allColumns]) + + const table = useReactTable({ + data: displayRows, + columns: tableColumns, + state: { sorting, columnFilters, columnVisibility, globalFilter }, + onSortingChange: setSorting, + onColumnFiltersChange: setColumnFilters, + onColumnVisibilityChange: setColumnVisibility, + onGlobalFilterChange: setGlobalFilter, + getCoreRowModel: getCoreRowModel(), + getSortedRowModel: getSortedRowModel(), + getFilteredRowModel: getFilteredRowModel(), + globalFilterFn: "includesString", + }) + + const handleTemplateFileChange = useCallback( + async (e: React.ChangeEvent) => { + const file = e.target.files?.[0] + if (!file) return + e.target.value = "" + try { + const text = await file.text() + const json: unknown = JSON.parse(text) + const def = validateExportTemplateJson(json) + saveUserExportTemplate(def) + setUserTemplatesVersion((v) => v + 1) + showSuccessToast(`Export template "${def.label}" imported`) + } catch (err) { + showErrorToast(err instanceof Error ? err.message : "Invalid template file") + } + }, + [showSuccessToast, showErrorToast], + ) + + const handleAddColumn = useCallback(() => { + const path = addColumnPath.trim() + const label = addColumnLabel.trim() || path + if (!path) return + + setCustomColumns((prev) => [ + ...prev, + { + key: `custom_${Date.now()}`, + label, + jsonPath: path, + isCustom: true, + }, + ]) + setAddColumnPath("") + setAddColumnLabel("") + }, [addColumnPath, addColumnLabel]) + + const handleRemoveCustomColumn = useCallback((key: string) => { + setCustomColumns((prev) => prev.filter((c) => c.key !== key)) + }, []) + + const handleExport = useCallback(() => { + const visibleRows = table.getFilteredRowModel().rows + const headers = allColumns.map((c) => c.label) + const csvRows = visibleRows.map((row) => + allColumns.map((col) => { + const val = resolveValue(row.original, col.jsonPath) + return formatCellValue(val, col.formatter) + }), + ) + const csv = generateCSV(headers, csvRows) + downloadCSV(csv, `task-results-${new Date().toISOString().split("T")[0]}`) + if (encryptedTaskCount > 0 && decryptedRows === null && !isDecrypting) { + showErrorToast(`${encryptedTaskCount} encrypted result(s) exported as raw ciphertext — configure browser keys in Settings to decrypt`) + } else { + showSuccessToast(`Exported ${visibleRows.length} row(s)`) + } + }, [table, allColumns, encryptedTaskCount, decryptedRows, isDecrypting, showErrorToast, showSuccessToast]) + + return ( +
+ {/* Controls row */} +
+ + + + +
+ + setGlobalFilter(e.target.value)} + placeholder="Search all columns..." + className="h-8 w-48 pl-8 text-xs" + /> + {globalFilter && ( + + )} +
+ + {/* Column visibility toggles */} +
+ {allColumns.map((col) => { + const isVisible = + columnVisibility[col.key] !== false + return ( + + ) + })} +
+
+ + {/* Add custom column */} +
+ setAddColumnLabel(e.target.value)} + placeholder="Column label" + className="h-7 w-32 text-xs" + /> + + + + {customColumns.length > 0 && ( +
+ {customColumns.map((col) => ( + handleRemoveCustomColumn(col.key)} + > + {col.label} + + ))} +
+ )} +
+ + {/* Data table */} +
+ + + {table.getHeaderGroups().map((headerGroup) => ( + + {headerGroup.headers.map((header) => ( + + {header.isPlaceholder + ? null + : flexRender( + header.column.columnDef.header, + header.getContext(), + )} + + ))} + + ))} + + + {table.getRowModel().rows.length === 0 ? ( + + + No data to display + + + ) : ( + table.getRowModel().rows.map((row) => ( + + {row.getVisibleCells().map((cell) => ( + + {flexRender( + cell.column.columnDef.cell, + cell.getContext(), + )} + + ))} + + )) + )} + +
+
+ +

+ {isDecrypting && } + {isDecrypting ? "Decrypting results…" : ( + <>Showing {table.getFilteredRowModel().rows.length} of {displayRows.length} rows + )} +

+ + {/* Footer */} +
+ {onClose && ( + + )} + +
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/ColumnMappingStep.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/ColumnMappingStep.tsx new file mode 100644 index 0000000000..5443029b69 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/ColumnMappingStep.tsx @@ -0,0 +1,452 @@ +import { + createColumnHelper, + flexRender, + getCoreRowModel, + useReactTable, +} from "@tanstack/react-table" +import { Trash2 } from "lucide-react" +import { useCallback, useMemo, useRef, useState } from "react" + +import { Button } from "@/components/ui/button" +import { Input } from "@/components/ui/input" +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select" +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table" +import useCustomToast from "@/hooks/useCustomToast" +import { isParamValueValid, type ActionParamDef } from "@/lib/action-templates" +import { + getAllTemplates, + getTemplateById, + getLastUsedImportTemplateId, + setLastUsedImportTemplateId, + resolveMetaTemplate, + saveUserMetaTemplate, + validateMetaTemplateJson, +} from "@/lib/meta-templates" +import { + type ColumnMapping, + type RowDetectionResult, + detectColumnsAndTemplates, + detectMappingsForTemplate, + buildParamValuesForRow, +} from "@/lib/column-detector" + +const SENSITIVE_HEADER_PATTERNS = /\b(key|private|secret|password|mnemonic|seed|pk)\b/i + +function isSensitiveHeader(header: string): boolean { + return SENSITIVE_HEADER_PATTERNS.test(header) +} + +// ── Types ────────────────────────────────────────────────────────────── + +export interface ActionRow { + rowIndex: number + templateId: string + paramValues: Record + mappings: ColumnMapping[] + unmappedColumns: number[] + /** User-provided task name (optional) */ + name: string +} + +export interface ColumnMappingStepProps { + headers: string[] + rows: string[][] + onConfirm: (actions: ActionRow[]) => void + onBack: () => void +} + +interface RowParamsCellProps { + actionRow: ActionRow + headers: string[] + rows: string[][] + onParamChange: (rowIndex: number, paramKey: string, value: string) => void +} + +function RowParamsCell({ actionRow, headers, rows, onParamChange }: RowParamsCellProps) { + const { requiredParams, sortedOptional } = useMemo(() => { + const template = getTemplateById(actionRow.templateId) + const byLabel = (a: ActionParamDef, b: ActionParamDef) => a.label.localeCompare(b.label) + return { + requiredParams: (template?.params.filter((p) => p.required && !p.hidden) ?? []).sort(byLabel), + sortedOptional: (template?.params.filter((p) => !p.required && !p.hidden) ?? []).sort(byLabel), + } + }, [actionRow.templateId]) + + const filledOptional = sortedOptional.filter( + (p) => isParamValueValid(p, actionRow.paramValues[p.key]), + ) + const emptyOptional = sortedOptional.filter( + (p) => !isParamValueValid(p, actionRow.paramValues[p.key]), + ) + + const renderParam = (param: ActionParamDef) => { + const value = actionRow.paramValues[param.key] ?? "" + return ( +
+ + {param.label} + {param.required && *}: + + onParamChange(actionRow.rowIndex, param.key, e.target.value)} + type={param.type === "number" ? "number" : param.type === "password" ? "password" : "text"} + placeholder={param.type === "numberOrDate" ? `${param.label} (number or date)` : param.label} + className="h-6 text-xs w-28" + /> +
+ ) + } + + return ( +
+
+ {requiredParams.map(renderParam)} + {filledOptional.map(renderParam)} +
+ {emptyOptional.length > 0 && ( +
+ + {emptyOptional.length} optional parameter{emptyOptional.length !== 1 ? "s" : ""} + +
+ {emptyOptional.map(renderParam)} +
+
+ )} + {actionRow.unmappedColumns.length > 0 && ( +
+ + {actionRow.unmappedColumns.length} unmapped column + {actionRow.unmappedColumns.length !== 1 ? "s" : ""} + +
+ {actionRow.unmappedColumns.map((colIdx) => { + const header = headers[colIdx] ?? "" + const value = rows[actionRow.rowIndex]?.[colIdx] ?? "" + const masked = isSensitiveHeader(header) + return ( + + {header}:{" "} + {masked ? "\u2022\u2022\u2022\u2022\u2022\u2022" : value} + + ) + })} +
+
+ )} +
+ ) +} + +const columnHelper = createColumnHelper() + +export default function ColumnMappingStep({ + headers, + rows, + onConfirm, + onBack, +}: ColumnMappingStepProps) { + const { showSuccessToast, showErrorToast } = useCustomToast() + const fileInputRef = useRef(null) + // Increment to force re-render after a user template is imported (getAllTemplates reads localStorage) + const [, setUserTemplatesVersion] = useState(0) + + // Run initial detection + const initialDetection = useMemo( + () => detectColumnsAndTemplates(headers, rows, getLastUsedImportTemplateId() ?? undefined), + [headers, rows], + ) + + const [actionRows, setActionRows] = useState(() => { + const nameIdx = headers.findIndex((h) => h.trim().toLowerCase() === "name") + return initialDetection.map((det, idx) => { + const nameFromCsv = nameIdx >= 0 ? (rows[idx]?.[nameIdx] ?? "").trim() : "" + const unmappedColumns = + nameIdx >= 0 && det.unmappedColumns.includes(nameIdx) + ? det.unmappedColumns.filter((i) => i !== nameIdx) + : det.unmappedColumns + return { + rowIndex: idx, + templateId: det.templateId, + paramValues: det.paramValues, + mappings: det.mappings, + unmappedColumns, + name: nameFromCsv || `Action ${idx + 1}`, + } + }) + }) + + const updateRow = useCallback( + (rowIndex: number, update: Partial) => { + setActionRows((prev) => + prev.map((row) => + row.rowIndex === rowIndex ? { ...row, ...update } : row, + ), + ) + }, + [], + ) + + const deleteRow = useCallback((rowIndex: number) => { + setActionRows((prev) => prev.filter((row) => row.rowIndex !== rowIndex)) + }, []) + + const handleTemplateChange = useCallback( + (rowIndex: number, newTemplateId: string) => { + const template = getTemplateById(newTemplateId) + if (!template) return + + const newMappings = detectMappingsForTemplate(template, headers, rows) + const csvRow = rows[rowIndex] + if (!csvRow) return + + const newParamValues = buildParamValuesForRow(csvRow, newMappings, template) + const mappedCols = new Set(newMappings.map((m) => m.columnIndex)) + const unmappedColumns = headers + .map((_, i) => i) + .filter((i) => !mappedCols.has(i)) + + setLastUsedImportTemplateId(newTemplateId) + updateRow(rowIndex, { + templateId: newTemplateId, + mappings: newMappings, + paramValues: newParamValues, + unmappedColumns, + }) + }, + [headers, rows, updateRow], + ) + + const handleParamChange = useCallback( + (rowIndex: number, paramKey: string, value: string) => { + setActionRows((prev) => + prev.map((row) => { + if (row.rowIndex !== rowIndex) return row + return { + ...row, + paramValues: { ...row.paramValues, [paramKey]: value }, + } + }), + ) + }, + [], + ) + + const handleNameChange = useCallback( + (rowIndex: number, name: string) => { + updateRow(rowIndex, { name }) + }, + [updateRow], + ) + + const handleTemplateFileChange = useCallback( + async (e: React.ChangeEvent) => { + const file = e.target.files?.[0] + if (!file) return + e.target.value = "" + try { + const text = await file.text() + const json: unknown = JSON.parse(text) + const def = validateMetaTemplateJson(json) + resolveMetaTemplate(def) // validate it resolves without errors + saveUserMetaTemplate(def) + setUserTemplatesVersion((v) => v + 1) + showSuccessToast(`Template "${def.label}" imported`) + } catch (err) { + showErrorToast(err instanceof Error ? err.message : "Invalid template file") + } + }, + [showSuccessToast, showErrorToast], + ) + + // Build dynamic columns based on the union of all param keys across rows + const columns = useMemo(() => { + const cols = [ + columnHelper.display({ + id: "row_number", + header: "#", + cell: (info) => ( + + {info.row.index + 1} + + ), + size: 40, + maxSize: 40, + }), + columnHelper.accessor("name", { + header: "Name", + cell: (info) => ( + + handleNameChange(info.row.original.rowIndex, e.target.value) + } + className="h-7 text-xs w-32" + /> + ), + size: 140, + maxSize: 140, + }), + columnHelper.accessor("templateId", { + header: "Action Template", + cell: (info) => ( + + ), + size: 160, + maxSize: 160, + }), + ] + + return cols + }, [handleNameChange, handleTemplateChange]) + + const table = useReactTable({ + data: actionRows, + columns, + getCoreRowModel: getCoreRowModel(), + }) + + return ( +
+
+
+

+ {actionRows.length} action{actionRows.length !== 1 ? "s" : ""}{" "} + detected +

+

+ Review auto-detected templates and parameter mappings. Edit values + or change templates as needed. +

+
+
+ + +
+
+ +
+ + + + {table.getHeaderGroups().map((headerGroup) => + headerGroup.headers.map((header) => ( + + {header.isPlaceholder + ? null + : flexRender( + header.column.columnDef.header, + header.getContext(), + )} + + )), + )} + Parameters + + + + + {table.getRowModel().rows.map((row) => { + const actionRow = row.original + + return ( + + {row.getVisibleCells().map((cell) => ( + + {flexRender( + cell.column.columnDef.cell, + cell.getContext(), + )} + + ))} + + + + + + + + ) + })} + +
+
+ +
+ + +
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/CsvUploadStep.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/CsvUploadStep.tsx new file mode 100644 index 0000000000..47134c9572 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/CsvUploadStep.tsx @@ -0,0 +1,170 @@ +import { Upload, FileText } from "lucide-react" +import { useRef, useState } from "react" + +import { Button } from "@/components/ui/button" +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table" +import useCustomToast from "@/hooks/useCustomToast" +import { parseCSVRaw, isValidCSVFile, type CSVRawResult } from "@/lib/csv" + +export interface CsvUploadStepProps { + onParsed: (result: CSVRawResult) => void +} + +const MAX_PREVIEW_ROWS = 5 +const MAX_FILE_SIZE = 10 * 1024 * 1024 // 10 MB +const MAX_IMPORT_ROWS = 500 + +export default function CsvUploadStep({ onParsed }: CsvUploadStepProps) { + const [selectedFile, setSelectedFile] = useState(null) + const [preview, setPreview] = useState(null) + const [isParsing, setIsParsing] = useState(false) + const fileInputRef = useRef(null) + const { showErrorToast } = useCustomToast() + + const handleFileSelect = async ( + event: React.ChangeEvent, + ) => { + const file = event.target.files?.[0] + if (!file) return + + if (!isValidCSVFile(file)) { + showErrorToast("File must be a CSV file") + return + } + + if (file.size > MAX_FILE_SIZE) { + showErrorToast("File too large (max 10 MB)") + return + } + + setSelectedFile(file) + setIsParsing(true) + + try { + const text = await file.text() + const result = parseCSVRaw(text) + if (result.rows.length === 0) { + showErrorToast("No data rows found in the CSV file") + setSelectedFile(null) + setIsParsing(false) + return + } + if (result.rows.length > MAX_IMPORT_ROWS) { + result.rows = result.rows.slice(0, MAX_IMPORT_ROWS) + showErrorToast( + `CSV has more than ${MAX_IMPORT_ROWS} rows. Only the first ${MAX_IMPORT_ROWS} will be imported.`, + ) + } + setPreview(result) + } catch (error) { + showErrorToast( + error instanceof Error ? error.message : "Failed to parse CSV file", + ) + setSelectedFile(null) + setPreview(null) + } finally { + setIsParsing(false) + } + } + + const reset = () => { + setSelectedFile(null) + setPreview(null) + if (fileInputRef.current) { + fileInputRef.current.value = "" + } + } + + return ( +
+ + + {preview && ( + <> +
+

+ Preview (first {Math.min(MAX_PREVIEW_ROWS, preview.rows.length)} of{" "} + {preview.rows.length} rows): +

+
+ + + + {preview.headers.map((header, i) => ( + {header} + ))} + + + + {preview.rows.slice(0, MAX_PREVIEW_ROWS).map((row, rowIdx) => ( + + {preview.headers.map((_, colIdx) => ( + + {row[colIdx] ?? ""} + + ))} + + ))} + +
+
+
+ +
+ + +
+ + )} +
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/EncryptStep.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/EncryptStep.tsx new file mode 100644 index 0000000000..b2df5426b7 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/EncryptStep.tsx @@ -0,0 +1,223 @@ +import { AlertTriangle, KeyRound, Lock, LockOpen, ShieldCheck } from "lucide-react" +import { useEffect, useState } from "react" + +import type { Task_Output as Task } from "@/client" +import { NodesService } from "@/client" +import { Button } from "@/components/ui/button" +import { LoadingButton } from "@/components/ui/loading-button" +import { isParamValueValid } from "@/lib/action-templates" +import { getTemplateById } from "@/lib/meta-templates" +import { hasStoredClientKeys, loadClientKeys } from "@/lib/device-key" +import { encryptAndSign, derivePublicPemsFromPrivates } from "@/lib/client-encryption" +import type { ClientKeys } from "@/lib/client-encryption" +import { fetchServerPublicKeys } from "@/lib/server-keys" +import useCustomToast from "@/hooks/useCustomToast" +import type { ActionRow } from "./ColumnMappingStep" + +export interface EncryptStepProps { + actions: ActionRow[] + onImport: (tasks: Task[]) => void + onBack: () => void + isImporting: boolean +} + +function getValidActions(actions: ActionRow[]): ActionRow[] { + return actions.filter((action) => { + const template = getTemplateById(action.templateId) + if (!template) return false + return template.params.every( + (p) => !p.required || p.hidden || isParamValueValid(p, action.paramValues[p.key]), + ) + }) +} + +function buildContentString(action: ActionRow): string { + const template = getTemplateById(action.templateId) + const actions = template?.actionTypes.join(",") ?? "" + return JSON.stringify({ ...action.paramValues, ACTIONS: actions }) +} + +export default function EncryptStep({ + actions, + onImport, + onBack, + isImporting, +}: EncryptStepProps) { + const [encryptionEnabled, setEncryptionEnabled] = useState(null) + const [envVars, setEnvVars] = useState([]) + const [clientKeysStored, setClientKeysStored] = useState(false) + const validActions = getValidActions(actions) + const { showErrorToast } = useCustomToast() + + useEffect(() => { + NodesService.getNodeConfig() + .then((data) => { + const d = data as { tasks_encryption_enabled?: boolean; server_encryption_env_vars?: string[] } + setEncryptionEnabled(d.tasks_encryption_enabled ?? false) + setEnvVars(d.server_encryption_env_vars ?? []) + }) + .catch(() => setEncryptionEnabled(false)) + hasStoredClientKeys().then(setClientKeysStored) + }, []) + + const buildPlaintextTasks = (): Task[] => + validActions.map((action) => ({ + name: action.name, + content: buildContentString(action), + type: "execute_actions", + })) + + const handleImportWithEncryption = async () => { + try { + const clientKeys = await loadClientKeys() + if (!clientKeys) throw new Error("Browser keys not configured — add them in Settings") + const serverKeys = await fetchServerPublicKeys() + const { rsa_public_pem, ecdsa_public_pem } = await derivePublicPemsFromPrivates(clientKeys as ClientKeys) + const tasks: Task[] = await Promise.all( + validActions.map(async (action) => { + const { content, content_metadata } = await encryptAndSign( + buildContentString(action), + clientKeys as ClientKeys, + serverKeys.rsa_public, + ) + return { + name: action.name, + content, + content_metadata, + type: "execute_actions", + user_rsa_public_key: rsa_public_pem, + user_ecdsa_public_key: ecdsa_public_pem, + } + }), + ) + onImport(tasks) + } catch (err) { + showErrorToast(err instanceof Error ? err.message : "Encryption failed") + } + } + + const handleImportWithoutEncryption = () => { + onImport(buildPlaintextTasks()) + } + + return ( +
+
+

+ Encrypt & Import +

+

+ Encryption adds an additional security layer by protecting all action parameters before they are stored. +

+
+ + {encryptionEnabled === null ? ( +

Checking encryption status...

+ ) : encryptionEnabled ? ( +
+
+ +
+

Server encryption is enabled

+

+ Task content will be encrypted before submission using hybrid + encryption (AES-256-GCM + RSA-4096 + ECDSA). +

+
+
+ + {clientKeysStored ? ( +
+ +
+

Client decryption keys configured

+

+ Task results can be decrypted in the browser. +

+
+
+ ) : ( +
+ +
+

Client decryption keys not configured

+

+ Task will be imported without encryption. Configure browser keys in Settings to enable encrypted import. +

+
+
+ )} + +
+ + + {clientKeysStored ? : } + Import {validActions.length} Action{validActions.length !== 1 ? "s" : ""} + +
+
+ ) : ( +
+
+ +
+

Server encryption is not configured

+

+ Set the following environment variables and restart to enable task encryption: +

+
    + {envVars.map((v) => ( +
  • {v}
  • + ))} +
+
+
+ + {clientKeysStored ? ( +
+ +
+

Client decryption keys configured

+

+ Task results can be decrypted in the browser once server encryption is enabled. +

+
+
+ ) : ( +
+ +
+

Client decryption keys not configured

+

+ Configure browser keys in Settings to decrypt task results in the browser. +

+
+
+ )} + +
+ + + + Import {validActions.length} Action{validActions.length !== 1 ? "s" : ""} + +
+
+ )} +
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/ReviewStep.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/ReviewStep.tsx new file mode 100644 index 0000000000..ef23668431 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportSteps/ReviewStep.tsx @@ -0,0 +1,159 @@ +import { AlertCircle, CheckCircle2 } from "lucide-react" +import { useMemo } from "react" + +import { Badge } from "@/components/ui/badge" +import { Button } from "@/components/ui/button" +import { + Card, + CardContent, + CardHeader, +} from "@/components/ui/card" +import { isParamValueValid } from "@/lib/action-templates" +import { getTemplateById } from "@/lib/meta-templates" +import type { ActionRow } from "./ColumnMappingStep" + +export interface ReviewStepProps { + actions: ActionRow[] + onNext: () => void + onBack: () => void +} + +interface ValidationResult { + isValid: boolean + missingParams: string[] +} + +function validateAction(action: ActionRow): ValidationResult { + const template = getTemplateById(action.templateId) + if (!template) return { isValid: false, missingParams: ["Unknown template"] } + + const missingParams: string[] = [] + for (const param of template.params) { + if (param.required && !param.hidden) { + if (!isParamValueValid(param, action.paramValues[param.key])) { + missingParams.push(param.label) + } + } + } + + return { + isValid: missingParams.length === 0, + missingParams, + } +} + +export default function ReviewStep({ + actions, + onNext, + onBack, +}: ReviewStepProps) { + const validations = useMemo( + () => actions.map((action) => ({ + action, + validation: validateAction(action), + })), + [actions], + ) + + const validCount = validations.filter((v) => v.validation.isValid).length + const invalidCount = validations.filter((v) => !v.validation.isValid).length + const allValid = invalidCount === 0 + + return ( +
+
+

+ {actions.length} action{actions.length !== 1 ? "s" : ""} ready for + import +

+

+ {validCount} valid + {invalidCount > 0 && ( + + , {invalidCount} with missing required parameters + + )} +

+
+ +
+ {validations.map(({ action, validation }) => { + const template = getTemplateById(action.templateId) + const visibleParams = template?.params.filter((p) => !p.hidden) ?? [] + const filledCount = visibleParams.filter( + (p) => isParamValueValid(p, action.paramValues[p.key]), + ).length + const totalParams = visibleParams.length + + return ( + + +
+
+ {validation.isValid ? ( + + ) : ( + + )} + {action.name} + + {template?.label ?? action.templateId} + +
+ + {filledCount}/{totalParams} params filled + +
+
+ +
+ {template?.params + .filter((p) => !p.hidden && isParamValueValid(p, action.paramValues[p.key])) + .map((param) => ( + + + {param.label}: + {" "} + + {param.sensitive + ? "\u2022\u2022\u2022\u2022\u2022\u2022" + : action.paramValues[param.key]} + + + ))} +
+ {!validation.isValid && ( +

+ Missing: {validation.missingParams.join(", ")} +

+ )} +
+
+ ) + })} +
+ +
+ + +
+
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportTask.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportTask.tsx new file mode 100644 index 0000000000..71d786fa82 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/Tasks/ImportTask.tsx @@ -0,0 +1,157 @@ +import { useMutation, useQueryClient } from "@tanstack/react-query" +import { useState } from "react" + +import { type Task_Output as Task, TasksService } from "@/client" +import useCustomToast from "@/hooks/useCustomToast" +import type { CSVRawResult } from "@/lib/csv" + +import CsvUploadStep from "./ImportSteps/CsvUploadStep" +import ColumnMappingStep, { + type ActionRow, +} from "./ImportSteps/ColumnMappingStep" +import EncryptStep from "./ImportSteps/EncryptStep" +import ReviewStep from "./ImportSteps/ReviewStep" + +export interface ImportTaskProps { + onSuccess?: () => void +} + +type ImportStep = "upload" | "mapping" | "review" | "encrypt" + +const STEP_LABELS: Record = { + upload: "Upload CSV", + mapping: "Map Columns", + review: "Review", + encrypt: "Import", +} + +const STEPS: ImportStep[] = ["upload", "mapping", "review", "encrypt"] + +export default function ImportTask({ onSuccess }: ImportTaskProps) { + const [currentStep, setCurrentStep] = useState("upload") + const [csvData, setCsvData] = useState(null) + const [actionRows, setActionRows] = useState([]) + const queryClient = useQueryClient() + const { showSuccessToast, showErrorToast } = useCustomToast() + + const createTaskMutation = useMutation({ + mutationFn: (data: Array) => + TasksService.createTasks({ requestBody: data }), + }) + + const handleCsvParsed = (result: CSVRawResult) => { + setCsvData(result) + setCurrentStep("mapping") + } + + const handleMappingConfirm = (rows: ActionRow[]) => { + setActionRows(rows) + setCurrentStep("review") + } + + const handleImport = async (tasks: Task[]) => { + if (tasks.length === 0) { + showErrorToast("No actions to import") + return + } + + try { + const result = await createTaskMutation.mutateAsync(tasks) + const [successCount, errorCount] = result as [number, number] + + if (successCount > 0) { + showSuccessToast( + `Successfully imported ${successCount} action${successCount > 1 ? "s" : ""}${ + errorCount > 0 ? ` (${errorCount} failed)` : "" + }`, + ) + } else { + showErrorToast("Failed to import actions") + } + + queryClient.invalidateQueries({ queryKey: ["tasks"] }) + onSuccess?.() + } catch { + showErrorToast("An error occurred during import") + } + } + + const currentStepIndex = STEPS.indexOf(currentStep) + + return ( +
+ {/* Step indicator */} +
+ {STEPS.map((step, index) => { + const isActive = index === currentStepIndex + const isCompleted = index < currentStepIndex + + return ( +
+ {index > 0 && ( +
+ )} +
+
+ {index + 1} +
+ + {STEP_LABELS[step]} + +
+
+ ) + })} +
+ + {/* Step content */} + {currentStep === "upload" && ( + + )} + + {currentStep === "mapping" && csvData && ( + setCurrentStep("upload")} + /> + )} + + {currentStep === "review" && ( + setCurrentStep("encrypt")} + onBack={() => setCurrentStep("mapping")} + /> + )} + + {currentStep === "encrypt" && ( + setCurrentStep("review")} + isImporting={createTaskMutation.isPending} + /> + )} +
+ ) +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/theme-provider.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/theme-provider.tsx new file mode 100644 index 0000000000..a582b28cf1 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/theme-provider.tsx @@ -0,0 +1,115 @@ +import { + createContext, + useCallback, + useContext, + useEffect, + useState, +} from "react" + +export type Theme = "dark" | "light" | "system" + +type ThemeProviderProps = { + children: React.ReactNode + defaultTheme?: Theme + storageKey?: string +} + +type ThemeProviderState = { + theme: Theme + resolvedTheme: "dark" | "light" + setTheme: (theme: Theme) => void +} + +const initialState: ThemeProviderState = { + theme: "system", + resolvedTheme: "light", + setTheme: () => null, +} + +const ThemeProviderContext = createContext(initialState) + +export function ThemeProvider({ + children, + defaultTheme = "system", + storageKey = "vite-ui-theme", + ...props +}: ThemeProviderProps) { + const [theme, setTheme] = useState( + () => (localStorage.getItem(storageKey) as Theme) || defaultTheme, + ) + + const getResolvedTheme = useCallback((theme: Theme): "dark" | "light" => { + if (theme === "system") { + return window.matchMedia("(prefers-color-scheme: dark)").matches + ? "dark" + : "light" + } + return theme + }, []) + + const [resolvedTheme, setResolvedTheme] = useState<"dark" | "light">(() => + getResolvedTheme(theme), + ) + + const updateTheme = useCallback((newTheme: Theme) => { + const root = window.document.documentElement + + root.classList.remove("light", "dark") + + if (newTheme === "system") { + const systemTheme = window.matchMedia("(prefers-color-scheme: dark)") + .matches + ? "dark" + : "light" + + root.classList.add(systemTheme) + return + } + + root.classList.add(newTheme) + }, []) + + useEffect(() => { + updateTheme(theme) + setResolvedTheme(getResolvedTheme(theme)) + + const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)") + + const handleChange = () => { + if (theme === "system") { + updateTheme("system") + setResolvedTheme(getResolvedTheme("system")) + } + } + + mediaQuery.addEventListener("change", handleChange) + + return () => { + mediaQuery.removeEventListener("change", handleChange) + } + }, [theme, updateTheme, getResolvedTheme]) + + const value = { + theme, + resolvedTheme, + setTheme: (theme: Theme) => { + localStorage.setItem(storageKey, theme) + setTheme(theme) + }, + } + + return ( + + {children} + + ) +} + +export const useTheme = () => { + const context = useContext(ThemeProviderContext) + + if (context === undefined) + throw new Error("useTheme must be used within a ThemeProvider") + + return context +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/badge.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/badge.tsx new file mode 100644 index 0000000000..fd3a406bad --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/badge.tsx @@ -0,0 +1,46 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const badgeVariants = cva( + "inline-flex items-center justify-center rounded-full border px-2 py-0.5 text-xs font-medium w-fit whitespace-nowrap shrink-0 [&>svg]:size-3 gap-1 [&>svg]:pointer-events-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive transition-[color,box-shadow] overflow-hidden", + { + variants: { + variant: { + default: + "border-transparent bg-primary text-primary-foreground [a&]:hover:bg-primary/90", + secondary: + "border-transparent bg-secondary text-secondary-foreground [a&]:hover:bg-secondary/90", + destructive: + "border-transparent bg-destructive text-white [a&]:hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60", + outline: + "text-foreground [a&]:hover:bg-accent [a&]:hover:text-accent-foreground", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +function Badge({ + className, + variant, + asChild = false, + ...props +}: React.ComponentProps<"span"> & + VariantProps & { asChild?: boolean }) { + const Comp = asChild ? Slot : "span" + + return ( + + ) +} + +export { Badge, badgeVariants } diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/button.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/button.tsx new file mode 100644 index 0000000000..21409a0666 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/button.tsx @@ -0,0 +1,60 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const buttonVariants = cva( + "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive", + { + variants: { + variant: { + default: "bg-primary text-primary-foreground hover:bg-primary/90", + destructive: + "bg-destructive text-white hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60", + outline: + "border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50", + secondary: + "bg-secondary text-secondary-foreground hover:bg-secondary/80", + ghost: + "hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50", + link: "text-primary underline-offset-4 hover:underline", + }, + size: { + default: "h-9 px-4 py-2 has-[>svg]:px-3", + sm: "h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5", + lg: "h-10 rounded-md px-6 has-[>svg]:px-4", + icon: "size-9", + "icon-sm": "size-8", + "icon-lg": "size-10", + }, + }, + defaultVariants: { + variant: "default", + size: "default", + }, + } +) + +function Button({ + className, + variant, + size, + asChild = false, + ...props +}: React.ComponentProps<"button"> & + VariantProps & { + asChild?: boolean + }) { + const Comp = asChild ? Slot : "button" + + return ( + + ) +} + +export { Button, buttonVariants } diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/card.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/card.tsx new file mode 100644 index 0000000000..681ad980f2 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/card.tsx @@ -0,0 +1,92 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +function Card({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardTitle({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardDescription({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardAction({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardContent({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +export { + Card, + CardHeader, + CardFooter, + CardTitle, + CardAction, + CardDescription, + CardContent, +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/dialog.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/dialog.tsx new file mode 100644 index 0000000000..6cb123b385 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/dialog.tsx @@ -0,0 +1,141 @@ +import * as React from "react" +import * as DialogPrimitive from "@radix-ui/react-dialog" +import { XIcon } from "lucide-react" + +import { cn } from "@/lib/utils" + +function Dialog({ + ...props +}: React.ComponentProps) { + return +} + +function DialogTrigger({ + ...props +}: React.ComponentProps) { + return +} + +function DialogPortal({ + ...props +}: React.ComponentProps) { + return +} + +function DialogClose({ + ...props +}: React.ComponentProps) { + return +} + +function DialogOverlay({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function DialogContent({ + className, + children, + showCloseButton = true, + ...props +}: React.ComponentProps & { + showCloseButton?: boolean +}) { + return ( + + + + {children} + {showCloseButton && ( + + + Close + + )} + + + ) +} + +function DialogHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function DialogFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function DialogTitle({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function DialogDescription({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +export { + Dialog, + DialogClose, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogOverlay, + DialogPortal, + DialogTitle, + DialogTrigger, +} diff --git a/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/form.tsx b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/form.tsx new file mode 100644 index 0000000000..7d7474cc93 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/node_web_interface/src/components/ui/form.tsx @@ -0,0 +1,165 @@ +import * as React from "react" +import * as LabelPrimitive from "@radix-ui/react-label" +import { Slot } from "@radix-ui/react-slot" +import { + Controller, + FormProvider, + useFormContext, + useFormState, + type ControllerProps, + type FieldPath, + type FieldValues, +} from "react-hook-form" + +import { cn } from "@/lib/utils" +import { Label } from "@/components/ui/label" + +const Form = FormProvider + +type FormFieldContextValue< + TFieldValues extends FieldValues = FieldValues, + TName extends FieldPath = FieldPath, +> = { + name: TName +} + +const FormFieldContext = React.createContext( + {} as FormFieldContextValue +) + +const FormField = < + TFieldValues extends FieldValues = FieldValues, + TName extends FieldPath = FieldPath, +>({ + ...props +}: ControllerProps) => { + return ( + + + + ) +} + +const useFormField = () => { + const fieldContext = React.useContext(FormFieldContext) + const itemContext = React.useContext(FormItemContext) + const { getFieldState } = useFormContext() + const formState = useFormState({ name: fieldContext.name }) + const fieldState = getFieldState(fieldContext.name, formState) + + if (!fieldContext) { + throw new Error("useFormField should be used within ") + } + + const { id } = itemContext + + return { + id, + name: fieldContext.name, + formItemId: `${id}-form-item`, + formDescriptionId: `${id}-form-item-description`, + formMessageId: `${id}-form-item-message`, + ...fieldState, + } +} + +type FormItemContextValue = { + id: string +} + +const FormItemContext = React.createContext( + {} as FormItemContextValue +) + +function FormItem({ className, ...props }: React.ComponentProps<"div">) { + const id = React.useId() + + return ( + +
+ + ) +} + +function FormLabel({ + className, + ...props +}: React.ComponentProps) { + const { error, formItemId } = useFormField() + + return ( +
",a.fn.datepicker.DPGlobal=l,a.fn.datepicker.noConflict=function(){return a.fn.datepicker=g,this},a(document).on("focus.datepicker.data-api click.datepicker.data-api",'[data-provide="datepicker"]',function(b){var c=a(this);c.data("datepicker")||(b.preventDefault(),h.call(c,"show"))}),a(function(){h.call(a('[data-provide="datepicker-inline"]'))})}(window.jQuery),function(a){"use strict";a.fn.bdatepicker=a.fn.datepicker.noConflict(),a.fn.datepicker||(a.fn.datepicker=a.fn.bdatepicker);var b=function(a){this.init("date",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{initPicker:function(b,c){this.options.viewformat||(this.options.viewformat=this.options.format),b.datepicker=a.fn.editableutils.tryParseJson(b.datepicker,!0),this.options.datepicker=a.extend({},c.datepicker,b.datepicker,{format:this.options.viewformat}),this.options.datepicker.language=this.options.datepicker.language||"en",this.dpg=a.fn.bdatepicker.DPGlobal,this.parsedFormat=this.dpg.parseFormat(this.options.format),this.parsedViewFormat=this.dpg.parseFormat(this.options.viewformat)},render:function(){this.$input.bdatepicker(this.options.datepicker),this.options.clear&&(this.$clear=a('').html(this.options.clear).click(a.proxy(function(a){a.preventDefault(),a.stopPropagation(),this.clear()},this)),this.$tpl.parent().append(a('
').append(this.$clear)))},value2html:function(a,c){var d=a?this.dpg.formatDate(a,this.parsedViewFormat,this.options.datepicker.language):"";b.superclass.value2html.call(this,d,c)},html2value:function(a){return this.parseDate(a,this.parsedViewFormat)},value2str:function(a){return a?this.dpg.formatDate(a,this.parsedFormat,this.options.datepicker.language):""},str2value:function(a){return this.parseDate(a,this.parsedFormat)},value2submit:function(a){return this.value2str(a)},value2input:function(a){this.$input.bdatepicker("update",a)},input2value:function(){return this.$input.data("datepicker").date},activate:function(){},clear:function(){this.$input.data("datepicker").date=null,this.$input.find(".active").removeClass("active"),this.options.showbuttons||this.$input.closest("form").submit()},autosubmit:function(){this.$input.on("mouseup",".day",function(b){if(!a(b.currentTarget).is(".old")&&!a(b.currentTarget).is(".new")){var c=a(this).closest("form");setTimeout(function(){c.submit()},200)}})},parseDate:function(a,b){var c,d=null;return a&&(d=this.dpg.parseDate(a,b,this.options.datepicker.language),"string"==typeof a&&(c=this.dpg.formatDate(d,b,this.options.datepicker.language),a!==c&&(d=null))),d}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'
',inputclass:null,format:"yyyy-mm-dd",viewformat:null,datepicker:{weekStart:0,startView:0,minViewMode:0,autoclose:!1},clear:"× clear"}),a.fn.editabletypes.date=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("datefield",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.date),a.extend(b.prototype,{render:function(){this.$input=this.$tpl.find("input"),this.setClass(),this.setAttr("placeholder"),this.$tpl.bdatepicker(this.options.datepicker),this.$input.off("focus keydown"),this.$input.keyup(a.proxy(function(){this.$tpl.removeData("date"),this.$tpl.bdatepicker("update")},this))},value2input:function(a){this.$input.val(a?this.dpg.formatDate(a,this.parsedViewFormat,this.options.datepicker.language):""),this.$tpl.bdatepicker("update")},input2value:function(){return this.html2value(this.$input.val())},activate:function(){a.fn.editabletypes.text.prototype.activate.call(this)},autosubmit:function(){}}),b.defaults=a.extend({},a.fn.editabletypes.date.defaults,{tpl:'
',inputclass:"input-small",datepicker:{weekStart:0,startView:0,minViewMode:0,autoclose:!0}}),a.fn.editabletypes.datefield=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("datetime",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.abstractinput),a.extend(b.prototype,{initPicker:function(b,c){this.options.viewformat||(this.options.viewformat=this.options.format),b.datetimepicker=a.fn.editableutils.tryParseJson(b.datetimepicker,!0),this.options.datetimepicker=a.extend({},c.datetimepicker,b.datetimepicker,{format:this.options.viewformat}),this.options.datetimepicker.language=this.options.datetimepicker.language||"en",this.dpg=a.fn.datetimepicker.DPGlobal,this.parsedFormat=this.dpg.parseFormat(this.options.format,this.options.formatType),this.parsedViewFormat=this.dpg.parseFormat(this.options.viewformat,this.options.formatType)},render:function(){this.$input.datetimepicker(this.options.datetimepicker),this.$input.on("changeMode",function(){var b=a(this).closest("form").parent();setTimeout(function(){b.triggerHandler("resize")},0)}),this.options.clear&&(this.$clear=a('').html(this.options.clear).click(a.proxy(function(a){a.preventDefault(),a.stopPropagation(),this.clear()},this)),this.$tpl.parent().append(a('
').append(this.$clear)))},value2html:function(a,c){var d=a?this.dpg.formatDate(this.toUTC(a),this.parsedViewFormat,this.options.datetimepicker.language,this.options.formatType):"";return c?(b.superclass.value2html.call(this,d,c),void 0):d},html2value:function(a){var b=this.parseDate(a,this.parsedViewFormat);return b?this.fromUTC(b):null},value2str:function(a){return a?this.dpg.formatDate(this.toUTC(a),this.parsedFormat,this.options.datetimepicker.language,this.options.formatType):""},str2value:function(a){var b=this.parseDate(a,this.parsedFormat);return b?this.fromUTC(b):null},value2submit:function(a){return this.value2str(a)},value2input:function(a){a&&this.$input.data("datetimepicker").setDate(a)},input2value:function(){var a=this.$input.data("datetimepicker");return a.date?a.getDate():null},activate:function(){},clear:function(){this.$input.data("datetimepicker").date=null,this.$input.find(".active").removeClass("active"),this.options.showbuttons||this.$input.closest("form").submit()},autosubmit:function(){this.$input.on("mouseup",".minute",function(){var b=a(this).closest("form");setTimeout(function(){b.submit()},200)})},toUTC:function(a){return a?new Date(a.valueOf()-6e4*a.getTimezoneOffset()):a},fromUTC:function(a){return a?new Date(a.valueOf()+6e4*a.getTimezoneOffset()):a},parseDate:function(a,b){var c,d=null;return a&&(d=this.dpg.parseDate(a,b,this.options.datetimepicker.language,this.options.formatType),"string"==typeof a&&(c=this.dpg.formatDate(d,b,this.options.datetimepicker.language,this.options.formatType),a!==c&&(d=null))),d}}),b.defaults=a.extend({},a.fn.editabletypes.abstractinput.defaults,{tpl:'
',inputclass:null,format:"yyyy-mm-dd hh:ii",formatType:"standard",viewformat:null,datetimepicker:{todayHighlight:!1,autoclose:!1},clear:"× clear"}),a.fn.editabletypes.datetime=b}(window.jQuery),function(a){"use strict";var b=function(a){this.init("datetimefield",a,b.defaults),this.initPicker(a,b.defaults)};a.fn.editableutils.inherit(b,a.fn.editabletypes.datetime),a.extend(b.prototype,{render:function(){this.$input=this.$tpl.find("input"),this.setClass(),this.setAttr("placeholder"),this.$tpl.datetimepicker(this.options.datetimepicker),this.$input.off("focus keydown"),this.$input.keyup(a.proxy(function(){this.$tpl.removeData("date"),this.$tpl.datetimepicker("update")},this))},value2input:function(a){this.$input.val(this.value2html(a)),this.$tpl.datetimepicker("update")},input2value:function(){return this.html2value(this.$input.val())},activate:function(){a.fn.editabletypes.text.prototype.activate.call(this)},autosubmit:function(){}}),b.defaults=a.extend({},a.fn.editabletypes.datetime.defaults,{tpl:'
',inputclass:"input-medium",datetimepicker:{todayHighlight:!1,autoclose:!0}}),a.fn.editabletypes.datetimefield=b}(window.jQuery); \ No newline at end of file diff --git a/packages/tentacles/Services/Interfaces/web_interface/static/js/lib/w2ui-1.5.min.js b/packages/tentacles/Services/Interfaces/web_interface/static/js/lib/w2ui-1.5.min.js new file mode 100644 index 0000000000..0de3b51b75 --- /dev/null +++ b/packages/tentacles/Services/Interfaces/web_interface/static/js/lib/w2ui-1.5.min.js @@ -0,0 +1,3 @@ +/* w2ui 1.5 (c) http://w2ui.com, vitmalina@gmail.com */ +/* WARNING: local patch: added "typeof col!=='undefined'&&" before "NOTICE: grid column.caption property is deprecated, please use column.text. Column" warning*/ +var w2ui=w2ui||{},w2obj=w2obj||{},w2utils=function(g){var e={};return{version:"1.5.x",settings:{locale:"en-us",dateFormat:"m/d/yyyy",timeFormat:"hh:mi pm",datetimeFormat:"m/d/yyyy|hh:mi pm",currencyPrefix:"$",currencySuffix:"",currencyPrecision:2,groupSymbol:",",decimalSymbol:".",shortmonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],fullmonths:["January","February","March","April","May","June","July","August","September","October","November","December"],shortdays:["M","T","W","T","F","S","S"],fulldays:["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"],weekStarts:"M",dataType:"HTTPJSON",phrases:{},dateStartYear:1950,dateEndYear:2030,macButtonOrder:!1},isBin:function(e){return/^[0-1]+$/.test(e)},isInt:h,isFloat:function(e){"string"==typeof e&&(e=e.replace(/\s+/g,"").replace(w2utils.settings.groupSymbol,"").replace(w2utils.settings.decimalSymbol,"."));return("number"==typeof e||"string"==typeof e&&""!==e)&&!isNaN(Number(e))},isMoney:function(e){var t=w2utils.settings,i=new RegExp("^"+(t.currencyPrefix?"\\"+t.currencyPrefix+"?":"")+"[-+]?"+(t.currencyPrefix?"\\"+t.currencyPrefix+"?":"")+"[0-9]*[\\"+t.decimalSymbol+"]?[0-9]+"+(t.currencySuffix?"\\"+t.currencySuffix+"?":"")+"$","i");"string"==typeof e&&(e=e.replace(new RegExp(t.groupSymbol,"g"),""));return"object"!=typeof e&&""!==e&&i.test(e)},isHex:function(e){return/^(0x)?[0-9a-fA-F]+$/.test(e)},isAlphaNumeric:function(e){return/^[a-zA-Z0-9_-]+$/.test(e)},isEmail:function(e){return/^[a-zA-Z0-9._%\-+]+@[а-яА-Яa-zA-Z0-9.-]+\.[а-яА-Яa-zA-Z]+$/.test(e)},isIpAddress:function(e){return new RegExp("^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$").test(e)},isDate:function(e,t,i){if(!e)return!1;var s,n,r,a="Invalid Date";null==t&&(t=w2utils.settings.dateFormat);if("function"==typeof e.getFullYear)r=e.getFullYear(),s=e.getMonth()+1,n=e.getDate();else if(parseInt(e)==e&&0'+u+""},formatSize:function(e){if(!w2utils.isFloat(e)||""===e)return"";if(0===(e=parseFloat(e)))return 0;var t=parseInt(Math.floor(Math.log(e)/Math.log(1024)));return(Math.floor(e/Math.pow(1024,t)*10)/10).toFixed(0===t?0:1)+" "+(["Bt","KB","MB","GB","TB","PB","EB","ZB"][t]||"??")},formatNumber:function(e,t,i){if(null==e||""===e||"object"==typeof e)return"";var s={minimumFractionDigits:t,maximumFractionDigits:t,useGrouping:i};(null==t||t<0)&&(s.minimumFractionDigits=0,s.maximumFractionDigits=20);return parseFloat(e).toLocaleString(w2utils.settings.locale,s)},formatDate:function(e,t){t=t||this.settings.dateFormat;if(""===e||null==e||"object"==typeof e&&!e.getMonth)return"";var i=new Date(e);w2utils.isInt(e)&&(i=new Date(Number(e)));if("Invalid Date"===String(i))return"";var s=i.getFullYear(),n=i.getMonth(),r=i.getDate();return t.toLowerCase().replace("month",w2utils.settings.fullmonths[n]).replace("mon",w2utils.settings.shortmonths[n]).replace(/yyyy/g,("000"+s).slice(-4)).replace(/yyy/g,("000"+s).slice(-4)).replace(/yy/g,("0"+s).slice(-2)).replace(/(^|[^a-z$])y/g,"$1"+s).replace(/mm/g,("0"+(n+1)).slice(-2)).replace(/dd/g,("0"+r).slice(-2)).replace(/th/g,1==r?"st":"th").replace(/th/g,2==r?"nd":"th").replace(/th/g,3==r?"rd":"th").replace(/(^|[^a-z$])m/g,"$1"+(n+1)).replace(/(^|[^a-z$])d/g,"$1"+r)},formatTime:function(e,t){w2utils.settings.shortmonths,w2utils.settings.fullmonths;t=t||this.settings.timeFormat;if(""===e||null==e||"object"==typeof e&&!e.getMonth)return"";var i=new Date(e);w2utils.isInt(e)&&(i=new Date(Number(e)));{var s;w2utils.isTime(e)&&(s=w2utils.isTime(e,!0),(i=new Date).setHours(s.hours),i.setMinutes(s.minutes))}if("Invalid Date"===String(i))return"";var n="am",r=i.getHours(),a=i.getHours(),l=i.getMinutes(),o=i.getSeconds();l<10&&(l="0"+l);o<10&&(o="0"+o);-1===t.indexOf("am")&&-1===t.indexOf("pm")||(12<=r&&(n="pm"),12=]|='[^']*'|="[^"]*"|=[^'"][^\s>]*)*>/gi,"");break;case"object":if(Array.isArray(e)){e=g.extend(!0,[],e);for(var t=0;t").replace(/</g,"<").replace(/"/g,'"').replace(/&/g,"&");break;case"object":if(Array.isArray(e)){e=g.extend(!0,[],e);for(var t=0;t\|\/? {}\\])/g,"\\$1")},base64encode:function(e){var t,i,s,n,r,a,l,o="",d=0,u="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";e=function(e){e=String(e).replace(/\r\n/g,"\n");for(var t="",i=0;i>6|192):(t+=String.fromCharCode(s>>12|224),t+=String.fromCharCode(s>>6&63|128)),t+=String.fromCharCode(63&s|128))}return t}(e);for(;d>2,r=(3&t)<<4|i>>4,a=(15&i)<<2|s>>6,l=63&s,isNaN(i)?a=l=64:isNaN(s)&&(l=64),o=o+u.charAt(n)+u.charAt(r)+u.charAt(a)+u.charAt(l);return o},base64decode:function(e){var t,i,s,n,r,a,l,o="",d=0,u="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";e=e.replace(/[^A-Za-z0-9\+\/\=]/g,"");for(;d>4,i=(15&r)<<4|a>>2,s=(3&a)<<6|l,o+=String.fromCharCode(t),64!==a&&(o+=String.fromCharCode(i)),64!==l&&(o+=String.fromCharCode(s));return o=function(e){var t,i,s="",n=0,r=0;for(;n>>4&15)+i.charAt(15&t);return s}function s(e){for(var t,i,s="",n=-1;++n>>6&31,128|63&t):t<=65535?s+=String.fromCharCode(224|t>>>12&15,128|t>>>6&63,128|63&t):t<=2097151&&(s+=String.fromCharCode(240|t>>>18&7,128|t>>>12&63,128|t>>>6&63,128|63&t));return s}function n(e){for(var t=Array(e.length>>2),i=0;i>5]|=(255&e.charCodeAt(i/8))<>5]>>>i%32&255);return t}function l(e,t){e[t>>5]|=128<>>9<<4)]=t;for(var i=1732584193,s=-271733879,n=-1732584194,r=271733878,a=0;a>>32-l,i);var a,l}function c(e,t,i,s,n,r,a){return o(t&i|~t&s,e,t,n,r,a)}function h(e,t,i,s,n,r,a){return o(t&s|i&~s,e,t,n,r,a)}function p(e,t,i,s,n,r,a){return o(t^i^s,e,t,n,r,a)}function f(e,t,i,s,n,r,a){return o(i^(t|~s),e,t,n,r,a)}function g(e,t){var i=(65535&e)+(65535&t);return(e>>16)+(t>>16)+(i>>16)<<16|65535&i}return function(e){return i(t(s(e)))}(e)},transition:function(e,t,i,s){var n=g(e).width(),r=g(e).height();if(!e||!t)return void console.log("ERROR: Cannot do transition when one of the divs is null");switch(e.parentNode.style.cssText+="perspective: 900px; overflow: hidden;",e.style.cssText+="; position: absolute; z-index: 1019; backface-visibility: hidden",t.style.cssText+="; position: absolute; z-index: 1020; backface-visibility: hidden",i){case"slide-left":e.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",t.style.cssText+="overflow: hidden; transform: translate3d("+n+"px, 0, 0)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)",e.style.cssText+="transition: 0.5s; transform: translate3d(-"+n+"px, 0, 0)"},1);break;case"slide-right":e.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",t.style.cssText+="overflow: hidden; transform: translate3d(-"+n+"px, 0, 0)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: translate3d(0px, 0, 0)",e.style.cssText+="transition: 0.5s; transform: translate3d("+n+"px, 0, 0)"},1);break;case"slide-down":e.style.cssText+="overflow: hidden; z-index: 1; transform: translate3d(0, 0, 0)",t.style.cssText+="overflow: hidden; z-index: 0; transform: translate3d(0, 0, 0)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)",e.style.cssText+="transition: 0.5s; transform: translate3d(0, "+r+"px, 0)"},1);break;case"slide-up":e.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",t.style.cssText+="overflow: hidden; transform: translate3d(0, "+r+"px, 0)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)",e.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)"},1);break;case"flip-left":e.style.cssText+="overflow: hidden; transform: rotateY(0deg)",t.style.cssText+="overflow: hidden; transform: rotateY(-180deg)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: rotateY(0deg)",e.style.cssText+="transition: 0.5s; transform: rotateY(180deg)"},1);break;case"flip-right":e.style.cssText+="overflow: hidden; transform: rotateY(0deg)",t.style.cssText+="overflow: hidden; transform: rotateY(180deg)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: rotateY(0deg)",e.style.cssText+="transition: 0.5s; transform: rotateY(-180deg)"},1);break;case"flip-down":e.style.cssText+="overflow: hidden; transform: rotateX(0deg)",t.style.cssText+="overflow: hidden; transform: rotateX(180deg)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: rotateX(0deg)",e.style.cssText+="transition: 0.5s; transform: rotateX(-180deg)"},1);break;case"flip-up":e.style.cssText+="overflow: hidden; transform: rotateX(0deg)",t.style.cssText+="overflow: hidden; transform: rotateX(-180deg)",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: rotateX(0deg)",e.style.cssText+="transition: 0.5s; transform: rotateX(180deg)"},1);break;case"pop-in":e.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",t.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0); transform: scale(.8); opacity: 0;",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; transform: scale(1); opacity: 1;",e.style.cssText+="transition: 0.5s;"},1);break;case"pop-out":e.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0); transform: scale(1); opacity: 1;",t.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0); opacity: 0;",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; opacity: 1;",e.style.cssText+="transition: 0.5s; transform: scale(1.7); opacity: 0;"},1);break;default:e.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",t.style.cssText+="overflow: hidden; translate3d(0, 0, 0); opacity: 0;",g(t).show(),window.setTimeout(function(){t.style.cssText+="transition: 0.5s; opacity: 1;",e.style.cssText+="transition: 0.5s"},1)}setTimeout(function(){"slide-down"===i&&(g(e).css("z-index","1019"),g(t).css("z-index","1020")),t&&g(t).css({opacity:"1"}).css(w2utils.cssPrefix({transition:"",transform:""})),e&&g(e).css({opacity:"1"}).css(w2utils.cssPrefix({transition:"",transform:""})),"function"==typeof s&&s()},500)},lock:function(e,t,i){var s={};"object"==typeof t?s=t:(s.msg=t,s.spinner=i);s.msg||0===s.msg||(s.msg="");w2utils.unlock(e),g(e).prepend('
');var n=g(e).find(".w2ui-lock"),r=g(e).find(".w2ui-lock-msg");s.msg||r.css({"background-color":"transparent",border:"0px"});!0===s.spinner&&(s.msg='
"+s.msg);null!=s.opacity&&n.css("opacity",s.opacity);"function"==typeof n.fadeIn?(n.fadeIn(200),r.html(s.msg).fadeIn(200)):(n.show(),r.html(s.msg).show(0))},unlock:function(e,t){h(t)?(g(e).find(".w2ui-lock").fadeOut(t),setTimeout(function(){g(e).find(".w2ui-lock").remove(),g(e).find(".w2ui-lock-msg").remove()},t)):(g(e).find(".w2ui-lock").remove(),g(e).find(".w2ui-lock-msg").remove())},message:function(s,e){var t,n,r=this;g().w2tag(),e=e||{width:200,height:100};null==e.on&&g.extend(e,w2utils.event);null==e.width&&(e.width=200);null==e.height&&(e.height=100);var i=parseInt(g(s.box).width()),a=parseInt(g(s.box).height()),l=parseInt(g(s.box).find(s.title).css("height")||0);e.width>i&&(e.width=i-10);e.height>a-l&&(e.height=a-10-l);e.originalWidth=e.width,e.originalHeight=e.height,parseInt(e.width)<0&&(e.width=i+e.width);parseInt(e.width)<10&&(e.width=10);parseInt(e.height)<0&&(e.height=a+e.height-l);parseInt(e.height)<10&&(e.height=10);null==e.hideOnClick&&(e.hideOnClick=!1);var o=g(s.box).data("options")||{};(null==e.width||e.width>o.width-10)&&(e.width=o.width-10);(null==e.height||e.height>o.height-l-5)&&(e.height=o.height-l-5);e.originalHeight<0&&(e.height=a+e.originalHeight-l);e.originalWidth<0&&(e.width=i+2*e.originalWidth);var d=g(s.box).find(s.title),u=g(s.box).find(".w2ui-message.w2ui-closing");0'+(e.body||"")+'
'+(e.buttons||"")+"
"),g(s.box).find(".w2ui-message").css("z-index",1390),d.data("old-z-index",d.css("z-index")),d.css("z-index",1501),g(s.box).find(s.body).before('"),g(s.box).find("#w2ui-message"+c).data("options",e).data("prev_focus",g(":focus"));var p=g(s.box).find("#w2ui-message"+c).css("display");if(g(s.box).find("#w2ui-message"+c).css(w2utils.cssPrefix({transform:"none"===p?"translateY(-"+e.height+"px)":"translateY(0px)"})),"none"===p){if(g(s.box).find("#w2ui-message"+c).show().html(e.html),e.box=g(s.box).find("#w2ui-message"+c),!0===(n=e.trigger({phase:"before",type:"open",target:"self"})).isCancelled)return d.css("z-index",d.data("old-z-index")),void g(s.box).find("#w2ui-message"+c).remove();setTimeout(function(){g(s.box).find("#w2ui-message"+c).css(w2utils.cssPrefix({transform:"none"===p?"translateY(0px)":"translateY(-"+e.height+"px)"}))},1),0===c&&this.lock&&(s.param?this.lock(s.param):this.lock()),setTimeout(function(){g(s.box).find("#w2ui-message"+c).css(w2utils.cssPrefix({transition:"0s"})),e.trigger(g.extend(n,{phase:"after"}))},350)}}function f(e,t){if(null==n&&!0===(n=t.trigger({phase:"before",type:"open",target:"self"})).isCancelled)return d.css("z-index",d.data("old-z-index")),void g(s.box).find("#w2ui-message"+c).remove();var i=e.data("prev_focus");e.remove(),i&&0'+n(e)+"
";return g("body").append(s),i=g("#_tmp_width").width(),g("#_tmp_width").remove(),i},scrollBarSize:function(){if(e.scrollBarSize)return e.scrollBarSize;g("body").append('
1
'),e.scrollBarSize=100-g("#_scrollbar_width > div").width(),g("#_scrollbar_width").remove(),0<=String(navigator.userAgent).indexOf("MSIE")&&(e.scrollBarSize=e.scrollBarSize/2);return e.scrollBarSize},checkName:function(e,t){return e&&null!=e.name?null==w2ui[e.name]?!!w2utils.isAlphaNumeric(e.name)||(console.log('ERROR: The parameter "name" has to be alpha-numeric (a-z, 0-9, dash and underscore). '),!1):(console.log('ERROR: The parameter "name" is not unique. There are other objects already created with the same name (obj: '+e.name+")."),!1):(console.log('ERROR: The parameter "name" is required but not supplied in $().'+t+"()."),!1)},checkUniqueId:function(e,t,i,s){g.isArray(t)||(t=[t]);for(var n=0;n").replace(/&/g,"&").replace(/"/g,'"').replace(/ /g," ")),t<=l.length){(s=e.childNodes[a]).childNodes&&0s.length&&(t=s.length);n.setStart(s,t),i?n.setEnd(s,i):n.collapse(!0);r.removeAllRanges(),r.addRange(n)},testLocalStorage:t,hasLocalStorage:t(),isIOS:-1!==navigator.userAgent.toLowerCase().indexOf("iphone")||-1!==navigator.userAgent.toLowerCase().indexOf("ipod")||-1!==navigator.userAgent.toLowerCase().indexOf("ipad")||-1!==navigator.userAgent.toLowerCase().indexOf("mobile")||-1!==navigator.userAgent.toLowerCase().indexOf("android"),isIE:-1!==navigator.userAgent.toLowerCase().indexOf("msie")||-1!==navigator.userAgent.toLowerCase().indexOf("trident")};function h(e){return/^[-+]?[0-9]+$/.test(e)}function n(e){if(null==e)return e;switch(typeof e){case"number":break;case"string":e=String(e).replace(/&/g,"&").replace(/>/g,">").replace(/'+w2utils.formatDate(i,t)+""},datetime:function(e,t){if(""===t&&(t=w2utils.settings.datetimeFormat),null==e||0===e||""===e)return"";var i=w2utils.isDateTime(e,t,!0);return!1===i&&(i=w2utils.isDate(e,t,!0)),''+w2utils.formatDateTime(i,t)+""},time:function(e,t){if(""===t&&(t=w2utils.settings.timeFormat),"h12"===t&&(t="hh:mi pm"),"h24"===t&&(t="h24:mi"),null==e||0===e||""===e)return"";var i=w2utils.isDateTime(e,t,!0);return!1===i&&(i=w2utils.isDate(e,t,!0)),''+w2utils.formatTime(e,t)+""},timestamp:function(e,t){if(""===t&&(t=w2utils.settings.datetimeFormat),null==e||0===e||""===e)return"";var i=w2utils.isDateTime(e,t,!0);return!1===i&&(i=w2utils.isDate(e,t,!0)),i.toString?i.toString():""},gmt:function(e,t){if(""===t&&(t=w2utils.settings.datetimeFormat),null==e||0===e||""===e)return"";var i=w2utils.isDateTime(e,t,!0);return!1===i&&(i=w2utils.isDate(e,t,!0)),i.toUTCString?i.toUTCString():""},age:function(e,t){if(null==e||0===e||""===e)return"";var i=w2utils.isDateTime(e,null,!0);return!1===i&&(i=w2utils.isDate(e,null,!0)),''+w2utils.age(e)+(t?" "+t:"")+""},interval:function(e,t){return null==e||0===e||""===e?"":w2utils.interval(e)+(t?" "+t:"")},toggle:function(e,t){return e?"Yes":""},password:function(e,t){for(var i="",s=0;s/g,"<");var n=new RegExp(s+"(?!([^<]+)?>)","gi");t.innerHTML=t.innerHTML.replace(n,r)}function r(e){return''+e+""}}):_(this).each(l);function l(e,t){for(;-1!==t.innerHTML.indexOf('');)t.innerHTML=t.innerHTML.replace(/\((.|\n|\r)*)\<\/span\>/gi,"$1")}},_.fn.w2tag=function(o,d){if(1===arguments.length&&"object"==typeof o&&null!=(d=o).html&&(o=d.html),null!=(d=_.extend({id:null,auto:null,html:o,position:"right|top",align:"none",left:0,top:0,maxWidth:null,style:"",css:{},className:"",inputClass:"",onShow:null,onHide:null,hideOnKeyPress:!0,hideOnFocus:!1,hideOnBlur:!1,hideOnClick:!1,hideOnChange:!0},d)).name&&null==d.id&&(d.id=d.name),""!==d.class&&""===d.inputClass&&(d.inputClass=d.class),0!==_(this).length)return!0===d.auto||null!=d.showOn||null!=d.hideOn?0!=arguments.length&&o?_(this).each(function(e,t){var i="mouseenter",s="mouseleave";d.showOn&&(i=String(d.showOn).toLowerCase(),delete d.showOn),d.hideOn&&(s=String(d.hideOn).toLowerCase(),delete d.hideOn),d.potision||(d.position="top|bottom"),_(t).off(".w2tooltip").on(i+".w2tooltip",function(){d.auto=!1,_(this).w2tag(o,d)}).on(s+".w2tooltip",function(){_(this).w2tag()})}):_(this).each(function(e,t){_(t).off(".w2tooltip")}):_(this).each(function(e,t){var c,i=d.id?d.id:t.id;""==i&&(i=_(t).find("input").attr("id")),i=i||"noid";var s,n=w2utils.escapeId(i);return null!=_(this).data("w2tag")?(c=_(this).data("w2tag"),_.extend(c.options,d)):c={id:i,attachedTo:t,box:_("#w2ui-tag-"+n),options:_.extend({},d),init:r,hide:function(){if(c.box.length<=0)return;c.tmp.timer&&clearTimeout(c.tmp.timer);c.box.remove(),c.options.hideOnClick&&_("body").off(".w2tag"+(c.id||""));_(c.attachedTo).off(".w2tag").removeClass(c.options.inputClass).removeData("w2tag"),0<_(c.attachedTo).length&&(_(c.attachedTo)[0].style.cssText=c.tmp.originalCSS);"function"==typeof c.options.onHide&&c.options.onHide()},getPos:l,isMoved:a,tmp:{}},void(""===o||null==o?c.hide():0!==c.box.length?c.box.find(".w2ui-tag-body").css(c.options.css).attr("style",c.options.style).addClass(c.options.className).html(c.options.html):(c.tmp.originalCSS="",0<_(c.attachedTo).length&&(c.tmp.originalCSS=_(c.attachedTo)[0].style.cssText),s="white-space: nowrap;",c.options.maxWidth&&w2utils.getStrWidth(o)>c.options.maxWidth&&(s="width: "+c.options.maxWidth+"px"),_("body").append('"),c.box=_("#w2ui-tag-"+n),_(c.attachedTo).data("w2tag",c),setTimeout(r,1)));function r(){var e;c.box.css("display","block"),c&&c.box&&_(c.attachedTo).offset()&&(e=c.getPos(),c.box.css({opacity:"1",left:e.left+"px",top:e.top+"px"}).data("w2tag",c).find(".w2ui-tag-body").addClass(e.posClass),c.tmp.pos=e.left+"x"+e.top,_(c.attachedTo).off(".w2tag").css(c.options.css).addClass(c.options.inputClass),c.options.hideOnKeyPress&&_(c.attachedTo).on("keypress.w2tag",c.hide),c.options.hideOnFocus&&_(c.attachedTo).on("focus.w2tag",c.hide),d.hideOnChange&&("INPUT"===t.nodeName?_(t).on("change.w2tag",c.hide):_(t).find("input").on("change.w2tag",c.hide)),c.options.hideOnBlur&&_(c.attachedTo).on("blur.w2tag",c.hide),c.options.hideOnClick&&_("body").on("click.w2tag"+(c.id||""),c.hide),"function"==typeof c.options.onShow&&c.options.onShow(),a())}function a(e){var t,i=_(c.attachedTo).offset();0===_(c.attachedTo).length||0===i.left&&0===i.top||0===c.box.find(".w2ui-tag-body").length?c.hide():(t=l(),c.tmp.pos!==t.left+"x"+t.top&&(c.box.css(w2utils.cssPrefix({transition:e?"0s":".2s"})).css({left:t.left+"px",top:t.top+"px"}),c.tmp.pos=t.left+"x"+t.top),c.tmp.timer&&clearTimeout(c.tmp.timer),c.tmp.timer=setTimeout(a,100))}function l(){var e=_(c.attachedTo).offset(),t="w2ui-tag-right",i=parseInt(e.left+c.attachedTo.offsetWidth+(c.options.left?c.options.left:0)),s=parseInt(e.top+(c.options.top?c.options.top:0)),n=c.box.find(".w2ui-tag-body"),r=n[0].offsetWidth,a=n[0].offsetHeight;if("string"==typeof c.options.position&&-1!==c.options.position.indexOf("|")&&(c.options.position=c.options.position.split("|")),"top"===c.options.position)t="w2ui-tag-top",i=parseInt(e.left+(c.options.left?c.options.left:0))-14,s=parseInt(e.top+(c.options.top?c.options.top:0))-a-10;else if("bottom"===c.options.position)t="w2ui-tag-bottom",i=parseInt(e.left+(c.options.left?c.options.left:0))-14,s=parseInt(e.top+c.attachedTo.offsetHeight+(c.options.top?c.options.top:0))+10;else if("left"===c.options.position)t="w2ui-tag-left",i=parseInt(e.left+(c.options.left?c.options.left:0))-r-20,s=parseInt(e.top+(c.options.top?c.options.top:0));else if(Array.isArray(c.options.position)){for(var l=window.innerWidth,o=window.innerHeight,d=0;d
');var s=_("#w2ui-overlay"+b),n=s.find(" > div");n.html(v.html);var r=n.css("background-color");null!=r&&"rgba(0, 0, 0, 0)"!==r&&"transparent"!==r&&s.css({"background-color":r,"border-color":r});var a=_(y).offset()||{};return s.data("element",0 div"),i=_("#w2ui-overlay"+b+" div.w2ui-menu"),s={};if(0e.height()&&t.find("div.w2ui-menu").css("overflow-y","hidden")},1),setTimeout(function(){var e=t.find("div.w2ui-menu");"auto"!==e.css("overflow-y")&&e.css("overflow-y","auto")},10)),v.tmp.contentWidth&&"both"!==v.align?(a=parseInt(v.tmp.contentWidth),t.width(a),setTimeout(function(){a>t.find("div.w2ui-menu > table").width()&&t.find("div.w2ui-menu > table").css("overflow-x","hidden")},1),setTimeout(function(){t.find("div.w2ui-menu > table").css("overflow-x","auto")},10)):t.find("div.w2ui-menu").css("width","100%");var l=v.left,o=v.width,d=v.tipLeft;switch(v.align){case"both":l=17,0===v.width&&(v.width=w2utils.getSize(_(y),"width")),v.maxWidth&&v.width>v.maxWidth&&(v.width=v.maxWidth);break;case"left":l=17;break;case"right":l=w2utils.getSize(_(y),"width")-a+10,d=a-40}var u,c,h,p="auto"!==(o=30!==a||o?v.width?v.width:"auto":30)?(o-17)/2:(a-17)/2;p<25&&(l=25-p,d=Math.floor(p)),h=v.contextMenu?(u=v.pageX+8,c=+v.pageY,v.pageY):(u=(25<(g=y.offset()||{}).left?g.left:25)+l,c=g.top+w2utils.getSize(y,"height")+v.top+7,g.top),e.css({left:u+"px",top:c+"px","min-width":o,"min-height":v.height?v.height:"auto"});var f,g=t.offset()||{},m=window.innerHeight+_(document).scrollTop()-g.top-7,w=window.innerWidth+_(document).scrollLeft()-g.left-7;v.contextMenu&&(m=window.innerHeight+_(document).scrollTop()-v.pageY-15,w=window.innerWidth+_(document).scrollLeft()-v.pageX),(-50v.maxHeight&&(m=v.maxHeight),mv.maxHeight&&(m=v.maxHeight),mstyle").html("#w2ui-overlay"+b+":before { margin-left: "+parseInt(d)+"px; }#w2ui-overlay"+b+":after { margin-left: "+parseInt(d)+"px; }"),a=t.width(),w=window.innerWidth+_(document).scrollLeft()-g.left-7,v.maxWidth&&w>v.maxWidth&&(w=v.maxWidth),wstyle").html("#w2ui-overlay"+b+":before { display: none; }#w2ui-overlay"+b+":after { display: none; }"),n&&"both"!==v.align&&t.width(a+w2utils.scrollBarSize()+2)}0a+s)&&n.animate({scrollTop:i-(s-2*r.height())/2},200,"linear")),h()}else{1===arguments.length?v=e:v.items=e,"object"!=typeof v&&(v={}),v=_.extend({},{type:"normal",index:null,items:[],render:null,msgNoItems:"No items",onSelect:null,hideOnRemove:!1,tmp:{}},v),(_.fn.w2menuOptions=v).name&&(d="-"+v.name),"function"==typeof v.select&&"function"!=typeof v.onSelect&&(v.onSelect=v.select),"function"==typeof v.remove&&"function"!=typeof v.onRemove&&(v.onRemove=v.remove),"function"==typeof v.onRender&&"function"!=typeof v.render&&(v.render=v.onRender),_.fn.w2menuClick=function(e,t,i){var s,n,r=!1,a=_(e.target).closest("tr");(e.shiftKey||e.metaKey||e.ctrlKey)&&(r=!0),items=null==i?v.items:v.items[i].items,_(e.target).hasClass("remove")?("function"==typeof v.onRemove&&v.onRemove({index:t,parentIndex:i,item:items[t],keepOpen:r,originalEvent:e}),r=!v.hideOnRemove,_(e.target).closest("tr").remove(),h()):a.hasClass("has-sub-menu")?(r=!0,a.hasClass("expanded")?(items[t].expanded=!1,a.removeClass("expanded").addClass("collapsed").next().hide()):(items[t].expanded=!0,a.addClass("expanded").removeClass("collapsed").next().show()),h()):"function"==typeof v.onSelect&&(s=items,"function"==typeof items&&(s=items(v.items[i])),null!=s[t].keepOpen&&(r=s[t].keepOpen),v.onSelect({index:t,parentIndex:i,item:s[t],keepOpen:r,originalEvent:e})),null!=items[t]&&!0===items[t].keepOpen||((n=_("#w2ui-overlay"+d)).removeData("keepOpen"),0'+y()+"
",t=_(this).w2overlay(l,v),setTimeout(function(){if(_("#w2ui-overlay"+d+" #menu-search").on("keyup",p).on("keydown",function(e){9===e.keyCode&&(e.stopPropagation(),e.preventDefault())}),v.search){if(-1!==["text","password"].indexOf(_(o)[0].type)||"TEXTAREA"===_(o)[0].tagName.toUpperCase())return;_("#w2ui-overlay"+d+" #menu-search").focus()}h()},250),h();var c=_("#w2ui-overlay"+d);0s+t)&&_("#w2ui-overlay"+d+" div.w2ui-menu").animate({scrollTop:e-(t-2*i.height())/2},200,"linear"))},1)}function p(e){var t=this.value,i=!1;switch(e.keyCode){case 13:_("#w2ui-overlay"+d).remove(),_.fn.w2menuClick(e,v.index);break;case 9:case 27:_("#w2ui-overlay"+d).remove(),_.fn.w2menuClick(e,-1);break;case 38:for(v.index=w2utils.isInt(v.index)?parseInt(v.index):0,v.index--;0=v.items.length&&(v.index=v.items.length-1),i=!0}if(!i){for(var s=0,n=0;n
'+w2utils.lang("Loading...")+"
";var n=0,r='',a=null,l=null;null==e&&(e=v.items),Array.isArray(e)||(e=[]);for(var o=0;o
'),l&&(d='
'),"break"!==w.type&&null!=u&&""!==u&&"--"!=String(u).substr(0,2)?(h=n%2==0?"w2ui-item-even":"w2ui-item-odd",!0!==v.altRows&&(h=""),p=1,""===d&&p++,null==w.count&&null==w.hotkey&&!0!==w.remove&&null==w.items&&p++,null==w.tooltip&&null!=w.hint&&(w.tooltip=w.hint),!(f="")===w.remove?f='X':null!=w.items?(g=[],"function"==typeof w.items?g=w.items(w):Array.isArray(w.items)&&(g=w.items),f="",c='"):(null!=w.count&&(f+=""+w.count+""),null!=w.hotkey&&(f+=''+w.hotkey+"")),r+=''+(t?"":"")+d+' "+c,n++):r+='"),e[o]=w}return 0===n&&v.msgNoItems&&(r+='"),r+="
'+y(g,!0,w.expanded,o)+"
'+v.msgNoItems+"
"}},_.fn.w2color=function(n,e){var r,a,c,l,o,h,d,p,t,s,u,f,g,i=_(this),m=i[0];function w(e){e.color;for(var t,i='
',s=0;s";for(var n=0;n'+(e.color==a[s][n]?"•":" ")+" ",e.color==a[s][n]&&(r=[s,n]);i+="",s<2&&(i+='')}return i+="
",i+='',i+="
"+("string"==typeof e.html?e.html:"")+'
'}i.data("skipInit")?i.removeData("skipInit"):(r=[-1,-1],null==_.fn.w2colorPalette&&(_.fn.w2colorPalette=[["000000","333333","555555","777777","888888","999999","AAAAAA","CCCCCC","DDDDDD","EEEEEE","F7F7F7","FFFFFF"],["FF011B","FF9838","FFC300","FFFD59","86FF14","14FF7A","2EFFFC","2693FF","006CE7","9B24F4","FF21F5","FF0099"],["FFEAEA","FCEFE1","FCF4DC","FFFECF","EBFFD9","D9FFE9","E0FFFF","E8F4FF","ECF4FC","EAE6F4","FFF5FE","FCF0F7"],["F4CCCC","FCE5CD","FFF1C2","FFFDA1","D5FCB1","B5F7D0","BFFFFF","D6ECFF","CFE2F3","D9D1E9","FFE3FD","FFD9F0"],["EA9899","F9CB9C","FFE48C","F7F56F","B9F77E","84F0B1","83F7F7","B5DAFF","9FC5E8","B4A7D6","FAB9F6","FFADDE"],["E06666","F6B26B","DEB737","E0DE51","8FDB48","52D189","4EDEDB","76ACE3","6FA8DC","8E7CC3","E07EDA","F26DBD"],["CC0814","E69138","AB8816","B5B20E","6BAB30","27A85F","1BA8A6","3C81C7","3D85C6","674EA7","A14F9D","BF4990"],["99050C","B45F17","80650E","737103","395E14","10783D","13615E","094785","0A5394","351C75","780172","782C5A"]]),a=_.fn.w2colorPalette,"string"==typeof n&&(n={color:n,transparent:!0}),null==n.onSelect&&null!=e&&(n.onSelect=e),n.transparent&&"333333"==a[0][1]&&(a[0].splice(1,1),a[0].push("")),n.transparent||"333333"==a[0][1]||(a[0].splice(1,0,"333333"),a[0].pop()),n.color&&(n.color=String(n.color).toUpperCase()),"string"==typeof n.color&&"#"===n.color.substr(0,1)&&(n.color=n.color.substr(1)),null==n.fireChange&&(n.fireChange=!0),0===_("#w2ui-overlay").length?_(m).w2overlay(w(n),n):(_("#w2ui-overlay .w2ui-colors").parent().html(w(n)),_("#w2ui-overlay").show()),_("#w2ui-overlay .w2ui-color").off(".w2color").on("mousedown.w2color",function(e){var t=_(e.originalEvent.target).attr("name");r=_(e.originalEvent.target).attr("index").split(":"),"INPUT"===m.tagName.toUpperCase()?(n.fireChange&&_(m).change(),_(m).next().find(">div").css("background-color",t)):_(m).data("_color",t),"function"==typeof n.onSelect&&n.onSelect(t)}).on("mouseup.w2color",function(){setTimeout(function(){0<_("#w2ui-overlay").length&&_("#w2ui-overlay").removeData("keepOpen")[0].hide()},10)}),_("#w2ui-overlay .color-original").off(".w2color").on("click.w2color",function(e){var t=w2utils.parseColor(_(e.target).css("background-color"));null!=t&&(o=t,l=w2utils.rgb2hsv(o),h(l),d(),p())}),_("#w2ui-overlay input").off(".w2color").on("mousedown.w2color",function(e){_("#w2ui-overlay").data("keepOpen",!0),setTimeout(function(){_("#w2ui-overlay").data("keepOpen",!0)},10),e.stopPropagation()}).on("change.w2color",function(){var e=_(this),t=parseFloat(e.val()),i=parseFloat(e.attr("max"));isNaN(t)&&(t=0),1div").css("background-color","#"+i)):_(m).data("_color",i),"function"==typeof n.onSelect&&n.onSelect(i))},d=function(){var e=_("#w2ui-overlay .palette .value1"),t=_("#w2ui-overlay .rainbow .value2"),i=_("#w2ui-overlay .alpha .value2"),s=parseInt(e.width())/2,n=parseInt(t.width())/2;e.css({left:150*l.s/100-s,top:125*(100-l.v)/100-s}),t.css("left",l.h/2.4-n),i.css("left",150*o.a-n)},p=function(){var e=w2utils.hsv2rgb(l.h,100,100),t=e.r+","+e.g+","+e.b;_("#w2ui-overlay .palette").css("background-image","linear-gradient(90deg, rgba("+t+",0) 0%, rgba("+t+",1) 100%)")},t=function(e){var t=_(this).find(".value1, .value2"),i=parseInt(t.width())/2;t.hasClass("move-x")&&t.css({left:e.offsetX-i+"px"}),t.hasClass("move-y")&&t.css({top:e.offsetY-i+"px"}),c={$el:t,x:e.pageX,y:e.pageY,width:t.parent().width(),height:t.parent().height(),left:parseInt(t.css("left")),top:parseInt(t.css("top"))},u(e),_("body").off(".w2color").on(g,u).on(f,s)},s=function(e){_("body").off(".w2color")},u=function(e){var t=c.$el,i=e.pageX-c.x,s=e.pageY-c.y,n=c.left+i,r=c.top+s,a=parseInt(t.width())/2;n<-a&&(n=-a),r<-a&&(r=-a),n>c.width-a&&(n=c.width-a),r>c.height-a&&(r=c.height-a),t.hasClass("move-x")&&t.css({left:n+"px"}),t.hasClass("move-y")&&t.css({top:r+"px"});var l,o=t.parent().attr("name"),d=parseInt(t.css("left"))+a,u=parseInt(t.css("top"))+a;"palette"===o&&h({s:Math.round(d/c.width*100),v:Math.round(100-u/c.height*100)}),"rainbow"===o&&(l=Math.round(2.4*d),h({h:l}),p()),"alpha"===o&&h({a:parseFloat(Number(d/150).toFixed(2))})},!0!==_.fn._colorAdvanced&&!0!==n.advanced||(_("#w2ui-overlay .w2ui-color-tabs :nth-child(2)").click(),_("#w2ui-overlay").removeData("keepOpen")),h({},!0),p(),d(),f="mouseup.w2color",g="mousemove.w2color",w2utils.isIOS&&(f="touchend.w2color",g="touchmove.w2color "),_("#w2ui-overlay .palette").off(".w2color").on("mousedown.w2color",t),_("#w2ui-overlay .rainbow").off(".w2color").on("mousedown.w2color",t),_("#w2ui-overlay .alpha").off(".w2color").on("mousedown.w2color",t),m.nav=function(e){switch(e){case"up":r[0]--;break;case"down":r[0]++;break;case"right":r[1]++;break;case"left":r[1]--}return r[0]<0&&(r[0]=0),r[0]>a.length-2&&(r[0]=a.length-2),r[1]<0&&(r[1]=0),r[1]>a[0].length-1&&(r[1]=a[0].length-1),color=a[r[0]][r[1]],_(m).data("_color",color),color})}}(jQuery),function($){var w2grid=function(e){this.name=null,this.box=null,this.columns=[],this.columnGroups=[],this.records=[],this.summary=[],this.searches=[],this.sortMap={},this.toolbar={},this.ranges=[],this.menu=[],this.searchData=[],this.sortData=[],this.total=0,this.recid=null,this.last={field:"",label:"",logic:"OR",search:"",searchIds:[],selection:{indexes:[],columns:{}},multi:!1,scrollTop:0,scrollLeft:0,colStart:0,colEnd:0,sortData:null,sortCount:0,xhr:null,loaded:!1,range_start:null,range_end:null,sel_ind:null,sel_col:null,sel_type:null,edit_col:null,isSafari:/^((?!chrome|android).)*safari/i.test(navigator.userAgent)},$.extend(!0,this,w2obj.grid),this.show=$.extend(!0,{},w2grid.prototype.show),this.postData=$.extend(!0,{},w2grid.prototype.postData),this.routeData=$.extend(!0,{},w2grid.prototype.routeData),this.httpHeaders=$.extend(!0,{},w2grid.prototype.httpHeaders),this.buttons=$.extend(!0,{},w2grid.prototype.buttons),this.operators=$.extend(!0,{},w2grid.prototype.operators),this.operatorsMap=$.extend(!0,{},w2grid.prototype.operatorsMap),this.stateColProps=$.extend(!0,{},w2grid.prototype.stateColProps),this.stateColDefaults=$.extend(!0,{},w2grid.prototype.stateColDefaults),$.extend(!0,this,e)};$.fn.w2grid=function(e){if($.isPlainObject(e)){if(!w2utils.checkName(e,"w2grid"))return;var t,i=e.columns,s=e.columnGroups,n=e.records,r=e.searches,a=e.searchData,l=e.sortData,o=new w2grid(e);if($.extend(o,{records:[],columns:[],searches:[],sortData:[],searchData:[],handlers:[]}),i)for(t=0;t
"},"search-go":{type:"drop",id:"w2ui-search-advanced",icon:"w2ui-icon-search",text:"Search",tooltip:"Open Search Fields"},add:{type:"button",id:"w2ui-add",text:"Add New",tooltip:"Add new record",icon:"w2ui-icon-plus"},edit:{type:"button",id:"w2ui-edit",text:"Edit",tooltip:"Edit selected record",icon:"w2ui-icon-pencil",disabled:!0},delete:{type:"button",id:"w2ui-delete",text:"Delete",tooltip:"Delete selected records",icon:"w2ui-icon-cross",disabled:!0},save:{type:"button",id:"w2ui-save",text:"Save",tooltip:"Save changed records",icon:"w2ui-icon-check"}},operators:{text:["is","begins","contains","ends"],number:["=","between",">","<",">=","<="],date:["is","between",{oper:"less",text:"before"},{oper:"more",text:"after"}],list:["is"],hex:["is","between"],color:["is","begins","contains","ends"],enum:["in","not in"]},operatorsMap:{text:"text",int:"number",float:"number",money:"number",currency:"number",percent:"number",hex:"hex",alphanumeric:"text",color:"color",date:"date",time:"date",datetime:"date",list:"list",combo:"text",enum:"enum",file:"enum",select:"list",radio:"list",checkbox:"list",toggle:"list"},onAdd:null,onEdit:null,onRequest:null,onLoad:null,onDelete:null,onSave:null,onSelect:null,onUnselect:null,onClick:null,onDblClick:null,onContextMenu:null,onMenuClick:null,onColumnClick:null,onColumnDblClick:null,onColumnResize:null,onColumnAutoResize:null,onSort:null,onSearch:null,onSearchOpen:null,onChange:null,onRestore:null,onExpand:null,onCollapse:null,onError:null,onKeydown:null,onToolbar:null,onColumnOnOff:null,onCopy:null,onPaste:null,onSelectionExtend:null,onEditField:null,onRender:null,onRefresh:null,onReload:null,onResize:null,onDestroy:null,onStateSave:null,onStateRestore:null,onFocus:null,onBlur:null,onReorderRow:null,add:function(e,t){$.isArray(e)||(e=[e]);for(var i=0,s=0;ss.length?1:i.lengtht.constructor.name?r:-r;e&&"object"==typeof e&&(e=e.valueOf()),t&&"object"==typeof t&&(t=t.valueOf());var a={}.toString;return e&&"object"==typeof e&&e.toString!=a&&(e=String(e)),t&&"object"==typeof t&&t.toString!=a&&(t=String(t)),"string"==typeof e&&(e=$.trim(e.toLowerCase())),"string"==typeof t&&(t=$.trim(t.toLowerCase())),"natural"===n&&(n=w2utils.naturalCompare),"function"==typeof n?n(e,t)*r:t=parseFloat(o)&&parseFloat(g.parseField(t,a.field))<=parseFloat(l)&&i++:"date"==a.type?(c=g.parseField(t,a.field+"_")instanceof Date?g.parseField(t,a.field+"_"):g.parseField(t,a.field),u=w2utils.isDate(c,w2utils.settings.dateFormat,!0),o=w2utils.isDate(o,w2utils.settings.dateFormat,!0),null!=(l=w2utils.isDate(l,w2utils.settings.dateFormat,!0))&&(l=new Date(l.getTime()+864e5)),o<=u&&u=":s=!0;case">":case"more":-1!=["int","float","money","currency","percent"].indexOf(a.type)?(u=parseFloat(g.parseField(t,a.field)),((o=parseFloat(r.value))this.records.length&&(l=this.records.length-s),0this.last.colStart&&(o=$("#grid_"+this.name+"_rec_"+w2utils.escapeId(r.recid)+' td[col="start"]')),r.columnthis.last.colEnd&&(h='"end"',d=$("#grid_"+this.name+"_rec_"+w2utils.escapeId(l.recid)+' td[col="end"]'));var p=parseInt($("#grid_"+this.name+"_rec_top").next().attr("index")),f=parseInt($("#grid_"+this.name+"_rec_bottom").prev().attr("index")),g=parseInt($("#grid_"+this.name+"_frec_top").next().attr("index")),m=parseInt($("#grid_"+this.name+"_frec_bottom").prev().attr("index"));0===o.length&&r.indexp&&(o=$("#grid_"+this.name+"_rec_top").next().find("td[col="+r.column+"]")),0===d.length&&l.index>f&&r.indexg&&(u=$("#grid_"+this.name+"_frec_top").next().find("td[col="+r.column+"]")),0===c.length&&l.index>m&&r.index'+("selection"==n.name?'
':"")+"
"),w=$("#grid_"+this.name+"_f"+n.name)):(w.attr("style",n.style),w.find(".w2ui-selection-resizer").show()),0===c.length&&(0===(c=$("#grid_"+this.name+"_frec_"+w2utils.escapeId(l.recid)+" td:last-child")).length&&(c=$("#grid_"+this.name+"_frec_bottom td:first-child")),w.css("border-right","0px"),w.find(".w2ui-selection-resizer").hide()),null!=r.recid&&null!=l.recid&&0'+("selection"==n.name?'
':"")+"
"),w=$("#grid_"+this.name+"_"+n.name)):w.attr("style",n.style),0===o.length&&0===(o=$("#grid_"+this.name+"_rec_"+w2utils.escapeId(r.recid)+" td:first-child")).length&&(o=$("#grid_"+this.name+"_rec_top td:first-child")),0!==c.length&&w.css("border-left","0px"),null!=r.recid&&null!=l.recid&&0=this.last.range_start&&h+1<=this.last.range_end)&&(l=$("#grid_"+this.name+"_frec_"+w2utils.escapeId(a)),o=$("#grid_"+this.name+"_rec_"+w2utils.escapeId(a))),"row"==this.selectType){if(-1!=t.indexes.indexOf(h))continue;t.indexes.push(h),l&&o&&(l.addClass("w2ui-selected").data("selected","yes").find(".w2ui-col-number").addClass("w2ui-row-selected"),o.addClass("w2ui-selected").data("selected","yes").find(".w2ui-col-number").addClass("w2ui-row-selected"),l.find(".w2ui-grid-select-check").prop("checked",!0)),e++}}}else{for(var d={},r=0;r=this.last.range_start&&h+1<=this.last.range_end&&(l=$("#grid_"+this.name+"_rec_"+w2utils.escapeId(a)),o=$("#grid_"+this.name+"_frec_"+w2utils.escapeId(a)));var f=t.columns[h]||[];-1==t.indexes.indexOf(h)&&t.indexes.push(h);for(var g=0;g td[col="+c+"]").removeClass("w2ui-selected w2ui-inactive"),$("#grid_"+this.name+"_frec_"+w2utils.escapeId(a)).find(" > td[col="+c+"]").removeClass("w2ui-selected w2ui-inactive");for(var g=!1,m=!1,s=this.getSelection(),p=0;p=this.searches.length?(this.last.field="",this.last.label=""):(this.last.field=this.searches[r].field,this.last.label=this.searches[r].label)}this.last.multi=!1,this.last.xhr_offset=0,this.last.scrollTop=0,this.last.scrollLeft=0,this.last.selection.indexes=[],this.last.selection.columns={},this.searchClose(),$("#grid_"+this.name+"_search_all").val("").removeData("selected"),e||this.reload(),this.trigger($.extend(n,{phase:"after"}))}},searchShowFields:function(e){var t=$("#grid_"+this.name+"_search_all");if(!0!==e){for(var i='
',s=-1;s",n),n.label=n.caption),i+='"}i+="
'+n.label+"
";var d=this.name+"-searchFields";1==$("#w2ui-overlay-"+d).length&&(i=""),setTimeout(function(){$(t).w2overlay({html:i,name:d,left:-10})},1)}else $(t).w2overlay({name:this.name+"-searchFields"})},initAllField:function(e,t){var i=$("#grid_"+this.name+"_search_all");if("all"==e){var s={field:"all",label:w2utils.lang("All Fields")};i.w2field("clear")}else{if(null==(s=this.getSearch(e)))return;var n=s.type;-1!=["enum","select"].indexOf(n)&&(n="list"),i.w2field(n,$.extend({},s.options,{suffix:"",autoFormat:!1,selected:t})),-1!=["list","enum","date","time","datetime"].indexOf(s.type)&&(this.last.search="",this.last.item="",i.val(""),$("#grid_"+this.name+"_searchClear").hide())}""!=this.last.search?(this.last.label=s.label,this.search(s.field,this.last.search)):(this.last.field=s.field,this.last.label=s.label),i.attr("placeholder",w2utils.lang(s.label||s.caption||s.field)),$().w2overlay({name:this.name+"-searchFields"})},clear:function(e){this.total=0,this.records=[],this.summary=[],this.last.xhr_offset=0,this.last.idCache={},this.reset(!0),e||this.refresh()},reset:function(e){this.last.scrollTop=0,this.last.scrollLeft=0,this.last.selection={indexes:[],columns:{}},this.last.range_start=null,this.last.range_end=null,$("#grid_"+this.name+"_records").prop("scrollTop",0),e||this.refresh()},skip:function(e,t){("object"!=typeof this.url?this.url:this.url.get)?(this.offset=parseInt(e),this.offset>this.total&&(this.offset=this.total-this.limit),(this.offset<0||!w2utils.isInt(this.offset))&&(this.offset=0),this.clear(!0),this.reload(t)):console.log("ERROR: grid.skip() can only be called when you have remote data source.")},load:function(e,t){null!=e?(this.clear(!0),this.request("get",{},e,t)):console.log('ERROR: You need to provide url argument when calling .load() method of "'+this.name+'" object.')},reload:function(e){var t=this,i="object"!=typeof this.url?this.url:this.url.get;t.selectionSave(),i?this.load(i,function(){t.selectionRestore(),"function"==typeof e&&e()}):(this.reset(!0),this.localSearch(),this.selectionRestore(),"function"==typeof e&&e({status:"success"}))},request:function(a,e,t,l){if(null==e&&(e={}),""!=t&&null!=t||(t=this.url),""!=t&&null!=t){w2utils.isInt(this.offset)||(this.offset=0),w2utils.isInt(this.last.xhr_offset)||(this.last.xhr_offset=0);var i={limit:this.limit,offset:parseInt(this.offset)+parseInt(this.last.xhr_offset),searchLogic:this.last.logic,search:this.searchData.map(function(e){var t=$.extend({},e);return this.searchMap&&this.searchMap[t.field]&&(t.field=this.searchMap[t.field]),t}.bind(this)),sort:this.sortData.map(function(e){var t=$.extend({},e);return this.sortMap&&this.sortMap[t.field]&&(t.field=this.sortMap[t.field]),t}.bind(this))};if(0===this.searchData.length&&(delete i.search,delete i.searchLogic),0===this.sortData.length&&delete i.sort,$.extend(i,this.postData),$.extend(i,e),"delete"!=a&&"save"!=a||(delete i.limit,delete i.offset,"delete"==(i.action=a)&&(i[this.recid||"recid"]=this.getSelection())),"get"==a){if(!0===(s=this.trigger({phase:"before",type:"request",target:this.name,url:t,postData:i,httpHeaders:this.httpHeaders})).isCancelled)return void("function"==typeof l&&l({status:"error",message:"Request aborted."}))}else var s={url:t,postData:i,httpHeaders:this.httpHeaders};var o=this;if(0===this.last.xhr_offset&&this.lock(w2utils.lang(this.msgRefresh),!0),this.last.xhr)try{this.last.xhr.abort()}catch(e){}if(t="object"!=typeof s.url?s.url:s.url.get,"save"==a&&"object"==typeof s.url&&(t=s.url.save),"delete"==a&&"object"==typeof s.url&&(t=s.url.remove),!$.isEmptyObject(o.routeData)){var n=w2utils.parseRoute(t);if(0 div");$(this.box).find("div.w2ui-edit-box").remove(),"row"!=this.selectType&&($("#grid_"+this.name+r+"selection").attr("id","grid_"+this.name+"_editable").removeClass("w2ui-selection").addClass("w2ui-edit-box").prepend('
').find(".w2ui-selection-resizer").remove(),o=$("#grid_"+this.name+"_editable >div:first-child")),null==n.inTag&&(n.inTag=""),null==n.outTag&&(n.outTag=""),null==n.style&&(n.style=""),null==n.items&&(n.items=[]);var g=p.w2ui&&p.w2ui.changes&&null!=p.w2ui.changes[f.field]?w2utils.stripTags(p.w2ui.changes[f.field]):w2utils.stripTags(p[f.field]);null==g&&(g="");var m="object"!=typeof g?g:"";null!=a.old_value&&(m=a.old_value),null!=e&&(g=e);var w=null!=f.style?f.style+";":"";switch("string"==typeof f.render&&-1!=["number","int","float","money","percent","size"].indexOf(f.render.split(":")[0])&&(w+="text-align: right;"),0"+n.items[y].text+"";o.addClass("w2ui-editable").html('"+n.outTag),setTimeout(function(){o.find("select").on("change",function(e){delete c.last.move}).on("blur",function(e){1!=$(this).data("keep-open")&&c.editChange.call(c,this,h,u,e)})},10);break;case"div":var b="font-family: "+(_=l.find("[col="+u+"] > div")).css("font-family")+"; font-size: "+_.css("font-size")+";";o.addClass("w2ui-editable").html('
"+n.outTag),null==e&&o.find("div.w2ui-input").text("object"!=typeof g?g:"");var x=o.find("div.w2ui-input").get(0);setTimeout(function(){var t=x;$(t).on("blur",function(e){1!=$(this).data("keep-open")&&c.editChange.call(c,t,h,u,e)})},10),null!=e&&$(x).text("object"!=typeof g?g:"");break;default:var _,b="font-family: "+(_=l.find("[col="+u+"] > div")).css("font-family")+"; font-size: "+_.css("font-size");o.addClass("w2ui-editable").html('"+n.outTag),"number"==n.type&&(g=w2utils.formatNumber(g)),"date"==n.type&&(g=w2utils.formatDate(w2utils.isDate(g,n.format,!0)||new Date,n.format)),null==e&&o.find("input").val("object"!=typeof g?g:"");x=o.find("input").get(0);$(x).w2field(n.type,$.extend(n,{selected:g})),setTimeout(function(){var e=x;"list"==n.type&&(e=$($(x).data("w2field").helpers.focus).find("input"),"object"!=typeof g&&""!=g&&e.val(g).css({opacity:1}).prev().css({opacity:1}),o.find("input").on("change",function(e){c.editChange.call(c,x,h,u,e)})),$(e).on("blur",function(e){1!=$(this).data("keep-open")&&c.editChange.call(c,x,h,u,e)})},10),null!=e&&$(x).val("object"!=typeof g?g:"")}return void setTimeout(function(){c.last.inEditMode&&(o.find("input, select, div.w2ui-input").data("old_value",m).on("mousedown",function(e){e.stopPropagation()}).on("click",function(e){"div"==n.type?k.call(o.find("div.w2ui-input")[0],null):k.call(o.find("input, select")[0],null)}).on("paste",function(e){var t=e.originalEvent;e.preventDefault();var i=t.clipboardData.getData("text/plain");document.execCommand("insertHTML",!1,i)}).on("keydown",function(l){var o=this,e="DIV"==o.tagName.toUpperCase()?$(o).text():$(o).val();switch(l.keyCode){case 8:"list"!=n.type||$(x).data("w2field")||l.preventDefault();break;case 9:case 13:l.preventDefault();break;case 37:0===w2utils.getCursorPosition(o)&&l.preventDefault();break;case 39:w2utils.getCursorPosition(o)==e.length&&(w2utils.setCursorPosition(o,e.length),l.preventDefault())}setTimeout(function(){switch(l.keyCode){case 9:var e=d,t=l.shiftKey?c.prevCell(h,u,!0):c.nextCell(h,u,!0);if(null==t){var i=l.shiftKey?c.prevRow(h,u):c.nextRow(h,u);if(null!=i&&i!=h){e=c.records[i].recid;for(var s=0;si.width()&&i.width(n+20)}catch(e){}}},editChange:function(e,t,i,s){var n=this;setTimeout(function(){var e=$(n.box).find("#grid_"+n.name+"_focus");e.is(":focus")||e.focus()},10);var r=t<0;t=t<0?-t-1:t;var a=(r?this.summary:this.records)[t],l=this.columns[i],o=$("#grid_"+this.name+(!0===l.frozen?"_frec_":"_rec_")+w2utils.escapeId(a.recid)),d=e.tagName&&"DIV"==e.tagName.toUpperCase()?$(e).text():e.value,u=this.parseField(a,l.field),c=$(e).data("w2field");c&&("list"==c.type&&(d=$(e).data("selected")),!$.isEmptyObject(d)&&null!=d||(d=""),$.isPlainObject(d)||(d=c.clean(d))),"checkbox"==e.type&&(a.w2ui&&!1===a.w2ui.editable&&(e.checked=!e.checked),d=e.checked);var h={phase:"before",type:"change",target:this.name,input_id:e.id,recid:a.recid,index:t,column:i,originalEvent:s.originalEvent?s.originalEvent:s,value_new:d,value_previous:a.w2ui&&a.w2ui.changes&&a.w2ui.changes.hasOwnProperty(l.field)?a.w2ui.changes[l.field]:u,value_original:u};for(null!=$(s.target).data("old_value")&&(h.value_previous=$(s.target).data("old_value"));;){if("object"!=typeof(d=h.value_new)&&String(u)!=String(d)||"object"==typeof d&&d&&d.id!=u&&("object"!=typeof u||null==u||d.id!=u.id)){if(!0!==(h=this.trigger($.extend(h,{type:"change",phase:"before"}))).isCancelled){if(d!==h.value_new)continue;a.w2ui=a.w2ui||{},a.w2ui.changes=a.w2ui.changes||{},a.w2ui.changes[l.field]=h.value_new,this.trigger($.extend(h,{phase:"after"}))}}else if(!0!==(h=this.trigger($.extend(h,{type:"restore",phase:"before"}))).isCancelled){if(d!==h.value_new)continue;a.w2ui&&a.w2ui.changes&&delete a.w2ui.changes[l.field],a.w2ui&&$.isEmptyObject(a.w2ui.changes)&&delete a.w2ui.changes,this.trigger($.extend(h,{phase:"after"}))}break}var p=$(o).find("[col="+i+"]");r||(a.w2ui&&a.w2ui.changes&&null!=a.w2ui.changes[l.field]?p.addClass("w2ui-changed"):p.removeClass("w2ui-changed"),p.replaceWith(this.getCellHTML(t,i,r))),$(this.box).find("div.w2ui-edit-box").remove(),this.show.toolbarSave&&(0'+w2utils.lang(i.msgDelete).replace("NN",s.length).replace("records",1==s.length?"record":"records")+"
",buttons:w2utils.settings.macButtonOrder?'":'",onOpen:function(e){var t=$(this.box).find("input, textarea, select, button");t.off(".message").on("blur.message",function(e){t.index(e.target)+1===t.length&&(t.get(0).focus(),e.preventDefault())}).on("keydown.message",function(e){27==e.keyCode&&i.message()}),setTimeout(function(){$(this.box).find(".w2ui-btn.btn-default").focus(),clearTimeout(i.last.kbd_timer)}.bind(this),50)}})}},click:function(e,t){var i=(new Date).getTime(),s=null;if(!(1==this.last.cancelClick||t&&t.altKey))if("object"==typeof e&&null!==e&&(s=e.column,e=e.recid),null==t&&(t={}),i-parseInt(this.last.click_time)<350&&this.last.click_recid==e&&"click"==t.type)this.dblClick(e,t);else{this.last.bubbleEl&&($(this.last.bubbleEl).w2tag(),this.last.bubbleEl=null),this.last.click_time=i;var n=this.last.click_recid;this.last.click_recid=e,null==s&&t.target&&("TD"!=(g=t.target).tagName.toUpperCase()&&(g=$(g).parents("td")[0]),null!=$(g).attr("col")&&(s=parseInt($(g).attr("col"))));var r=this.trigger({phase:"before",target:this.name,type:"click",recid:e,column:s,originalEvent:t});if(!0!==r.isCancelled){var a=this,l=this.getSelection();$("#grid_"+this.name+"_check_all").prop("checked",!1);var o=this.get(e,!0),d=(this.records[o],[]);if(a.last.sel_ind=o,a.last.sel_col=s,a.last.sel_recid=e,a.last.sel_type="click",t.shiftKey&&0l[0].column?(u=l[0].column,s):(u=s,l[0].column);for(var f=u;f<=c;f++)d.push(f)}else h=this.get(n,!0),p=this.get(e,!0);var g,m=[];p=this.records.length?this.selectNone():this.selectAll());else if(t.altKey&&(n=this.getColumn(e))&&n.sortable&&this.sort(e,null,!(!t||!t.ctrlKey&&!t.metaKey)),"line-number"==i.field)this.getSelection().length>=this.records.length?this.selectNone():this.selectAll();else{t.shiftKey||t.metaKey||t.ctrlKey||this.selectNone();var i,s=this.getSelection(),n=this.getColumn(i.field,!0),r=[],a=[];if(0!=s.length&&t.shiftKey){var l=n,o=s[0].column;o.w2ui-message").length)27==i.keyCode&&this.message();else{var n=!1,r=$("#grid_"+s.name+"_records"),a=s.getSelection();0===a.length&&(n=!0);var l=a[0]||null,o=[],d=a[a.length-1];if("object"==typeof l&&null!=l){l=a[0].recid,o=[];for(var u=0;a[u]&&a[u].recid==l;)o.push(a[u].column),u++;d=a[a.length-1].recid}var c=s.get(l,!0),h=s.get(d,!0),p=(s.get(l),$("#grid_"+s.name+"_rec_"+(null!=c?w2utils.escapeId(s.records[c].recid):"none"))),f=!1,g=i.keyCode,m=i.shiftKey;switch(g){case 8:case 46:s.delete(),f=!0,i.stopPropagation();break;case 27:s.selectNone(),f=!0;break;case 65:if(!i.metaKey&&!i.ctrlKey)break;s.selectAll(),f=!0;break;case 13:if("row"==this.selectType&&!0===s.show.expandColumn){if(p.length<=0)break;s.toggle(l,i),f=!0}else{for(var w=0;wv&&s.last.sel_ind!=h?s.unselect(s.records[h].recid):s.select(s.records[v].recid);else if(s.last.sel_ind>v&&s.last.sel_ind!=h){v=h;for(y=[],w=0;w
'),$("#grid_"+this.name+"_frec_"+n).after(''+(this.show.lineNumbers?'':"")+'
'),!0===(s=this.trigger({phase:"before",type:"expand",target:this.name,recid:e,box_id:"grid_"+this.name+"_rec_"+e+"_expanded",fbox_id:"grid_"+this.name+"_frec_"+n+"_expanded"})).isCancelled)return $("#grid_"+this.name+"_rec_"+n+"_expanded_row").remove(),$("#grid_"+this.name+"_frec_"+n+"_expanded_row").remove(),!1;var a=$(this.box).find("#grid_"+this.name+"_rec_"+e+"_expanded"),l=$(this.box).find("#grid_"+this.name+"_frec_"+e+"_expanded"),o=a.find("> div:first-child").height();a.height()a&&(a=i[o].column),-1==l.indexOf(i[o].index)&&l.push(i[o].index);l.sort(function(e,t){return e-t});for(var d=0;d div.w2ui-grid-box").css("width",$(this.box).width()).css("height",$(this.box).height());var i=this.trigger({phase:"before",type:"resize",target:this.name});if(!0!==i.isCancelled)return e.resizeBoxes(),e.resizeRecords(),e.toolbar&&e.toolbar.resize&&e.toolbar.resize(),this.trigger($.extend(i,{phase:"after"})),(new Date).getTime()-t}},update:function(e){var t=(new Date).getTime();if(null==this.box)return 0;if(null==e){for(var i=this.last.range_start-1;i<=this.last.range_end-1;i++)if(!(i<0)){(a=this.records[i]||{}).w2ui||(a.w2ui={});for(var s=0;s'+s[0]+'
"+s[1]+'
'+n[0]+'
'+n[1]+"
",l=$("#grid_"+this.name+"_body",a.box).html(r),o=$("#grid_"+this.name+"_records",a.box),d=$("#grid_"+this.name+"_frecords",a.box),u=this;"row"==this.selectType&&(o.on("mouseover mouseout","tr",function(e){$("#grid_"+u.name+"_frec_"+w2utils.escapeId($(this).attr("recid"))).toggleClass("w2ui-record-hover","mouseover"==e.type)}),d.on("mouseover mouseout","tr",function(e){$("#grid_"+u.name+"_rec_"+w2utils.escapeId($(this).attr("recid"))).toggleClass("w2ui-record-hover","mouseover"==e.type)})),w2utils.isIOS?o.add(d).on("click","tr",function(e){u.dblClick($(this).attr("recid"),e)}):o.add(d).on("click","tr",function(e){u.click($(this).attr("recid"),e)}).on("contextmenu","tr",function(e){u.contextMenu($(this).attr("recid"),null,e)}),l.data("scrolldata",{lastTime:0,lastDelta:0,time:0}).find(".w2ui-grid-frecords").on("mousewheel DOMMouseScroll ",function(e){e.preventDefault();var t=e.originalEvent,i=l.data("scrolldata"),s=$(this).siblings(".w2ui-grid-records").addBack().filter(".w2ui-grid-records"),n=null!=typeof t.wheelDelta?-1*t.wheelDelta/120:(t.detail||t.deltaY)/3,r=s.scrollTop();i.time=+new Date,i.lastTime
'+this.msgEmpty+"
"):0<$("#grid_"+this.name+"_empty_msg").length&&$("#grid_"+this.name+"_empty_msg").remove(),0=this.searches.length?(this.last.field="",this.last.label=""):(this.last.field=this.searches[r].field,this.last.label=this.searches[r].label)}$(this.box).attr("name",this.name).addClass("w2ui-reset w2ui-grid w2ui-inactive").html('
"),"row"!=this.selectType&&$(this.box).addClass("w2ui-ss"),0<$(this.box).length&&($(this.box)[0].style.cssText+=this.style),this.initToolbar(),null!=this.toolbar&&this.toolbar.render($("#grid_"+this.name+"_toolbar")[0]),this.last.field&&"all"!=this.last.field&&(i=this.searchData,setTimeout(function(){C.initAllField(C.last.field,1==i.length?i[0].value:null)},1)),$("#grid_"+this.name+"_footer").html(this.getFooterHTML()),this.last.state||(this.last.state=this.stateSave(!0)),this.stateRestore(),s&&(this.clear(),this.refresh());for(var T,a=!1,l=0;l
'),(g=$(C.box).find(".w2ui-grid-records")).append('
'),g.append('
'),$("#grid_"+C.name+"_ghost").append(t).append(h.ghost)),f=$("#grid_"+C.name+"_ghost"),g=$(C.box).find(".w2ui-grid-records"),f.css({top:h.pos.top+g.scrollTop(),left:h.pos.left,"border-top":"1px solid #aaa","border-bottom":"1px solid #aaa"})):C.last.move.reorder=!1)}$(document).on("mousemove.w2ui-"+C.name,m).on("mouseup.w2ui-"+C.name,w),e.stopPropagation()}),this.updateToolbar(),this.trigger($.extend(n,{phase:"after"})),0===$(".w2ui-layout").length&&$(window).off("resize.w2ui-"+C.name).on("resize.w2ui-"+C.name,function(e){null==w2ui[C.name]?$(window).off("resize.w2ui-"+C.name):w2ui[C.name].resize()}),(new Date).getTime()-t}}function m(e){var t=C.last.move;if(t&&-1!=["select","select-column"].indexOf(t.type)&&(t.divX=e.screenX-t.x,t.divY=e.screenY-t.y,!(Math.abs(t.divX)<=1&&Math.abs(t.divY)<=1))){if(C.last.cancelClick=!0,1==C.reorderRows&&C.last.move.reorder){var i,s,n,r=$(C.box).find(".w2ui-grid-records");return"-none-"==(a=(c=$(e.target).parents("tr")).attr("recid"))&&(a="bottom"),a!=t.from&&($("#grid_"+C.name+"_rec_"+t.recid),i=$("#grid_"+C.name+"_rec_"+a),$(C.box).find(".insert-before"),i.addClass("insert-before"),t.lastY=e.screenY,t.to=a,s=i.position(),n=$("#grid_"+C.name+"_ghost_line"),s?n.css({top:s.top+r.scrollTop(),left:t.pos.left,"border-top":"2px solid #769EFC"}):n.css({"border-top":"2px solid transparent"})),void $("#grid_"+C.name+"_ghost").css({top:t.pos.top+t.divY+r.scrollTop(),left:t.pos.left})}t.start&&t.recid&&(C.selectNone(),t.start=!1);var a,l=[];if(null==(a="TR"==e.target.tagName.toUpperCase()?$(e.target).attr("recid"):$(e.target).parents("tr").attr("recid"))){if("row"==C.selectType)return;if(C.last.move&&"select"==C.last.move.type)return;var o=parseInt($(e.target).parents("td").attr("col"));if(isNaN(o))C.removeRange("column-selection"),$(C.box).find(".w2ui-grid-columns .w2ui-col-header, .w2ui-grid-fcolumns .w2ui-col-header").removeClass("w2ui-col-selected"),$(C.box).find(".w2ui-col-number").removeClass("w2ui-row-selected"),delete t.colRange;else{var d=o+"-"+o;t.columno&&(d=o+"-"+t.column);for(var u=[],c=d.split("-"),h=parseInt(c[0]);h<=parseInt(c[1]);h++)u.push(h);if(t.colRange!=d&&!0!==(T=C.trigger({phase:"before",type:"columnSelect",target:C.name,columns:u,isCancelled:!1})).isCancelled){null==t.colRange&&C.selectNone();c=d.split("-");$(C.box).find(".w2ui-grid-columns .w2ui-col-header, .w2ui-grid-fcolumns .w2ui-col-header").removeClass("w2ui-col-selected");for(var p=parseInt(c[0]);p<=parseInt(c[1]);p++)$(C.box).find("#grid_"+C.name+"_column_"+p+" .w2ui-col-header").addClass("w2ui-col-selected");$(C.box).find(".w2ui-col-number").not(".w2ui-head").addClass("w2ui-row-selected"),t.colRange=d,C.removeRange("column-selection"),C.addRange({name:"column-selection",range:[{recid:C.records[0].recid,column:c[0]},{recid:C.records[C.records.length-1].recid,column:c[1]}],style:"background-color: rgba(90, 145, 234, 0.1)"})}}}else{var f=C.get(t.recid,!0);if(null==f||C.records[f]&&C.records[f].recid!=t.recid)return;var g=C.get(a,!0);if(null==g)return;var m=parseInt(t.column),w=parseInt("TD"==e.target.tagName.toUpperCase()?$(e.target).attr("col"):$(e.target).parents("td").attr("col"));isNaN(m)&&isNaN(w)&&(m=0,w=C.columns.length-1),g",i=0;i ")}var r="object"!=typeof this.url?this.url:this.url.get;(r&&e.show.skipRecords||e.show.saveRestoreState)&&(t+=''),r&&e.show.skipRecords&&(t+='"),e.show.saveRestoreState&&(t+='"),t+="
'+w2utils.lang("Skip")+' "+w2utils.lang("Records")+"
"+w2utils.lang("Save Grid State")+'
"+w2utils.lang("Restore Default State")+"
",this.toolbar.get("w2ui-column-on-off").html=t}},initColumnDrag:function(e){if(this.columnGroups&&this.columnGroups.length)throw"Draggable columns are not currently supported with column groups.";var u=this,c={};function t(){c.pressed=!1,clearTimeout(c.timeout)}function i(o){c.timeout&&clearTimeout(c.timeout);var d=this;c.pressed=!0,c.timeout=setTimeout(function(){if(c.pressed&&0!==c.numberPreColumnsPresent){var e,t,i,s,n,r=["w2ui-col-number","w2ui-col-expand","w2ui-col-select"].concat(["w2ui-head-last"]);if($(o.originalEvent.target).parents().hasClass("w2ui-head")){for(var a=0,l=r.length;a=t[t.length-1]+i)return t.length;for(var s=0,n=t.length;s
'),c.markerLeft=$('
'));c.lastInt&&c.lastInt===e||(c.lastInt=e,c.marker.remove(),c.markerLeft.remove(),$(".w2ui-head").removeClass("w2ui-col-intersection"),e>=c.columns.length?($(c.columns[c.columns.length-1]).children("div:last").append(c.marker.addClass("right").removeClass("left")),$(c.columns[c.columns.length-1]).addClass("w2ui-col-intersection")):e<=c.numberPreColumnsPresent?($(c.columns[c.numberPreColumnsPresent]).prepend(c.marker.addClass("left").removeClass("right")).css({position:"relative"}),$(c.columns[c.numberPreColumnsPresent]).prev().addClass("w2ui-col-intersection")):($(c.columns[e]).children("div:last").prepend(c.marker.addClass("left").removeClass("right")),$(c.columns[e]).prev().children("div:last").append(c.markerLeft.addClass("right").removeClass("left")).css({position:"relative"}),$(c.columns[e-1]).addClass("w2ui-col-intersection")))}(c.targetInt),r=t,a=i,$(c.ghost).css({left:r-10,top:a-10}))}function p(e){c.pressed=!1;var t,i,s,n,r=$(".w2ui-grid-ghost"),a=u.trigger({type:"columnDragEnd",phase:"before",originalEvent:e,target:c.columnHead[0]});if(!0===a.isCancelled)return!1;i=u.columns[c.originalPos],s=u.columns,n=$(c.columns[Math.min(c.lastInt,c.columns.length-1)]),(t=c.lastInt
'+this.buttons.search.html+'
',this.toolbar.items.push({type:"html",id:"w2ui-search",html:e})),this.show.toolbarSearch&&this.multiSearch&&0 div',r.box).each(function(){var e=this.offsetWidth-this.scrollWidth;e div.w2ui-grid-box"),i=$("#grid_"+this.name+"_header"),s=$("#grid_"+this.name+"_toolbar"),n=$("#grid_"+this.name+"_summary"),a=$("#grid_"+this.name+"_fsummary"),l=$("#grid_"+this.name+"_footer"),o=$("#grid_"+this.name+"_body"),d=$("#grid_"+this.name+"_columns"),u=$("#grid_"+this.name+"_fcolumns"),c=$("#grid_"+this.name+"_records"),h=$("#grid_"+this.name+"_frecords"),p=$("#grid_"+this.name+"_scroll1"),f=8*String(this.total).length+10;f<34&&(f=34),null!=this.lineNumberWidth&&(f=this.lineNumberWidth);for(var g,m=!1,w=!1,v=0,y=0;ytable").height()+(m?w2utils.scrollBarSize():0)&&(w=!0),this.fixedBody?(g=t.height()-(this.show.header?w2utils.getSize(i,"height"):0)-(this.show.toolbar?w2utils.getSize(s,"height"):0)-("none"!=n.css("display")?w2utils.getSize(n,"height"):0)-(this.show.footer?w2utils.getSize(l,"height"):0),o.css("height",g)):(g=w2utils.getSize(d,"height")+w2utils.getSize($("#grid_"+r.name+"_records table"),"height")+(m?w2utils.scrollBarSize():0),r.height=g+w2utils.getSize(t,"+height")+(r.show.header?w2utils.getSize(i,"height"):0)+(r.show.toolbar?w2utils.getSize(s,"height"):0)+("none"!=n.css("display")?w2utils.getSize(n,"height"):0)+(r.show.footer?w2utils.getSize(l,"height"):0),t.css("height",r.height),o.css("height",g),e.css("height",w2utils.getSize(t,"height")+w2utils.getSize(e,"+height")));var b=this.records.length,x="object"!=typeof this.url?this.url:this.url.get;if(0==this.searchData.length||x||(b=this.last.searchIds.length),this.fixedBody||(w=!1),m||w?(d.find("> table > tbody > tr:nth-child(1) td.w2ui-head-last").css("width",w2utils.scrollBarSize()).show(),c.css({top:(0 table > tbody > tr:nth-child(1) td.w2ui-head-last").hide(),c.css({top:(0=this.recordHeight&&(k-=this.recordHeight,_++),this.fixedBody){for(var C=b;C<_;C++)T(C,this.recordHeight,this);T(_,k,this)}}function T(e,t,i){var s,n="",r="";n+='',r+='',i.show.lineNumbers&&(n+=''),i.show.selectColumn&&(n+=''),i.show.expandColumn&&(n+=''),r+='',i.show.orderColumn&&(r+='');for(var a=0;ai.last.colEnd)&&!l.frozen||(s='',l.frozen?n+=s:r+=s)}n+=' ',r+=' ',$("#grid_"+i.name+"_frecords > table").append(n),$("#grid_"+i.name+"_records > table").append(r)}if(0O&&!0!==R.hidden&&(D=R.hidden=!0),R.gridMinWidthparseInt(R.max)&&(R.sizeCalculated=R.max+"px"),I+=parseInt(R.sizeCalculated))}var E=parseInt(O)-parseInt(I);if(0 table > tbody > tr:nth-child(1) td.w2ui-head-last").css("width",w2utils.scrollBarSize()).show();var F=1;this.show.lineNumbers&&(F+=f),this.show.selectColumn&&(F+=26),this.show.expandColumn&&(F+=26);for(y=0;y table > tbody > tr:nth-child(1) td").add(u.find("> table > tbody > tr:nth-child(1) td")).each(function(e,t){$(t).hasClass("w2ui-col-number")&&$(t).css("width",f);var i=$(t).attr("col");if(null!=i){if("start"==i){for(var s=0,n=0;n table > tbody > tr").length&&d.find("> table > tbody > tr:nth-child(1) td").add(u.find("> table > tbody > tr:nth-child(1) td")).html("").css({height:"0px",border:"0px",padding:"0px",margin:"0px"}),c.find("> table > tbody > tr:nth-child(1) td").add(h.find("> table > tbody > tr:nth-child(1) td")).each(function(e,t){$(t).hasClass("w2ui-col-number")&&$(t).css("width",f);var i=$(t).attr("col");if(null!=i){if("start"==i){for(var s=0,n=0;n table > tbody > tr:nth-child(1) td").add(a.find("> table > tbody > tr:nth-child(1) td")).each(function(e,t){$(t).hasClass("w2ui-col-number")&&$(t).css("width",f);var i=$(t).attr("col");if(null!=i){if("start"==i){for(var s=0,n=0;n',t=!1,i=0;iX",t=!0),null==s.inTag&&(s.inTag=""),null==s.outTag&&(s.outTag=""),null==s.style&&(s.style=""),null==s.type&&(s.type="text"),null==s.label&&null!=s.caption&&(console.log("NOTICE: grid search.caption property is deprecated, please use search.label. Search ->",s),s.label=s.caption);var r='";switch(e+=' '+n+' '+(s.label||"")+' '+r+' ',s.type){case"text":case"alphanumeric":case"hex":case"color":case"list":case"combo":case"enum":var a="width: 250px;";-1!=["hex","color"].indexOf(s.type)&&(a="width: 90px;"),e+='";break;case"int":case"float":case"money":case"currency":case"percent":case"date":case"time":case"datetime":a="width: 90px;";"datetime"==s.type&&(a="width: 140px;"),e+='";break;case"select":e+='"}e+=s.outTag+" "}}return e+='
"},initOperator:function(e,t){var i=this.searches[t],s=$("#grid_"+this.name+"_range_"+t),n=$("#grid_"+this.name+"_field_"+t),r=n.parent().find("span input");switch(n.show(),s.hide(),$(e).val()){case"between":s.show(),r.w2field(i.type,i.options);break;case"not null":case"null":n.hide(),n.val("1"),n.change()}},initSearches:function(){for(var t=this,e=0;e--',a=0;a'+u+""):o+='"}$("#grid_"+this.name+"_field_"+e).html(o)}null!=s?("int"==s.type&&-1!=["in","not in"].indexOf(s.operator)&&$("#grid_"+this.name+"_field_"+e).w2field("clear").val(s.value),$("#grid_"+this.name+"_operator_"+e).val(s.operator).trigger("change"),$.isArray(s.value)?-1!=["in","not in"].indexOf(s.operator)?$("#grid_"+this.name+"_field_"+e).val(s.value).trigger("change"):($("#grid_"+this.name+"_field_"+e).val(s.value[0]).trigger("change"),$("#grid_"+this.name+"_field2_"+e).val(s.value[1]).trigger("change")):null!=s.value&&$("#grid_"+this.name+"_field_"+e).val(s.value).trigger("change")):$("#grid_"+this.name+"_operator_"+e).val(r).trigger("change")}$("#w2ui-overlay-"+this.name+"-searchOverlay .w2ui-grid-searches *[rel=search]").on("keypress",function(e){13==e.keyCode&&(t.search(),$().w2overlay({name:t.name+"-searchOverlay"}))})},getColumnsHTML:function(){var e,t,i,s,g=this,n="",r="";return this.show.columnHeaders&&(r=0 ",g.columnGroups[s]),g.columnGroups[s].text=g.columnGroups[s].caption);""!=g.columnGroups[g.columnGroups.length-1].text&&g.columnGroups.push({text:""});g.show.lineNumbers&&(e+='
 
');g.show.selectColumn&&(e+='
 
');g.show.expandColumn&&(e+='
 
');var n=0;t+='',g.show.orderColumn&&(t+='
 
');for(var r=0;r",l),l.text=l.caption);for(var o=0,d=n;d');var p="function"==typeof l.text?l.text(l):l.text;i='"+h+'
'+(p||" ")+"
",l&&l.frozen?e+=i:t+=i}else{var f="function"==typeof a.text?a.text(a):a.text;i='
'+(f||" ")+"
",l&&l.frozen?e+=i:t+=i}n+=a.span}return e+="",t+='',[e,t]}(),i=a(!1),n=e[0]+t[0]+i[0],e[1]+t[1]+i[1]):(n=(s=a(!0))[0],s[1])),[n,r];function a(e){var t="",i="";g.show.lineNumbers&&(t+='
#
"),g.show.selectColumn&&(t+='
"),g.show.expandColumn&&(t+='
 
');var s,n=0,r=0;i+='',g.show.orderColumn&&(i+='
 
');for(var a=0;a ",o),o.text=o.caption),null==o.size&&(o.size="100%"),a==r&&(r+=(s=g.columnGroups[n++]||{}).span),(ag.last.colEnd)&&!o.frozen||o.hidden||!0===s.master&&!e||(l=g.getColumnCellHTML(a),o&&o.frozen?t+=l:i+=l)}return t+='
 
',i+='
 
',[t+="",i+=""]}},getColumnCellHTML:function(e){var t=this.columns[e];if(null==t)return"";for(var i=!this.reorderColumns||this.columnGroups&&this.columnGroups.length?"":" w2ui-reorder-cols-head ",s="",n=0;n"+(!1!==t.resizable?'
':"")+'
'+(o||" ")+"
"},columnTooltipShow:function(e){var t,i,s;"normal"!=this.columnTooltip&&(t=$(this.box).find("#grid_"+this.name+"_column_"+e),i=this.columns[e],s=this.columnTooltip,t.prop("_mouse_over",!0),setTimeout(function(){!0===t.prop("_mouse_over")&&!0!==t.prop("_mouse_tooltip")&&(t.prop("_mouse_tooltip",!0),t.w2tag(i.tooltip,{position:s,top:5}))},1))},columnTooltipHide:function(e){var t;"normal"!=this.columnTooltip&&(t=$(this.box).find("#grid_"+this.name+"_column_"+e),this.columns[e],t.removeProp("_mouse_over"),setTimeout(function(){!0!==t.prop("_mouse_over")&&!0===t.prop("_mouse_tooltip")&&(t.removeProp("_mouse_tooltip"),t.w2tag())},1))},getRecordsHTML:function(){var e=this.records.length,t="object"!=typeof this.url?this.url:this.url.get;0==this.searchData.length||t||(e=this.last.searchIds.length),e>this.vs_start?this.last.show_extra=this.vs_extra:this.last.show_extra=this.vs_start;var i=$("#grid_"+this.name+"_records"),s=Math.floor((i.height()||0)/this.recordHeight)+this.last.show_extra+1;(!this.fixedBody||e"+n[0],a=""+n[1];r+='',a+='';for(var l=0;l
',a+=' ',this.last.range_start=0,this.last.range_end=s,[r,a]},getSummaryHTML:function(){if(0!==this.summary.length){for(var e=this.getRecordHTML(-1,0),t=""+e[0],i="
"+e[1],s=0;s
",i+=""]}},scroll:function(e){(new Date).getTime();var t,i,r=this,s="object"!=typeof this.url?this.url:this.url.get,n=$("#grid_"+this.name+"_records"),a=$("#grid_"+this.name+"_frecords");e&&(t=e.target.scrollTop,i=e.target.scrollLeft,r.last.scrollTop=t,r.last.scrollLeft=i,$("#grid_"+r.name+"_columns")[0].scrollLeft=i,$("#grid_"+r.name+"_summary")[0].scrollLeft=i,a[0].scrollTop=t),this.last.bubbleEl&&($(this.last.bubbleEl).w2tag(),this.last.bubbleEl=null);var l=null,o=null;if(r.disableCVS||0r.last.scrollLeft&&null==l&&(l=h),c+d-30>r.last.scrollLeft+u&&null==o&&(o=h),c+=d)}null==o&&(o=r.columns.length-1)}if(null!=l&&(l<0&&(l=0),o<0&&(o=0),l==o&&(0r.last.colStart)for(h=r.last.colStart;h';null!=i&&(s=r.getCellHTML(parseInt(i),h,!1)),$(t).after(s)}),b.each(function(e,t){var i=$(t).parent().attr("index"),s='';null!=i&&(s=r.getCellHTML(parseInt(i),h,!0)),$(t).after(s)}));if(o>r.last.colEnd)for(h=r.last.colEnd+1;h<=o;h++)r.columns[h]&&(r.columns[h].frozen||r.columns[h].hidden)||(w.before(r.getColumnCellHTML(h)),y.each(function(e,t){var i=$(t).parent().attr("index"),s='';null!=i&&(s=r.getCellHTML(parseInt(i),h,!1)),$(t).before(s)}),x.each(function(e,t){var i=$(t).parent().attr("index")||-1,s=r.getCellHTML(parseInt(i),h,!0);$(t).before(s)}));r.last.colStart=l,r.last.colEnd=o,r.resizeRecords()}else{r.last.colStart=l,r.last.colEnd=o;var _=this.getColumnsHTML(),k=this.getRecordsHTML(),C=this.getSummaryHTML(),T=p.find("#grid_"+this.name+"_columns"),S=p.find("#grid_"+this.name+"_records"),O=p.find("#grid_"+this.name+"_frecords"),z=p.find("#grid_"+this.name+"_summary");T.find("tbody").html(_[1]),O.html(k[0]),S.prepend(k[1]),null!=C&&z.html(C[1]),setTimeout(function(){S.find("> table").not("table:first-child").remove(),z[0]&&(z[0].scrollLeft=r.last.scrollLeft)},1),r.resizeRecords()}}var D=this.records.length;if(D>this.total&&(D=this.total),0==this.searchData.length||s||(D=this.last.searchIds.length),0!==D&&0!==n.length&&0!==n.height()){D>this.vs_start?this.last.show_extra=this.vs_extra:this.last.show_extra=this.vs_start;var I=Math.round(n[0].scrollTop/this.recordHeight+1),E=I+(Math.round(n.height()/this.recordHeight)-1);if(Dthis.total&&-1!=this.total&&(F=this.total);var j=n.find("#grid_"+this.name+"_rec_top"),A=n.find("#grid_"+this.name+"_rec_bottom"),M=a.find("#grid_"+this.name+"_frec_top"),N=a.find("#grid_"+this.name+"_frec_bottom");-1!=String(j.next().prop("id")).indexOf("_expanded_row")&&(j.next().remove(),M.next().remove()),this.total>F&&-1!=String(A.prev().prop("id")).indexOf("_expanded_row")&&(A.prev().remove(),N.prev().remove());var L,P=parseInt(j.next().attr("line")),H=parseInt(A.prev().attr("line"));if(P=P-this.last.show_extra+2&&1F))break;B.remove(),Y.remove()}"bottom"==(L=n.find("#grid_"+this.name+"_rec_top").next().attr("line"))&&(L=F);for(var Y,X,h=parseInt(L)-1;R<=h;h--){this.records[h-1]&&((Y=this.records[h-1].w2ui)&&!Array.isArray(Y.children)&&(Y.expanded=!1),X=this.getRecordHTML(h-1,h),j.after(X[1]),M.after(X[0]))}U(),setTimeout(function(){r.refreshRanges()},0)}var W=(R-1)*r.recordHeight,K=(D-F)*this.recordHeight;K<0&&(K=0),j.css("height",W+"px"),M.css("height",W+"px"),A.css("height",K+"px"),N.css("height",K+"px"),r.last.range_start=R,r.last.range_end=F,D
'),r.last.pull_more=!0,r.last.xhr_offset+=r.limit,r.request("get")}).find("td").html(r.autoLoad?'
':'
'+w2utils.lang("Load")+" "+r.limit+" "+w2utils.lang("More")+"...
"))}}function U(){r.markSearch&&(clearTimeout(r.last.marker_timer),r.last.marker_timer=setTimeout(function(){for(var e=[],t=0;t',r+='',this.show.lineNumbers&&(n+=''),this.show.selectColumn&&(n+=''),this.show.expandColumn&&(n+=''),r+='',this.show.orderColumn&&(r+='');for(var l=0;l';if((w=this.columns[l]).frozen&&!w.hidden)n+=o;else{if(w.hidden||lthis.last.colEnd)continue;r+=o}}return n+='',r+='',[n+="",r+=""]}var d,u="object"!=typeof this.url?this.url:this.url.get;if(!0!==i)if(0=this.last.searchIds.length)return"";e=this.last.searchIds[e],s=this.records[e]}else{if(e>=this.records.length)return"";s=this.records[e]}else{if(e>=this.summary.length)return"";s=this.summary[e]}if(!s)return"";null!=s.recid||null==this.recid||null!=(d=this.parseField(s,this.recid))&&(s.recid=d);w2utils.escapeId(s.recid);var c=!1;-1!=a.indexes.indexOf(e)&&(c=!0);var h=s.w2ui?s.w2ui.style:"";null!=h&&"string"==typeof h||(h="");var p,f=s.w2ui?s.w2ui.class:"";null!=f&&"string"==typeof f||(f=""),n+='",r+='",this.show.lineNumbers&&(n+='"+(!0!==i?this.getLineHTML(t,s):"")+""),this.show.selectColumn&&(s&&s.w2ui&&s.w2ui.hideCheckBox,n+=''+(!0===i||s.w2ui&&!0===s.w2ui.hideCheckBox?"":'
')+""),this.show.expandColumn&&(p="",p=s.w2ui&&!0===s.w2ui.expanded?"-":"+",s.w2ui&&"none"==s.w2ui.expanded&&(p=""),s.w2ui&&"spinner"==s.w2ui.expanded&&(p='
'),n+=''+(!0!==i?'
"+p+"
":"")+""),r+='',this.show.orderColumn&&(r+=''+(!0!==i?'
 
':"")+"");for(var g=0,m=0;;){var w,v,y,b=1;if(null==(w=this.columns[g]))break;if(w.hidden)g++,0this.last.colEnd)||w.frozen){if(s.w2ui&&"object"==typeof s.w2ui.colspan){var x=parseInt(s.w2ui.colspan[w.field])||null;if(1=this.columns.length);l++)this.columns[l].hidden&&_++;b=x-_,m=x-1}}var k=this.getCellHTML(e,g,i,b);w.frozen?n+=k:r+=k,g++}else g++}}return n+='',r+='',[n+="",r+=""]},getLineHTML:function(e){return"
"+e+"
"},getCellHTML:function(i,s,e,t){var n=this,r=this.columns[s];if(null==r)return"";var a,l,o,d,u,c,h,p=!0!==e?this.records[i]:this.summary[i],f=-1!==i?this.getCellValue(i,s,e):"",g=-1!==i?this.getCellEditable(i,s):"",m="max-height: "+parseInt(this.recordHeight)+"px;"+(r.clipboardCopy?"margin-right: 20px":""),w=!e&&p&&p.w2ui&&p.w2ui.changes&&null!=p.w2ui.changes[r.field],v="",y="",b=this.last.selection,x=!1,_="";if(-1!=b.indexes.indexOf(i)&&(x=!0),null==t&&(t=p&&p.w2ui&&p.w2ui.colspan&&p.w2ui.colspan[r.field]?p.w2ui.colspan[r.field]:1),0===s&&p&&p.w2ui&&Array.isArray(p.w2ui.children)){for(var k,C=0,T=this.get(p.w2ui.parent_recid,!0);null!=T;){if(C++,null==(k=this.records[T].w2ui)||null==k.parent_recid)break;T=this.get(k.parent_recid,!0)}if(p.w2ui.parent_recid)for(var S=0;S';_+='"}!0===r.info&&(r.info={}),null!=r.info&&(a="w2ui-icon-info","function"==typeof r.info.icon?a=r.info.icon(p):"object"==typeof r.info.icon?a=r.info.icon[this.parseField(p,r.field)]||"":"string"==typeof r.info.icon&&(a=r.info.icon),l=r.info.style||"","function"==typeof r.info.style?l=r.info.style(p):"object"==typeof r.info.style?l=r.info.style[this.parseField(p,r.field)]||"":"string"==typeof r.info.style&&(l=r.info.style),_+='"),null!=r.render&&-1!==i?("function"==typeof r.render&&(null!=(o=r.render.call(this,p,i,s,f))&&"object"==typeof o?(f=$.trim(o.html||""),y=o.class||"",v=o.style||""):f=$.trim(o),(f.length<4||"'+_+String(f)+"")),"object"==typeof r.render&&(null!=(d=r.render[f])&&""!==d||(d=f),f='
'+_+String(d)+"
"),"string"==typeof r.render&&(k=[],-1==(u=r.render.toLowerCase().indexOf(":"))?(k[0]=r.render.toLowerCase(),k[1]=""):(k[0]=r.render.toLowerCase().substr(0,u),k[1]=r.render.toLowerCase().substr(u+1)),c=w2utils.formatters[k[0]],r.options&&!1===r.options.autoFormat&&(c=null),f='
'+_+String(f)+"
")):(g&&-1!=["checkbox","check"].indexOf(g.type)&&(h=e?-(i+1):i,m+="text-align: center;",f='',_=""),f='
'+_+String(f)+"
"),null==f&&(f=""),"string"==typeof r.render&&(k=r.render.toLowerCase().split(":"),-1!=["number","int","float","money","currency","percent","size"].indexOf(k[0])&&(v+="text-align: right;")),p&&p.w2ui&&("object"==typeof p.w2ui.style&&("string"==typeof p.w2ui.style[s]&&(v+=p.w2ui.style[s]+";"),"string"==typeof p.w2ui.style[r.field]&&(v+=p.w2ui.style[r.field]+";")),"object"==typeof p.w2ui.class&&("string"==typeof p.w2ui.class[s]&&(y+=p.w2ui.class[s]+" "),"string"==typeof p.w2ui.class[r.field]&&(y+=p.w2ui.class[r.field]+" ")));var O=!1;x&&-1!=$.inArray(s,b.columns[i])&&(O=!0);var z="string"==typeof r.clipboardCopy?r.clipboardCopy:"Copy to clipboard",D="',f='"+f+(""!=w2utils.stripTags(f)&&r.clipboardCopy&&z?D:"")+"";return-1===i&&!0===e&&(f='"),f;function I(e){var t="";return n.show.recordTitles&&(null!=r.title?("function"==typeof r.title&&(t=r.title.call(n,p,i,s)),"string"==typeof r.title&&(t=r.title)):t=w2utils.stripTags(String(e).replace(/"/g,"''"))),null!=t?String(t):""}},clipboardCopy:function(e,t){var i=this.records[e],s=this.columns[t],n=s?this.parseField(i,s.field):"";"function"==typeof s.clipboardCopy&&(n=s.clipboardCopy(i)),$("#grid_"+this.name+"_focus").text(n).select(),document.execCommand("copy")},showBubble:function(e,t){var i="",s=this.columns[t].info,n=this.records[e],r=$(this.box).find("#grid_"+this.name+"_data_"+e+"_"+t+" .w2ui-info");if(this.last.bubbleEl&&$(this.last.bubbleEl).w2tag(),this.last.bubbleEl=r,null==s.fields){s.fields=[];for(var a=0;as.maxLength&&(c=c.substr(0,s.maxLength)+"..."),i+=""+l.text+""+((0===c?"0":c)||"")+"")):i+='
'}i+=""}else if($.isPlainObject(o)){for(var d in i='',o){var u,c,h=o[d];""!=h&&"-"!=h&&"--"!=h&&"---"!=h?(u=String(h).split(":"),null==(l=this.getColumn(u[0]))&&(l={field:u[0],caption:u[0]}),c=l?this.parseField(n,l.field):"",1s.maxLength&&(c=c.substr(0,s.maxLength)+"..."),i+="")):i+=''}i+="
"+d+""+(c||"")+"
"}$(r).w2tag($.extend({html:i,left:-4,position:"bottom|top",className:"w2ui-info-bubble",style:"",hideOnClick:!0},s.options||{}))},getCellEditable:function(e,t){var i=this.columns[t],s=this.records[e];if(!s||!i)return null;var n,r=s.w2ui?s.w2ui.editable:null;return!1===r?null:(null!=r&&!0!==r||"function"==typeof(r=i?i.editable:null)&&(n=this.getCellValue(e,t,!1),r=r.call(this,s,e,t,n)),r)},getCellValue:function(e,t,i){var s=this.columns[t],n=!0!==i?this.records[e]:this.summary[e],r=this.parseField(n,s.field);return n&&n.w2ui&&n.w2ui.changes&&null!=n.w2ui.changes[s.field]&&(r=n.w2ui.changes[s.field]),$.isPlainObject(r)&&(s.options&&s.options.items?(val=s.options.items.find(function(e){return e.id==r.id}),r=val?val.text:r.id):(null!=r.text&&(r=r.text),null!=r.id&&(r=r.id))),null==r&&(r=""),r},getFooterHTML:function(){return'
'},status:function(e){var t,i,s;null!=e?$("#grid_"+this.name+"_footer").find(".w2ui-footer-left").html(e):(t="",0<(i=this.getSelection()).length&&(this.show.statusSelection&&1=this.columns.length)return null;var n=this.records[e].w2ui,r=(this.columns[t],this.columns[s]),a=n&&n.colspan&&!isNaN(n.colspan[r.field])?parseInt(n.colspan[r.field]):1;if(null==r)return null;if(r&&r.hidden||0===a)return this.nextCell(e,s,i);if(i){var l=this.getCellEditable(e,t);if(null==l||-1!=["checkbox","check"].indexOf(l.type))return this.nextCell(e,s,i)}return s},prevCell:function(e,t,i){var s=t-1;if(s<0)return null;var n=this.records[e].w2ui,r=this.columns[s],a=n&&n.colspan&&!isNaN(n.colspan[r.field])?parseInt(n.colspan[r.field]):1;if(null==r)return null;if(r&&r.hidden||0===a)return this.prevCell(e,s,i);if(i){var l=this.getCellEditable(e,t);if(null==l||-1!=["checkbox","check"].indexOf(l.type))return this.prevCell(e,s,i)}return s},nextRow:function(e,t){var i=this.last.searchIds,s=null;if(e+1this.records.length);)e++;var n=this.records[e].w2ui,r=this.columns[t],s=0===(n&&n.colspan&&null!=r&&!isNaN(n.colspan[r.field])?parseInt(n.colspan[r.field]):1)?this.nextRow(e,t):e}return s},prevRow:function(e,t){var i=this.last.searchIds,s=null;if(0i[0]){if(e--,0'+e+"",buttons:'",onOpen:function(e){setTimeout(function(){$(this.box).find(".w2ui-btn").focus()},25)},onClose:function(e){"function"==typeof t&&t()}}),w2utils.message.call(this,{box:this.box,path:"w2ui."+this.name,title:".w2ui-grid-header:visible",body:".w2ui-grid-box"},e)}},$.extend(w2grid.prototype,w2utils.event),w2obj.grid=w2grid}(jQuery),function(z){function l(e){this.box=null,this.name=null,this.panels=[],this.tmp={},this.padding=1,this.resizer=4,this.style="",z.extend(!0,this,w2obj.layout,e)}var D=["top","left","main","preview","right","bottom"];z.fn.w2layout=function(e){if(z.isPlainObject(e)){if(!w2utils.checkName(e,"w2layout"))return;var t=e.panels||[],i=new l(e);z.extend(i,{handlers:[],panels:[]});for(var s=0,n=t.length;s"+t+""),r.status=!0,r;if(null==n)return console.log("ERROR: incorrect panel name. Panel name can be main, left, right, top, bottom, preview or css"),r.error=!0,r;if(null==t)return r;var a=this.trigger({phase:"before",type:"content",target:e,object:n,content:t,transition:i});if(!0===a.isCancelled)return r.cancelled=!0,r;if(t instanceof jQuery)return console.log("ERROR: You can not pass jQuery object to w2layout.content() method"),r;var l,o,d="#layout_"+this.name+"_panel_"+n.type,u=z(d+"> .w2ui-panel-content"),c=0;return 0 .w2ui-panel-content")).after('
'),o=z(d+"> .w2ui-panel-content.new-panel"),l.css("top",c),o.css("top",c),"object"==typeof t?(t.box=o[0],t.render()):o.html(t),w2utils.transition(l[0],o[0],i,function(){l.remove(),o.removeClass("new-panel"),o.css("overflow",n.overflow),z(d+"> .w2ui-panel-content").slice(1).remove(),s.resize(),-1!=window.navigator.userAgent.indexOf("MSIE")&&setTimeout(function(){s.resize()},100)}))),this.refresh(e),s.trigger(z.extend(a,{phase:"after"})),s.resize(),-1!=window.navigator.userAgent.indexOf("MSIE")&&setTimeout(function(){s.resize()},100),r},message:function(e,t){var i=this;"string"==typeof t&&(t={width:t.length<300?350:550,height:t.length<300?170:250,body:'
'+t+"
",buttons:'",onOpen:function(e){setTimeout(function(){z(this.box).find(".w2ui-btn").focus()},25)}});var s,n=this.get(e),r=z("#layout_"+this.name+"_panel_"+n.type).css("overflow");t&&(t.onClose&&(s=t.onClose),t.onClose=function(e){"function"==typeof s&&s(e),e.done(function(){z("#layout_"+i.name+"_panel_"+n.type).css("overflow",r)})}),z("#layout_"+this.name+"_panel_"+n.type).css("overflow","hidden"),w2utils.message.call(this,{box:z("#layout_"+this.name+"_panel_"+n.type),param:e,path:"w2ui."+this.name,title:".w2ui-panel-title:visible",body:".w2ui-panel-content"},t)},load:function(s,e,n,r){var a=this;return"css"==s?(z.get(e,function(e,t,i){a.html(s,i.responseText),r&&r()}),!0):null!=this.get(s)&&(z.get(e,function(e,t,i){a.html(s,i.responseText,n),r&&r(),a.resize(),-1!=window.navigator.userAgent.indexOf("MSIE")&&setTimeout(function(){a.resize()},100)}),!0)},sizeTo:function(e,t,i){var s=this;return null!=s.get(e)&&(z(s.box).find(" > div > .w2ui-panel").css(w2utils.cssPrefix("transition",!0!==i?".2s":"0s")),setTimeout(function(){s.set(e,{size:t})},1),setTimeout(function(){z(s.box).find(" > div > .w2ui-panel").css(w2utils.cssPrefix("transition","0s")),s.resize()},500),!0)},show:function(e,t){var i=this,s=this.trigger({phase:"before",type:"show",target:e,object:this.get(e),immediate:t});if(!0!==s.isCancelled){var n=i.get(e);return null==n?!1:(!(n.hidden=!1)===t?(z("#layout_"+i.name+"_panel_"+e).css({opacity:"1"}),i.trigger(z.extend(s,{phase:"after"})),i.resize()):(z("#layout_"+i.name+"_panel_"+e).css({opacity:"0"}),z(i.box).find(" > div > .w2ui-panel").css(w2utils.cssPrefix("transition",".2s")),setTimeout(function(){i.resize()},1),setTimeout(function(){z("#layout_"+i.name+"_panel_"+e).css({opacity:"1"})},250),setTimeout(function(){z(i.box).find(" > div > .w2ui-panel").css(w2utils.cssPrefix("transition","0s")),i.trigger(z.extend(s,{phase:"after"})),i.resize()},500)),!0)}},hide:function(e,t){var i=this,s=this.trigger({phase:"before",type:"hide",target:e,object:this.get(e),immediate:t});if(!0!==s.isCancelled){var n=i.get(e);return null==n?!1:((n.hidden=!0)===t?(z("#layout_"+i.name+"_panel_"+e).css({opacity:"0"}),i.trigger(z.extend(s,{phase:"after"})),i.resize()):(z(i.box).find(" > div > .w2ui-panel").css(w2utils.cssPrefix("transition",".2s")),z("#layout_"+i.name+"_panel_"+e).css({opacity:"0"}),setTimeout(function(){i.resize()},1),setTimeout(function(){z(i.box).find(" > div > .w2ui-panel").css(w2utils.cssPrefix("transition","0s")),i.trigger(z.extend(s,{phase:"after"})),i.resize()},500)),!0)}},toggle:function(e,t){var i=this.get(e);return null!=i&&(i.hidden?this.show(e,t):this.hide(e,t))},set:function(e,t){var i=this.get(e,!0);return null!=i&&(z.extend(this.panels[i],t),null==t.content&&null==t.resizable||this.refresh(e),this.resize(),!0)},get:function(e,t){for(var i=0;i .w2ui-panel-content");return 1!=t.length?null:t[0]},hideToolbar:function(e){var t=this.get(e);t&&(t.show.toolbar=!1,z("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-toolbar").hide(),this.resize())},showToolbar:function(e){var t=this.get(e);t&&(t.show.toolbar=!0,z("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-toolbar").show(),this.resize())},toggleToolbar:function(e){var t=this.get(e);t&&(t.show.toolbar?this.hideToolbar(e):this.showToolbar(e))},assignToolbar:function(e,t){"string"==typeof t&&null!=w2ui[t]&&(t=w2ui[t]);var i=this.get(e);i.toolbar=t;var s=z(this.box).find(e+"> .w2ui-panel-toolbar");null!=i.toolbar?(0===s.find("[name="+i.toolbar.name+"]").length?s.w2render(i.toolbar):null!=i.toolbar&&i.toolbar.refresh(),(t.owner=this).showToolbar(e),this.refresh(e)):(s.html(""),this.hideToolbar(e))},hideTabs:function(e){var t=this.get(e);t&&(t.show.tabs=!1,z("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-tabs").hide(),this.resize())},showTabs:function(e){var t=this.get(e);t&&(t.show.tabs=!0,z("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-tabs").show(),this.resize())},toggleTabs:function(e){var t=this.get(e);t&&(t.show.tabs?this.hideTabs(e):this.showTabs(e))},render:function(e){var c=this,t=(new Date).getTime(),i=c.trigger({phase:"before",type:"render",target:c.name,box:e});if(!0!==i.isCancelled){if(null!=e&&(0"),0
';z(c.box).find(" > div").append(n)}return z(c.box).find(" > div").append('
'),c.refresh(),c.trigger(z.extend(i,{phase:"after"})),setTimeout(function(){c.tmp.events={resize:function(e){null==w2ui[c.name]?z(window).off("resize.w2ui-"+c.name):w2ui[c.name].resize()},resizeStart:r,mouseMove:l,mouseUp:a},z(window).on("resize.w2ui-"+c.name,c.tmp.events.resize),c.resize()},0),(new Date).getTime()-t}function r(e,t){if(c.box){t=t||window.event,z(document).off("mousemove",c.tmp.events.mouseMove).on("mousemove",c.tmp.events.mouseMove),z(document).off("mouseup",c.tmp.events.mouseUp).on("mouseup",c.tmp.events.mouseUp),c.tmp.resize={type:e,x:t.screenX,y:t.screenY,diff_x:0,diff_y:0,value:0};for(var i=0;it.width&&(r=t.minSize-t.width),t.maxSize&&t.width+r>t.maxSize&&(r=t.maxSize-t.width),l.minSize+r>l.width&&(r=l.width-l.minSize);break;case"right":t.minSize+r>t.width&&(r=t.width-t.minSize),t.maxSize&&t.width-r>t.maxSize&&(r=t.width-t.maxSize),l.minSize-r>l.width&&(r=l.minSize-l.width);break;case"top":t.minSize-a>t.height&&(a=t.minSize-t.height),t.maxSize&&t.height+a>t.maxSize&&(a=t.maxSize-t.height),l.minSize+a>l.height&&(a=l.height-l.minSize);break;case"preview":case"bottom":t.minSize+a>t.height&&(a=t.height-t.minSize),t.maxSize&&t.height-a>t.maxSize&&(a=t.height-t.maxSize),l.minSize-a>l.height&&(a=l.minSize-l.height)}switch(i.diff_x=r,i.diff_y=a,i.type){case"top":case"preview":case"bottom":(i.diff_x=0) .w2ui-panel-content")[0],setTimeout(function(){0 .w2ui-panel-content").length&&(z(r+"> .w2ui-panel-content").removeClass().removeAttr("name").addClass("w2ui-panel-content").css("overflow",n.overflow)[0].style.cssText+=";"+n.style),n.content&&"function"==typeof n.content.render&&n.content.render()},1)):0 .w2ui-panel-content").length&&(z(r+"> .w2ui-panel-content").removeClass().removeAttr("name").addClass("w2ui-panel-content").html(n.content).css("overflow",n.overflow)[0].style.cssText+=";"+n.style);var l=z(t.box).find(r+"> .w2ui-panel-tabs");n.show.tabs?0===l.find("[name="+n.tabs.name+"]").length&&null!=n.tabs?l.w2render(n.tabs):n.tabs.refresh():l.html("").removeClass("w2ui-tabs").hide(),l=z(t.box).find(r+"> .w2ui-panel-toolbar"),n.show.toolbar?0===l.find("[name="+n.toolbar.name+"]").length&&null!=n.toolbar?l.w2render(n.toolbar):n.toolbar.refresh():l.html("").removeClass("w2ui-toolbar").hide(),l=z(t.box).find(r+"> .w2ui-panel-title"),n.title?l.html(n.title).show():l.html("").hide()}else{if(0===z("#layout_"+t.name+"_panel_main").length)return void t.render();t.resize();for(var o=0;o div").css({width:s+"px",height:n+"px"});for(var r,a,l,o,d,u,c,h=this,p=this.get("main"),f=this.get("preview"),g=this.get("left"),m=this.get("right"),w=this.get("top"),v=this.get("bottom"),y=null!=f&&!0!==f.hidden,b=null!=g&&!0!==g.hidden,x=null!=m&&!0!==m.hidden,_=null!=w&&!0!==w.hidden,k=null!=v&&!0!==v.hidden,C=0;Cthis.padding?this.resizer:this.padding,z("#layout_"+this.name+"_resizer_top").show().css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px",cursor:"ns-resize"}).off("mousedown").on("mousedown",function(e){var t=h.trigger({phase:"before",type:"resizerClick",target:"top",originalEvent:e});if(!0!==t.isCancelled)return w2ui[h.name].tmp.events.resizeStart("top",e),h.trigger(z.extend(t,{phase:"after"})),!1}))):(z("#layout_"+this.name+"_panel_top").hide(),z("#layout_"+this.name+"_resizer_top").hide()),null!=g&&!0!==g.hidden?(a=(r=0)+(_?w.sizeCalculated+this.padding:0),l=g.sizeCalculated,o=n-(_?w.sizeCalculated+this.padding:0)-(k?v.sizeCalculated+this.padding:0),d=z("#layout_"+this.name+"_panel_left"),-1!=window.navigator.userAgent.indexOf("MSIE")&&0this.padding?this.resizer:this.padding,z("#layout_"+this.name+"_resizer_left").show().css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px",cursor:"ew-resize"}).off("mousedown").on("mousedown",function(e){var t=h.trigger({phase:"before",type:"resizerClick",target:"left",originalEvent:e});if(!0!==t.isCancelled)return w2ui[h.name].tmp.events.resizeStart("left",e),h.trigger(z.extend(t,{phase:"after"})),!1}))):(z("#layout_"+this.name+"_panel_left").hide(),z("#layout_"+this.name+"_resizer_left").hide()),null!=m&&!0!==m.hidden?(r=s-m.sizeCalculated,a=0+(_?w.sizeCalculated+this.padding:0),l=m.sizeCalculated,o=n-(_?w.sizeCalculated+this.padding:0)-(k?v.sizeCalculated+this.padding:0),z("#layout_"+this.name+"_panel_right").css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px"}).show(),m.width=l,m.height=o,m.resizable&&(r-=this.padding,l=this.resizer>this.padding?this.resizer:this.padding,z("#layout_"+this.name+"_resizer_right").show().css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px",cursor:"ew-resize"}).off("mousedown").on("mousedown",function(e){var t=h.trigger({phase:"before",type:"resizerClick",target:"right",originalEvent:e});if(!0!==t.isCancelled)return w2ui[h.name].tmp.events.resizeStart("right",e),h.trigger(z.extend(t,{phase:"after"})),!1}))):(z("#layout_"+this.name+"_panel_right").hide(),z("#layout_"+this.name+"_resizer_right").hide()),null!=v&&!0!==v.hidden?(r=0,a=n-v.sizeCalculated,l=s,o=v.sizeCalculated,z("#layout_"+this.name+"_panel_bottom").css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px"}).show(),v.width=l,v.height=o,v.resizable&&(a-=0===this.padding?0:this.padding,o=this.resizer>this.padding?this.resizer:this.padding,z("#layout_"+this.name+"_resizer_bottom").show().css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px",cursor:"ns-resize"}).off("mousedown").on("mousedown",function(e){var t=h.trigger({phase:"before",type:"resizerClick",target:"bottom",originalEvent:e});if(!0!==t.isCancelled)return w2ui[h.name].tmp.events.resizeStart("bottom",e),h.trigger(z.extend(t,{phase:"after"})),!1}))):(z("#layout_"+this.name+"_panel_bottom").hide(),z("#layout_"+this.name+"_resizer_bottom").hide()),r=0+(b?g.sizeCalculated+this.padding:0),a=0+(_?w.sizeCalculated+this.padding:0),l=s-(b?g.sizeCalculated+this.padding:0)-(x?m.sizeCalculated+this.padding:0),o=n-(_?w.sizeCalculated+this.padding:0)-(k?v.sizeCalculated+this.padding:0)-(y?f.sizeCalculated+this.padding:0),d=z("#layout_"+this.name+"_panel_main"),-1!=window.navigator.userAgent.indexOf("MSIE")&&0this.padding?this.resizer:this.padding,z("#layout_"+this.name+"_resizer_preview").show().css({display:"block",left:r+"px",top:a+"px",width:l+"px",height:o+"px",cursor:"ns-resize"}).off("mousedown").on("mousedown",function(e){var t=h.trigger({phase:"before",type:"resizerClick",target:"preview",originalEvent:e});if(!0!==t.isCancelled)return w2ui[h.name].tmp.events.resizeStart("preview",e),h.trigger(z.extend(t,{phase:"after"})),!1}))):(z("#layout_"+this.name+"_panel_preview").hide(),z("#layout_"+this.name+"_resizer_preview").hide());for(var T=0;T .w2ui-panel-",O=0;S&&(S.title&&(O+=w2utils.getSize(z($+"title").css({top:O+"px",display:"block"}),"height")),S.show.tabs&&(null!=S.tabs&&w2ui[this.name+"_"+D[T]+"_tabs"]&&w2ui[this.name+"_"+D[T]+"_tabs"].resize(),O+=w2utils.getSize(z($+"tabs").css({top:O+"px",display:"block"}),"height")),S.show.toolbar&&(null!=S.toolbar&&w2ui[this.name+"_"+D[T]+"_toolbar"]&&w2ui[this.name+"_"+D[T]+"_toolbar"].resize(),O+=w2utils.getSize(z($+"toolbar").css({top:O+"px",display:"block"}),"height"))),z($+"content").css({display:"block"}).css({top:O+"px"})}return clearTimeout(this._resize_timer),this._resize_timer=setTimeout(function(){for(var e in w2ui){var t;"function"==typeof w2ui[e].resize&&(null==w2ui[e].panels&&w2ui[e].resize(),0<(t=z(w2ui[e].box).parents(".w2ui-layout")).length&&t.attr("name")==h.name&&w2ui[e].resize())}},100),this.trigger(z.extend(i,{phase:"after"})),(new Date).getTime()-e}},destroy:function(){var e=this.trigger({phase:"before",type:"destroy",target:this.name});if(!0!==e.isCancelled)return null!=w2ui[this.name]&&(0"+e+""),"object"==typeof t&&(i.buttons+='"),"string"==typeof t&&(i.buttons+=t)})),0===m("#w2ui-popup").length){if(!0===(a=this.trigger({phase:"before",type:"open",target:"popup",options:i,present:!1})).isCancelled)return;w2popup.status="opening",w2popup.lockScreen(i);var d="";i.showClose&&(d+='
Close
'),i.showMax&&(d+='
Max
');var u='
';m("body").append(u);var c=m("#w2ui-popup");0'+d+'
';m("#w2ui-popup").html(u),i.title&&m("#w2ui-popup .w2ui-popup-title").append(i.title),i.buttons&&m("#w2ui-popup .w2ui-popup-buttons").append(i.buttons),i.body&&m("#w2ui-popup .w2ui-popup-body").append(i.body),setTimeout(function(){m("#w2ui-popup").css(w2utils.cssPrefix({transition:i.speed+"s opacity, "+i.speed+"s -webkit-transform"})).removeClass("w2ui-popup-opening"),t.focus()},1),setTimeout(function(){m("#w2ui-popup").css(w2utils.cssPrefix("transform",""))},1e3*i.speed),w2popup.status="open",t.trigger(m.extend(a,{phase:"after"}))}else if(!0===i.multiple)w2popup.message(e);else{if(null==w2popup._prev&&null!=w2popup._template&&t.restoreTemplate(),!0===(a=this.trigger({phase:"before",type:"open",target:"popup",options:i,present:!0})).isCancelled)return;w2popup.status="opening",null!=r&&(r.maximized||r.width==i.width&&r.height==i.height||w2popup.resize(i.width,i.height),i.prevSize=i.width+"px:"+i.height+"px",i.maximized=r.maximized);var h=m("#w2ui-popup .w2ui-box").clone();h.removeClass("w2ui-box").addClass("w2ui-box-temp").find(".w2ui-popup-body").empty().append(i.body),"string"==typeof i.body&&0Close':"")+(i.showMax?'
Max
':"")).append(i.title),m("#w2ui-popup .w2ui-popup-body").removeClass("w2ui-popup-no-title"),m("#w2ui-popup .w2ui-box, #w2ui-popup .w2ui-box-temp").css("top","")):(m("#w2ui-popup .w2ui-popup-title").hide().html(""),m("#w2ui-popup .w2ui-popup-body").addClass("w2ui-popup-no-title"),m("#w2ui-popup .w2ui-box, #w2ui-popup .w2ui-box-temp").css("top","0px"));var p=m("#w2ui-popup .w2ui-box")[0],f=m("#w2ui-popup .w2ui-box-temp")[0];w2utils.transition(p,f,i.transition,function(){t.restoreTemplate(),m(p).remove(),m(f).removeClass("w2ui-box-temp").addClass("w2ui-box");var e=m(f).find(".w2ui-popup-body");1==e.length&&(e[0].style.cssText=i.style),m("#w2ui-popup").data("prev-size",null),t.focus()}),w2popup.status="open",t.trigger(m.extend(a,{phase:"after"}))}i._last_focus=m(":focus"),i.keyboard&&m(document).on("keydown",this.keydown);var g={resizing:!1,mvMove:function(e){if(1!=g.resizing)return;e=e||window.event;g.div_x=e.screenX-g.x,g.div_y=e.screenY-g.y;var t=w2popup.trigger({phase:"before",type:"move",target:"popup",div_x:g.div_x,div_y:g.div_y});if(!0===t.isCancelled)return;m("#w2ui-popup").css(w2utils.cssPrefix({transition:"none",transform:"translate3d("+g.div_x+"px, "+g.div_y+"px, 0px)"})),w2popup.trigger(m.extend(t,{phase:"after"}))},mvStop:function(e){if(1!=g.resizing)return;e=e||window.event;w2popup.status="open",g.div_x=e.screenX-g.x,g.div_y=e.screenY-g.y,m("#w2ui-popup").css({left:g.pos_x+g.div_x+"px",top:g.pos_y+g.div_y+"px"}).css(w2utils.cssPrefix({transition:"none",transform:"translate3d(0px, 0px, 0px)"})),g.resizing=!1,m(document).off("mousemove",g.mvMove),m(document).off("mouseup",g.mvStop),g.isLocked||w2popup.unlock()}};return m("#w2ui-popup .w2ui-popup-title").on("mousedown",function(e){w2popup.get().maximized||function(e){e=e||window.event;w2popup.status="moving",g.resizing=!0,g.isLocked=1==m("#w2ui-popup > .w2ui-lock").length,g.x=e.screenX,g.y=e.screenY,g.pos_x=m("#w2ui-popup").position().left,g.pos_y=m("#w2ui-popup").position().top,g.isLocked||w2popup.lock({opacity:0});m(document).on("mousemove",g.mvMove),m(document).on("mouseup",g.mvStop),e.stopPropagation?e.stopPropagation():e.cancelBubble=!0;{if(!e.preventDefault)return;e.preventDefault()}}(e)}),this}setTimeout(function(){t.open.call(t,i)},100)},action:function(e,t){var i=this,s=m("#w2ui-popup").data("options");null!=t&&(i={parent:this,options:s=m("#w2ui-message"+t).data("options"),close:function(){w2popup.message({msgId:t})}});var n=s.actions[e],r=n;m.isPlainObject(n)&&n.onClick&&(r=n.onClick);var a=this.trigger({phase:"before",target:e,msgId:t,type:"action",action:n,originalEvent:event});!0!==a.isCancelled&&("function"==typeof r&&r.call(i,event),this.trigger(m.extend(a,{phase:"after"})))},keydown:function(e){var t,i=m("#w2ui-popup").data("options");i&&!i.keyboard||!0!==(t=w2popup.trigger({phase:"before",type:"keydown",target:"popup",options:i,originalEvent:e})).isCancelled&&(27===e.keyCode&&(e.preventDefault(),0